@@ -68,7 +68,7 @@ def train_a_font(input_filters_dict,output_feature_list, nEpochs=5000):
68
68
69
69
for i ,nm in enumerate (output_feature_list ):
70
70
71
- # features[0], is the target, ' m_label_one_hot'
71
+ # features[0] is always the target; for instance, it may be 'm_label_one_hot'
72
72
# the second features[1] is the 'image' that is passed to the convolution layers
73
73
# Any additional features bypass the convolution layers and go directly
74
74
# into the fully connected layer.
@@ -225,14 +225,14 @@ def max_pool_2x2(x):
225
225
226
226
with tf .name_scope ("xent" ) as scope :
227
227
# 1e-8 is added to avoid a training crash from taking the log of 0
228
- cross_entropy = - tf .reduce_sum (ph . m_label_one_hot * tf .log (y_conv + 1e-8 ))
228
+ cross_entropy = - tf .reduce_sum (ph [ 0 ] * tf .log (y_conv + 1e-8 ))
229
229
ce_summ = tf .scalar_summary ("cross entropy" , cross_entropy )
230
230
231
231
with tf .name_scope ("train" ) as scope :
232
232
train_step = tf .train .AdamOptimizer (1e-4 ).minimize (cross_entropy )
233
233
234
234
with tf .name_scope ("test" ) as scope :
235
- correct_prediction = tf .equal (tf .argmax (y_conv ,1 ), tf .argmax (ph . m_label_one_hot ,1 ))
235
+ correct_prediction = tf .equal (tf .argmax (y_conv ,1 ), tf .argmax (ph [ 0 ] ,1 ))
236
236
237
237
accuracy = tf .reduce_mean (tf .cast (correct_prediction , tf .float32 ))
238
238
accuracy_summary = tf .scalar_summary ("accuracy" , accuracy )
@@ -351,6 +351,9 @@ def computeSize(s,tens):
351
351
352
352
# output only the character label and the image
353
353
# output_feature_list = ['m_label_one_hot','image']
354
+
355
+ # identify the font given the input images
356
+ #output_feature_list = ['font_one_hot','image','italic','aspect_ratio','upper_case']
354
357
355
358
# train the digits 0-9 for all fonts
356
359
input_filters_dict = {'m_label' : range (48 ,58 )}
0 commit comments