@@ -107,7 +107,7 @@ def max_pool_2x2(x):
         with tf.name_scope("reshape_x_image") as scope:
             self._x_image = tf.reshape(self._ph.image, [-1, self._nCols, self._nRows, 1])

-            image_summ = tf.image_summary("x_image", self._x_image)
+            image_summ = tf.summary.image("x_image", self._x_image)

         """# ==============================================================================

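Note: the change above is part of the TensorFlow 1.x summary-API migration, in which the deprecated `tf.image_summary` op was renamed to `tf.summary.image`. A minimal sketch of the renamed call, assuming the 1.x API; the placeholder shape and variable names below are illustrative, not taken from this repository:

import tensorflow as tf  # TensorFlow 1.x API assumed

# Illustrative stand-in for self._x_image: a batch of single-channel images.
x_image = tf.placeholder(tf.float32, [None, 28, 28, 1], name="x_image")

# tf.summary.image replaces the deprecated tf.image_summary; it records up to
# max_outputs images per step for display in TensorBoard.
image_summ = tf.summary.image("x_image", x_image, max_outputs=3)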
@@ -218,40 +218,40 @@ def max_pool_2x2(x):
         with tf.name_scope("xent") as scope:

             # 1e-8 added to eliminate the crash of training when taking log of 0
-            cross_entropy = -tf.reduce_sum(self._ph[0] * tf.log(y_conv + 1e-8))
+            self._cross_entropy = -tf.reduce_sum(self._ph[0] * tf.log(y_conv + 1e-8))
             #cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
             #    logits, labels, name='xentropy')
-            ce_summ = tf.scalar_summary("cross entropy", cross_entropy)
+            ce_summ = tf.summary.scalar("cross entropy", self._cross_entropy)

         with tf.name_scope("reshape_x_image2") as scope:
             self._x_image2 = tf.reshape(self._ph[0], [-1, int(self._nCols / 2), int(self._nRows / 2), 1])

-            image_summ2 = tf.image_summary("x_image2", self._x_image2)
+            image_summ2 = tf.summary.image("x_image2", self._x_image2)

         with tf.name_scope("train") as scope:
-            self._train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
+            self._train_step = tf.train.AdamOptimizer(1e-4).minimize(self._cross_entropy)
             #self._train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

         with tf.name_scope("test") as scope:
             self._correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self._ph[0], 1))
             self._prediction = tf.argmax(y_conv, 1)

             self._accuracy = tf.reduce_mean(tf.cast(self._correct_prediction, dtype))
-            accuracy_summary = tf.scalar_summary("accuracy", self._accuracy)
+            accuracy_summary = tf.summary.scalar("accuracy", self._accuracy)
         """# ==============================================================================

-        Start TensorFlow Interactive Session
+        Start TensorFlow Session

         """ # ==============================================================================

         self._sess.run(tf.initialize_all_variables())
-        self._merged = tf.merge_all_summaries()
+        self._merged = tf.summary.merge_all()
         tm = ""
         tp = datetime.datetime.now().timetuple()
         for i in range(4):
             tm += str(tp[i]) + '-'
         tm += str(tp[4])
-        self._writer = tf.train.SummaryWriter("/tmp/ds_logs/" + tm, self._sess.graph)
+        self._writer = tf.summary.FileWriter("/tmp/ds_logs/" + tm, self._sess.graph)

         def computeSize(s, tens):
             sumC = 1
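The remaining renames in this hunk follow the same 1.x migration: `tf.scalar_summary` becomes `tf.summary.scalar`, `tf.merge_all_summaries` becomes `tf.summary.merge_all`, and `tf.train.SummaryWriter` becomes `tf.summary.FileWriter`. A minimal sketch of the merged-summary and writer pattern under those assumptions; the scalar tensor and log directory below are illustrative, not taken from this code:

import tensorflow as tf  # TensorFlow 1.x API assumed

# Illustrative scalar standing in for the cross-entropy tensor.
loss = tf.constant(0.5, name="loss")
tf.summary.scalar("cross entropy", loss)   # replaces tf.scalar_summary

merged = tf.summary.merge_all()            # replaces tf.merge_all_summaries

with tf.Session() as sess:
    # FileWriter replaces tf.train.SummaryWriter; the graph is written at creation.
    writer = tf.summary.FileWriter("/tmp/ds_logs/example", sess.graph)
    summary_str = sess.run(merged)
    writer.add_summary(summary_str, global_step=0)
    writer.close()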
@@ -319,4 +319,36 @@ def test2(self, truthed_data, title = ''):
         ocr_utils.montage(output_images, title = 'TensorFlow Output Images')
         ocr_utils.montage(input_images, title = 'TensorFlow Input Images')

+    def fit_entropy(self, truthed_data, nEpochs = 5000):
+
+        perfect_count = 10
+        for i in range(nEpochs):
+
+            batch = truthed_data.next_batch(100)
+            # assign feature data to each placeholder
+            # the batch list is returned in the same order as the features requested
+            feed = {self._keep_prob: 0.5}
+            for j in range(truthed_data.num_features):
+                feed[self._ph[j]] = batch[j]
+
+            if i % 100 == 0:
+
+                feed[self._keep_prob] = 1.0
+                result = self._sess.run([self._merged, self._cross_entropy], feed_dict = feed)
+                summary_str = result[0]
+
+                self._writer.add_summary(summary_str, i)
+                train_entropy = result[1]
+                if train_entropy >= (2000):
+                    perfect_count = 10;
+                else:
+                    perfect_count -= 1
+                if perfect_count == 0:
+                    break;
+
+                print("step %d, training entropy %g" % (i, train_entropy), flush = True)
+            self._sess.run(self._train_step, feed_dict = feed)
+
+
+

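For reference, the new `fit_entropy` method trains on batches of 100 and adds an entropy-based stopping rule: every 100 steps it evaluates the merged summaries and the stored `self._cross_entropy` with the keep probability set to 1.0, writes the summary, and ends training early once ten consecutive checks see a batch entropy below 2000. A hypothetical call, assuming `nn` is an instance of this network class and `truthed_data` exposes `next_batch` and `num_features` as used in the diff:

# Hypothetical usage; `nn` and `truthed_data` are assumed, not defined in this diff.
nn.fit_entropy(truthed_data, nEpochs = 5000)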