
Commit d4eedbb

marksandler2 authored and sguada committed

Merged commit includes the following changes (#8077):

Internal cleanup (py2->py3), plus the following changes:

- 285513318 by Sergio Guadarrama: Adds a script for post-training quantization.
- 284222305 by Sergio Guadarrama: Modified squeeze-excite operation to accommodate tensors of undefined (NoneType) H/W.
- 282028343 by Sergio Guadarrama: Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.

PiperOrigin-RevId: 289455329
Co-authored-by: Sergio Guadarrama <sguada@gmail.com>
1 parent 0e0a94a commit d4eedbb


58 files changed: +1436 -1071 lines

research/slim/BUILD

+22 -10

@@ -1,6 +1,7 @@
 # Description:
 # Contains files for loading, training and evaluating TF-Slim-based models.
 # load("//devtools/python/blaze:python3.bzl", "py2and3_test")
+load("//devtools/python/blaze:pytype.bzl", "pytype_strict_binary")

 package(
     default_visibility = ["//visibility:public"],

@@ -475,11 +476,10 @@ py_test(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "inception_v2_test",
     size = "large",
     srcs = ["nets/inception_v2_test.py"],
-    python_version = "PY2",
     shard_count = 3,
     srcs_version = "PY2AND3",
     deps = [

@@ -590,14 +590,14 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "mobilenet_v2_test",
     srcs = ["nets/mobilenet/mobilenet_v2_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":mobilenet",
         ":mobilenet_common",
+        "//third_party/py/six",
         # "//tensorflow",
         # "//tensorflow/contrib/slim",
     ],

@@ -755,11 +755,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "overfeat_test",
     size = "medium",
     srcs = ["nets/overfeat_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":overfeat",

@@ -890,11 +889,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "vgg_test",
     size = "medium",
     srcs = ["nets/vgg_test.py"],
-    python_version = "PY2",
     srcs_version = "PY2AND3",
     deps = [
         ":vgg",

@@ -912,11 +910,10 @@ py_library(
     ],
 )

-py_test(
+py_test(  # py2and3_test
     name = "nets_factory_test",
     size = "large",
     srcs = ["nets/nets_factory_test.py"],
-    python_version = "PY2",
     shard_count = 3,
     srcs_version = "PY2AND3",
     deps = [

@@ -925,9 +922,24 @@ py_test(
     ],
 )

+pytype_strict_binary(
+    name = "post_training_quantization",
+    srcs = ["nets/post_training_quantization.py"],
+    python_version = "PY3",
+    deps = [
+        ":nets_factory",
+        ":preprocessing_factory",
+        "//third_party/py/absl:app",
+        "//third_party/py/absl/flags",
+        # "//tensorflow",
+        # "//tensorflow_datasets",
+    ],
+)
+
 py_library(
     name = "train_image_classifier_lib",
     srcs = ["train_image_classifier.py"],
+    srcs_version = "PY2AND3",
    deps = [
        ":dataset_factory",
        ":model_deploy",
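
The new pytype_strict_binary target registers the quantization script added in change 285513318 against nets_factory and preprocessing_factory. The script itself is not among the files shown here, so the snippet below is only a hedged sketch of a generic TFLite post-training quantization flow; the function name and arguments are illustrative, not the real post_training_quantization.py interface:

```python
# Hypothetical sketch: nets/post_training_quantization.py is not shown in this
# diff. This only illustrates the generic TFLite post-training quantization
# flow that the target's deps suggest (requires TF >= 1.15 for the tf.lite API).
import tensorflow as tf

def quantize_saved_model(saved_model_dir, representative_dataset_gen):
  """Converts a SavedModel into a weight/activation-quantized TFLite model."""
  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  converter.optimizations = [tf.lite.Optimize.DEFAULT]
  # A generator yielding representative inputs drives activation calibration.
  converter.representative_dataset = representative_dataset_gen
  return converter.convert()
```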

research/slim/datasets/download_and_convert_visualwakewords_lib.py

+1 -1

@@ -201,7 +201,7 @@ def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
     groundtruth_data = json.load(fid)
     images = groundtruth_data['images']
     annotations_index = groundtruth_data['annotations']
-    annotations_index = {int(k): v for k, v in annotations_index.items()}
+    annotations_index = {int(k): v for k, v in annotations_index.iteritems()}
     # convert 'unicode' key to 'int' key after we parse the json file

   for idx, image in enumerate(images):
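
Note that this hunk, as rendered, swaps the Python 3 compatible .items() for .iteritems(), which exists only under Python 2; the portable spelling for code that must run under both interpreters is six.iteritems, the helper that other targets in this commit now depend on. A minimal sketch:

```python
# dict.iteritems() is Python 2 only; six.iteritems works on both 2 and 3.
import six

annotations_index = {'1': {'label': 1}, '2': {'label': 0}}
annotations_index = {int(k): v for k, v in six.iteritems(annotations_index)}
assert annotations_index == {1: {'label': 1}, 2: {'label': 0}}
```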

research/slim/nets/alexnet.py

+19 -12

@@ -40,13 +40,16 @@
 from tensorflow.contrib import slim as contrib_slim

 slim = contrib_slim
-trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
+
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    0.0, stddev)


 def alexnet_v2_arg_scope(weight_decay=0.0005):
   with slim.arg_scope([slim.conv2d, slim.fully_connected],
                       activation_fn=tf.nn.relu,
-                      biases_initializer=tf.constant_initializer(0.1),
+                      biases_initializer=tf.compat.v1.constant_initializer(0.1),
                       weights_regularizer=slim.l2_regularizer(weight_decay)):
     with slim.arg_scope([slim.conv2d], padding='SAME'):
       with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:

@@ -94,7 +97,7 @@ def alexnet_v2(inputs,
       or None).
     end_points: a dict of tensors with intermediate activations.
   """
-  with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
+  with tf.compat.v1.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
     end_points_collection = sc.original_name_scope + '_end_points'
     # Collect outputs for conv2d, fully_connected and max_pool2d.
     with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],

@@ -110,9 +113,10 @@ def alexnet_v2(inputs,
       net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')

       # Use conv2d instead of fully_connected layers.
-      with slim.arg_scope([slim.conv2d],
-                          weights_initializer=trunc_normal(0.005),
-                          biases_initializer=tf.constant_initializer(0.1)):
+      with slim.arg_scope(
+          [slim.conv2d],
+          weights_initializer=trunc_normal(0.005),
+          biases_initializer=tf.compat.v1.constant_initializer(0.1)):
         net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
                           scope='fc6')
         net = slim.dropout(net, dropout_keep_prob, is_training=is_training,

@@ -122,16 +126,19 @@ def alexnet_v2(inputs,
       end_points = slim.utils.convert_collection_to_dict(
           end_points_collection)
       if global_pool:
-        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
+        net = tf.reduce_mean(
+            input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
         end_points['global_pool'] = net
       if num_classes:
         net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                            scope='dropout7')
-        net = slim.conv2d(net, num_classes, [1, 1],
-                          activation_fn=None,
-                          normalizer_fn=None,
-                          biases_initializer=tf.zeros_initializer(),
-                          scope='fc8')
+        net = slim.conv2d(
+            net,
+            num_classes, [1, 1],
+            activation_fn=None,
+            normalizer_fn=None,
+            biases_initializer=tf.compat.v1.zeros_initializer(),
+            scope='fc8')
         if spatial_squeeze:
           net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
       end_points[sc.name + '/fc8'] = net
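
The changes in this file are mechanical: TF1 symbols that were removed or renamed in TF2 move under tf.compat.v1, and deprecated argument spellings become their TF2 forms. A minimal sketch of the same mapping in isolation, assuming TF >= 1.15 so that both spellings coexist:

```python
# Same migration pattern in isolation; assumes TF >= 1.15, where the
# tf.compat.v1 symbols and the TF2-style argument names are both available.
import tensorflow as tf

init = tf.compat.v1.truncated_normal_initializer(0.0, 0.005)  # was tf.truncated_normal_initializer
with tf.compat.v1.variable_scope('demo'):                     # was tf.variable_scope
  x = tf.random.uniform((1, 6, 6, 3))                         # was tf.random_uniform
  # keep_dims -> keepdims, and positional args become explicit keywords.
  pooled = tf.reduce_mean(input_tensor=x, axis=[1, 2], keepdims=True)
```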

research/slim/nets/alexnet_test.py

+15 -15

@@ -32,7 +32,7 @@ def testBuild(self):
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = alexnet.alexnet_v2(inputs, num_classes)
       self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
       self.assertListEqual(logits.get_shape().as_list(),

@@ -43,7 +43,7 @@ def testFullyConvolutional(self):
     height, width = 300, 400
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
       self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
       self.assertListEqual(logits.get_shape().as_list(),

@@ -54,7 +54,7 @@ def testGlobalPool(self):
     height, width = 256, 256
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
                                      global_pool=True)
       self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')

@@ -66,7 +66,7 @@ def testEndPoints(self):
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       _, end_points = alexnet.alexnet_v2(inputs, num_classes)
       expected_names = ['alexnet_v2/conv1',
                         'alexnet_v2/pool1',

@@ -87,7 +87,7 @@ def testNoClasses(self):
     height, width = 224, 224
     num_classes = None
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       net, end_points = alexnet.alexnet_v2(inputs, num_classes)
       expected_names = ['alexnet_v2/conv1',
                         'alexnet_v2/pool1',

@@ -110,7 +110,7 @@ def testModelVariables(self):
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       alexnet.alexnet_v2(inputs, num_classes)
       expected_names = ['alexnet_v2/conv1/weights',
                         'alexnet_v2/conv1/biases',

@@ -137,11 +137,11 @@ def testEvaluation(self):
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
-      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+      eval_inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
       self.assertListEqual(logits.get_shape().as_list(),
                            [batch_size, num_classes])
-      predictions = tf.argmax(logits, 1)
+      predictions = tf.argmax(input=logits, axis=1)
       self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

   def testTrainEvalWithReuse(self):

@@ -151,29 +151,29 @@ def testTrainEvalWithReuse(self):
     eval_height, eval_width = 300, 400
     num_classes = 1000
     with self.test_session():
-      train_inputs = tf.random_uniform(
+      train_inputs = tf.random.uniform(
           (train_batch_size, train_height, train_width, 3))
       logits, _ = alexnet.alexnet_v2(train_inputs)
       self.assertListEqual(logits.get_shape().as_list(),
                            [train_batch_size, num_classes])
-      tf.get_variable_scope().reuse_variables()
-      eval_inputs = tf.random_uniform(
+      tf.compat.v1.get_variable_scope().reuse_variables()
+      eval_inputs = tf.random.uniform(
          (eval_batch_size, eval_height, eval_width, 3))
       logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                      spatial_squeeze=False)
       self.assertListEqual(logits.get_shape().as_list(),
                            [eval_batch_size, 4, 7, num_classes])
-      logits = tf.reduce_mean(logits, [1, 2])
-      predictions = tf.argmax(logits, 1)
+      logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
+      predictions = tf.argmax(input=logits, axis=1)
       self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])

   def testForward(self):
     batch_size = 1
     height, width = 224, 224
     with self.test_session() as sess:
-      inputs = tf.random_uniform((batch_size, height, width, 3))
+      inputs = tf.random.uniform((batch_size, height, width, 3))
       logits, _ = alexnet.alexnet_v2(inputs)
-      sess.run(tf.global_variables_initializer())
+      sess.run(tf.compat.v1.global_variables_initializer())
       output = sess.run(logits)
       self.assertTrue(output.any())
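
The test updates follow the same mapping. One wrinkle worth calling out is tf.argmax, whose second positional argument was historically named dimension, so the keyword form input=/axis= is the unambiguous spelling in both TF1 and TF2. A short sketch under the same TF >= 1.15 assumption:

```python
# Keyword arguments keep tf.argmax unambiguous across TF1/TF2 signatures.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # no-op on TF1; forces graph mode on TF2

logits = tf.random.uniform((2, 10))
predictions = tf.argmax(input=logits, axis=1)  # was tf.argmax(logits, 1)
with tf.Session() as sess:
  print(sess.run(predictions).shape)  # (2,)
```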

research/slim/nets/cifarnet.py

+15 -10

@@ -23,7 +23,9 @@

 slim = contrib_slim

-trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
+# pylint: disable=g-long-lambda
+trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
+    stddev=stddev)


 def cifarnet(images, num_classes=10, is_training=False,

@@ -61,7 +63,7 @@ def cifarnet(images, num_classes=10, is_training=False,
   """
   end_points = {}

-  with tf.variable_scope(scope, 'CifarNet', [images]):
+  with tf.compat.v1.variable_scope(scope, 'CifarNet', [images]):
     net = slim.conv2d(images, 64, [5, 5], scope='conv1')
     end_points['conv1'] = net
     net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')

@@ -82,12 +84,14 @@ def cifarnet(images, num_classes=10, is_training=False,
     end_points['fc4'] = net
     if not num_classes:
       return net, end_points
-    logits = slim.fully_connected(net, num_classes,
-                                  biases_initializer=tf.zeros_initializer(),
-                                  weights_initializer=trunc_normal(1/192.0),
-                                  weights_regularizer=None,
-                                  activation_fn=None,
-                                  scope='logits')
+    logits = slim.fully_connected(
+        net,
+        num_classes,
+        biases_initializer=tf.compat.v1.zeros_initializer(),
+        weights_initializer=trunc_normal(1 / 192.0),
+        weights_regularizer=None,
+        activation_fn=None,
+        scope='logits')

     end_points['Logits'] = logits
     end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

@@ -107,11 +111,12 @@ def cifarnet_arg_scope(weight_decay=0.004):
   """
   with slim.arg_scope(
       [slim.conv2d],
-      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
+      weights_initializer=tf.compat.v1.truncated_normal_initializer(
+          stddev=5e-2),
       activation_fn=tf.nn.relu):
     with slim.arg_scope(
         [slim.fully_connected],
-        biases_initializer=tf.constant_initializer(0.1),
+        biases_initializer=tf.compat.v1.constant_initializer(0.1),
         weights_initializer=trunc_normal(0.04),
         weights_regularizer=slim.l2_regularizer(weight_decay),
         activation_fn=tf.nn.relu) as sc:
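
As in alexnet.py, only the initializer spellings change here; slim.arg_scope then propagates them to every matching layer, so call sites stay untouched. A short sketch of that propagation (TF 1.x only, since tf.contrib and its slim were removed in TF 2):

```python
# TF 1.x only: tf.contrib (and contrib slim) no longer exists in TF 2.
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim

slim = contrib_slim

# Every slim.fully_connected built inside the scope inherits these
# initializers, so only the arg_scope needs the tf.compat.v1 spelling.
with slim.arg_scope(
    [slim.fully_connected],
    biases_initializer=tf.compat.v1.constant_initializer(0.1),
    weights_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.04)):
  net = slim.fully_connected(tf.zeros([1, 8]), 4, scope='demo')
```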
