
Commit 57e0752

goldiegadde authored and tfboyd committed
Revert "tf_upgrade_v2 on resnet and utils folders. (tensorflow#6154)" (tensorflow#6162)
This reverts commit d6b2b83.
1 parent e42c769 commit 57e0752

34 files changed: 251 additions & 266 deletions
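The hunks below all follow the same pattern: tf_upgrade_v2 had mechanically renamed TF 1.x symbols to their tf.compat.v1 / tf.io / tf.random equivalents, and this revert restores the original 1.x names. A rough, non-exhaustive summary of the renames being undone, compiled from the diffs in this commit (illustrative Python only, not part of the commit itself):

# Illustrative mapping only, drawn from the hunks below.
RESTORED_NAME_TO_UPGRADED_NAME = {
    'tf.app.run': 'tf.compat.v1.app.run',
    'tf.gfile.Exists': 'tf.io.gfile.exists',
    'tf.gfile.DeleteRecursively': 'tf.io.gfile.rmtree',
    'tf.logging.set_verbosity': 'tf.compat.v1.logging.set_verbosity',
    'tf.decode_raw': 'tf.io.decode_raw',
    'tf.parse_single_example': 'tf.io.parse_single_example',
    'tf.random_uniform': 'tf.random.uniform',
    'tf.truncated_normal': 'tf.random.truncated_normal',
    'tf.random_crop': 'tf.image.random_crop',
    'tf.sparse_to_dense': 'tf.compat.v1.sparse_to_dense',
    'tf.image.resize_images': 'tf.image.resize',
    'tf.contrib.data.parallel_interleave': 'tf.data.experimental.parallel_interleave',
    'tf.contrib.data.AUTOTUNE': 'tf.data.experimental.AUTOTUNE',
}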

official/resnet/cifar10_download_and_extract.py

Lines changed: 1 addition & 1 deletion
@@ -60,4 +60,4 @@ def _progress(count, block_size, total_size):
 
 if __name__ == '__main__':
   FLAGS, unparsed = parser.parse_known_args()
-  tf.compat.v1.app.run(argv=[sys.argv[0]] + unparsed)
+  tf.app.run(argv=[sys.argv[0]] + unparsed)

official/resnet/cifar10_main.py

Lines changed: 7 additions & 8 deletions
@@ -52,7 +52,7 @@
 ###############################################################################
 def get_filenames(is_training, data_dir):
   """Returns a list of filenames."""
-  assert tf.io.gfile.exists(data_dir), (
+  assert tf.gfile.Exists(data_dir), (
       'Run cifar10_download_and_extract.py first to download and extract the '
       'CIFAR-10 data.')
 
@@ -68,7 +68,7 @@ def get_filenames(is_training, data_dir):
 def parse_record(raw_record, is_training, dtype):
   """Parse CIFAR-10 image and label from a raw record."""
   # Convert bytes to a vector of uint8 that is record_bytes long.
-  record_vector = tf.io.decode_raw(raw_record, tf.uint8)
+  record_vector = tf.decode_raw(raw_record, tf.uint8)
 
   # The first byte represents the label, which we convert from uint8 to int32
   # and then to one-hot.
@@ -81,7 +81,7 @@ def parse_record(raw_record, is_training, dtype):
 
   # Convert from [depth, height, width] to [height, width, depth], and cast as
   # float32.
-  image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
+  image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
 
   image = preprocess_image(image, is_training)
   image = tf.cast(image, dtype)
@@ -97,7 +97,7 @@ def preprocess_image(image, is_training):
         image, HEIGHT + 8, WIDTH + 8)
 
     # Randomly crop a [HEIGHT, WIDTH] section of the image.
-    image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
+    image = tf.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
 
     # Randomly flip the image horizontally.
     image = tf.image.random_flip_left_right(image)
@@ -253,9 +253,8 @@ def run_cifar(flags_obj):
     Dictionary of results. Including final accuracy.
   """
   if flags_obj.image_bytes_as_serving_input:
-    tf.compat.v1.logging.fatal(
-        '--image_bytes_as_serving_input cannot be set to True for CIFAR. '
-        'This flag is only applicable to ImageNet.')
+    tf.logging.fatal('--image_bytes_as_serving_input cannot be set to True '
+                     'for CIFAR. This flag is only applicable to ImageNet.')
     return
 
   input_function = (flags_obj.use_synthetic_data and
@@ -274,6 +273,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   define_cifar_flags()
   absl_app.run(main)
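The cifar10_main.py changes are name-for-name: the positional-argument forms restored here compute the same result as the keyword-argument forms they replace. A minimal sketch, assuming a TF 1.x installation (the tensor contents are invented for illustration):

import numpy as np
import tensorflow as tf  # assumes a TF 1.x installation

# Invented stand-in for a decoded CIFAR-10 record in [depth, height, width] layout.
depth_major = tf.constant(np.arange(3 * 32 * 32).reshape(3, 32, 32), tf.float32)

# Restored TF 1.x spelling (positional arguments)...
image_v1 = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
# ...and the tf_upgrade_v2 spelling this commit reverts (explicit keywords).
image_v2 = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)

with tf.Session() as sess:
  a, b = sess.run([image_v1, image_v2])
  assert a.shape == (32, 32, 3) and (a == b).all()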

official/resnet/cifar10_test.py

Lines changed: 5 additions & 6 deletions
@@ -25,7 +25,7 @@
 from official.resnet import cifar10_main
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+tf.logging.set_verbosity(tf.logging.ERROR)
 
 _BATCH_SIZE = 128
 _HEIGHT = 32
@@ -44,7 +44,7 @@ def setUpClass(cls): # pylint: disable=invalid-name
 
   def tearDown(self):
     super(BaseTest, self).tearDown()
-    tf.io.gfile.rmtree(self.get_temp_dir())
+    tf.gfile.DeleteRecursively(self.get_temp_dir())
 
   def test_dataset_input_fn(self):
     fake_data = bytearray()
@@ -62,8 +62,7 @@ def test_dataset_input_fn(self):
         filename, cifar10_main._RECORD_BYTES) # pylint: disable=protected-access
     fake_dataset = fake_dataset.map(
         lambda val: cifar10_main.parse_record(val, False, tf.float32))
-    image, label = tf.compat.v1.data.make_one_shot_iterator(
-        fake_dataset).get_next()
+    image, label = fake_dataset.make_one_shot_iterator().get_next()
 
     self.assertAllEqual(label.shape, ())
     self.assertAllEqual(image.shape, (_HEIGHT, _WIDTH, _NUM_CHANNELS))
@@ -80,7 +79,7 @@ def test_dataset_input_fn(self):
   def cifar10_model_fn_helper(self, mode, resnet_version, dtype):
     input_fn = cifar10_main.get_synth_input_fn(dtype)
     dataset = input_fn(True, '', _BATCH_SIZE)
-    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
+    iterator = dataset.make_initializable_iterator()
     features, labels = iterator.get_next()
     spec = cifar10_main.cifar10_model_fn(
         features, labels, mode, {
@@ -143,7 +142,7 @@ def _test_cifar10model_shape(self, resnet_version):
     model = cifar10_main.Cifar10Model(32, data_format='channels_last',
                                       num_classes=num_classes,
                                       resnet_version=resnet_version)
-    fake_input = tf.random.uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
+    fake_input = tf.random_uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
     output = model(fake_input, training=True)
 
     self.assertAllEqual(output.shape, (batch_size, num_classes))
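The iterator changes in the test are likewise one-for-one: dataset.make_one_shot_iterator() and tf.compat.v1.data.make_one_shot_iterator(dataset) build the same iterator. A minimal sketch, assuming TF 1.14 or later (where both spellings are available):

import tensorflow as tf  # assumes TF 1.14+, where both spellings exist

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])

# Restored TF 1.x method form...
it_method = dataset.make_one_shot_iterator()
# ...and the upgraded free-function form that this commit reverts.
it_function = tf.compat.v1.data.make_one_shot_iterator(dataset)

with tf.Session() as sess:
  assert sess.run(it_method.get_next()) == sess.run(it_function.get_next()) == 1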

official/resnet/estimator_cifar_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -144,7 +144,7 @@ def _get_model_dir(self, folder_name):
     return os.path.join(self.output_dir, folder_name)
 
   def _setup(self):
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
+    tf.logging.set_verbosity(tf.logging.DEBUG)
     if EstimatorCifar10BenchmarkTests.local_flags is None:
       cifar_main.define_cifar_flags()
       # Loads flags to get defaults to then override.

official/resnet/imagenet_main.py

Lines changed: 10 additions & 11 deletions
@@ -95,23 +95,22 @@ def _parse_example_proto(example_serialized):
   """
   # Dense features in Example proto.
   feature_map = {
-      'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
+      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
+                                          default_value=''),
+      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64,
+                                              default_value=-1),
+      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                              default_value=''),
-      'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
-                                                 default_value=-1),
-      'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
-                                                default_value=''),
   }
-  sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
+  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
   # Sparse features in Example proto.
   feature_map.update(
       {k: sparse_float32 for k in ['image/object/bbox/xmin',
                                    'image/object/bbox/ymin',
                                    'image/object/bbox/xmax',
                                    'image/object/bbox/ymax']})
 
-  features = tf.io.parse_single_example(serialized=example_serialized,
-                                        features=feature_map)
+  features = tf.parse_single_example(example_serialized, feature_map)
   label = tf.cast(features['image/class/label'], dtype=tf.int32)
 
   xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
@@ -125,7 +124,7 @@ def _parse_example_proto(example_serialized):
   # Force the variable number of bounding boxes into the shape
   # [1, num_boxes, coords].
   bbox = tf.expand_dims(bbox, 0)
-  bbox = tf.transpose(a=bbox, perm=[0, 2, 1])
+  bbox = tf.transpose(bbox, [0, 2, 1])
 
   return features['image/encoded'], label, bbox
 
@@ -189,7 +188,7 @@ def input_fn(is_training, data_dir, batch_size, num_epochs=1,
   # This number is low enough to not cause too much contention on small systems
   # but high enough to provide the benefits of parallelization. You may want
   # to increase this number if you have a large number of CPU cores.
-  dataset = dataset.apply(tf.data.experimental.parallel_interleave(
+  dataset = dataset.apply(tf.contrib.data.parallel_interleave(
      tf.data.TFRecordDataset, cycle_length=10))
 
   return resnet_run_loop.process_record_dataset(
@@ -353,6 +352,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   define_imagenet_flags()
   absl_app.run(main)
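The _parse_example_proto hunk only swaps tf.io.* names back to the top-level tf.* aliases; the parsing behaviour is identical. A minimal, self-contained sketch of the restored call, assuming TF 1.x (the Example below is made up and only carries a label, not the full ImageNet TFRecord schema):

import tensorflow as tf  # assumes a TF 1.x installation

# Hypothetical serialized Example, used purely for illustration.
example = tf.train.Example(features=tf.train.Features(feature={
    'image/class/label': tf.train.Feature(
        int64_list=tf.train.Int64List(value=[7])),
}))

feature_map = {
    'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
}
features = tf.parse_single_example(example.SerializeToString(), feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)

with tf.Session() as sess:
  assert sess.run(label) == 7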

official/resnet/imagenet_preprocessing.py

Lines changed: 3 additions & 3 deletions
@@ -108,7 +108,7 @@ def _central_crop(image, crop_height, crop_width):
   Returns:
     3-D tensor with cropped image.
   """
-  shape = tf.shape(input=image)
+  shape = tf.shape(image)
   height, width = shape[0], shape[1]
 
   amount_to_be_cropped_h = (height - crop_height)
@@ -195,7 +195,7 @@ def _aspect_preserving_resize(image, resize_min):
   Returns:
     resized_image: A 3-D tensor containing the resized image.
   """
-  shape = tf.shape(input=image)
+  shape = tf.shape(image)
   height, width = shape[0], shape[1]
 
   new_height, new_width = _smallest_size_at_least(height, width, resize_min)
@@ -218,7 +218,7 @@ def _resize_image(image, height, width):
     resized_image: A 3-D tensor containing the resized image. The first two
       dimensions have the shape [height, width].
   """
-  return tf.image.resize(
+  return tf.image.resize_images(
      image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
      align_corners=False)
 
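The _resize_image hunk restores the TF 1.x name tf.image.resize_images, which still accepts the align_corners argument that the 2.x tf.image.resize no longer takes. A minimal sketch of the restored call, assuming TF 1.x (image contents invented):

import numpy as np
import tensorflow as tf  # assumes a TF 1.x installation

image = tf.constant(np.zeros((4, 4, 3), dtype=np.float32))  # invented 4x4 RGB image

# Restored TF 1.x call, matching the hunk above.
resized = tf.image.resize_images(
    image, [8, 8], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)

with tf.Session() as sess:
  assert sess.run(resized).shape == (8, 8, 3)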

official/resnet/imagenet_test.py

Lines changed: 6 additions & 6 deletions
@@ -24,7 +24,7 @@
 from official.resnet import imagenet_main
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+tf.logging.set_verbosity(tf.logging.ERROR)
 
 _BATCH_SIZE = 32
 _LABEL_CLASSES = 1001
@@ -39,7 +39,7 @@ def setUpClass(cls): # pylint: disable=invalid-name
 
   def tearDown(self):
     super(BaseTest, self).tearDown()
-    tf.io.gfile.rmtree(self.get_temp_dir())
+    tf.gfile.DeleteRecursively(self.get_temp_dir())
 
   def _tensor_shapes_helper(self, resnet_size, resnet_version, dtype, with_gpu):
     """Checks the tensor shapes after each phase of the ResNet model."""
@@ -62,7 +62,7 @@ def reshape(shape):
          resnet_version=resnet_version,
          dtype=dtype
      )
-      inputs = tf.random.uniform([1, 224, 224, 3])
+      inputs = tf.random_uniform([1, 224, 224, 3])
      output = model(inputs, training=True)
 
      initial_conv = graph.get_tensor_by_name('resnet_model/initial_conv:0')
@@ -189,11 +189,11 @@ def test_tensor_shapes_resnet_200_with_gpu_v2(self):
 
   def resnet_model_fn_helper(self, mode, resnet_version, dtype):
     """Tests that the EstimatorSpec is given the appropriate arguments."""
-    tf.compat.v1.train.create_global_step()
+    tf.train.create_global_step()
 
     input_fn = imagenet_main.get_synth_input_fn(dtype)
     dataset = input_fn(True, '', _BATCH_SIZE)
-    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
+    iterator = dataset.make_initializable_iterator()
     features, labels = iterator.get_next()
     spec = imagenet_main.imagenet_model_fn(
         features, labels, mode, {
@@ -257,7 +257,7 @@ def _test_imagenetmodel_shape(self, resnet_version):
        50, data_format='channels_last', num_classes=num_classes,
        resnet_version=resnet_version)
 
-    fake_input = tf.random.uniform([batch_size, 224, 224, 3])
+    fake_input = tf.random_uniform([batch_size, 224, 224, 3])
     output = model(fake_input, training=True)
 
     self.assertAllEqual(output.shape, (batch_size, num_classes))

official/resnet/keras/keras_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ def _get_model_dir(self, folder_name):
 
   def _setup(self):
     """Sets up and resets flags before each test."""
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
+    tf.logging.set_verbosity(tf.logging.DEBUG)
     if KerasBenchmark.local_flags is None:
       for flag_method in self.flag_methods:
         flag_method()

official/resnet/keras/keras_cifar_main.py

Lines changed: 3 additions & 3 deletions
@@ -81,7 +81,7 @@ def parse_record_keras(raw_record, is_training, dtype):
     Tuple with processed image tensor and one-hot-encoded label tensor.
   """
   image, label = cifar_main.parse_record(raw_record, is_training, dtype)
-  label = tf.compat.v1.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
+  label = tf.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
   return image, label
 
 
@@ -98,7 +98,7 @@ def run(flags_obj):
     Dictionary of training and eval stats.
   """
   if flags_obj.enable_eager:
-    tf.compat.v1.enable_eager_execution()
+    tf.enable_eager_execution()
 
   dtype = flags_core.get_tf_dtype(flags_obj)
   if dtype == 'fp16':
@@ -194,7 +194,7 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   cifar_main.define_cifar_flags()
   keras_common.define_keras_flags()
   absl_app.run(main)
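In parse_record_keras, the restored tf.sparse_to_dense call is just building a one-hot label vector from the scalar class index. A minimal sketch, assuming TF 1.x (label value chosen for illustration):

import tensorflow as tf  # assumes a TF 1.x installation

NUM_CLASSES = 10  # CIFAR-10 class count, standing in for cifar_main.NUM_CLASSES
label = tf.constant(3, dtype=tf.int32)  # invented class index

# Restored TF 1.x call: place the value 1 at index `label` in a length-10 vector.
one_hot = tf.sparse_to_dense(label, (NUM_CLASSES,), 1)

with tf.Session() as sess:
  print(sess.run(one_hot))  # [0 0 0 1 0 0 0 0 0 0]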

official/resnet/keras/keras_common.py

Lines changed: 19 additions & 19 deletions
@@ -80,10 +80,9 @@ def on_batch_end(self, batch, logs=None):
     if batch != 0:
       self.record_batch = True
       self.timestamp_log.append(BatchTimestamp(batch, timestamp))
-      tf.compat.v1.logging.info(
-          "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
-          "'images_per_second': %f}" %
-          (batch, elapsed_time, examples_per_second))
+      tf.logging.info("BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
+                      "'images_per_second': %f}" %
+                      (batch, elapsed_time, examples_per_second))
 
 
 class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
@@ -121,9 +120,8 @@ def on_batch_begin(self, batch, logs=None):
     if lr != self.prev_lr:
       self.model.optimizer.learning_rate = lr  # lr should be a float here
       self.prev_lr = lr
-      tf.compat.v1.logging.debug(
-          'Epoch %05d Batch %05d: LearningRateBatchScheduler '
-          'change learning rate to %s.', self.epochs, batch, lr)
+      tf.logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler '
+                       'change learning rate to %s.', self.epochs, batch, lr)
 
 
 def get_optimizer():
@@ -228,20 +226,22 @@ def get_synth_input_fn(height, width, num_channels, num_classes,
   def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
     """Returns dataset filled with random data."""
     # Synthetic input should be within [0, 255].
-    inputs = tf.random.truncated_normal([height, width, num_channels],
-                                        dtype=dtype,
-                                        mean=127,
-                                        stddev=60,
-                                        name='synthetic_inputs')
-
-    labels = tf.random.uniform([1],
-                               minval=0,
-                               maxval=num_classes - 1,
-                               dtype=tf.int32,
-                               name='synthetic_labels')
+    inputs = tf.truncated_normal(
+        [height, width, num_channels],
+        dtype=dtype,
+        mean=127,
+        stddev=60,
+        name='synthetic_inputs')
+
+    labels = tf.random_uniform(
+        [1],
+        minval=0,
+        maxval=num_classes - 1,
+        dtype=tf.int32,
+        name='synthetic_labels')
     data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
     data = data.batch(batch_size)
-    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
+    data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
     return data
 
   return input_fn
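The synthetic-input hunk swaps tf.data.experimental.AUTOTUNE back to tf.contrib.data.AUTOTUNE; in TF 1.x releases that still ship contrib they are the same prefetch sentinel, and the random-op renames are again drop-in. A minimal sketch of the restored pipeline shape, assuming such a TF 1.x build (sizes invented for brevity):

import tensorflow as tf  # assumes a TF 1.x release that still includes tf.contrib

height, width, num_channels, num_classes = 8, 8, 3, 10  # illustrative sizes

inputs = tf.truncated_normal([height, width, num_channels], dtype=tf.float32,
                             mean=127, stddev=60, name='synthetic_inputs')
labels = tf.random_uniform([1], minval=0, maxval=num_classes - 1,
                           dtype=tf.int32, name='synthetic_labels')

data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(2)
data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)

features, label_batch = data.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  assert sess.run(features).shape == (2, height, width, num_channels)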
