Commit d6b2b83

tf_upgrade_v2 on resnet and utils folders. (tensorflow#6154)
* Add resnet56 short tests. (tensorflow#6101)

* Add resnet56 short tests.
- created base benchmark module
- renamed the accuracy test class to contain the word "Accuracy",
which will require updating all the jobs and losing their history,
but is worth it.
- short tests are mostly copied from shining, with an OSS refactor

* Address feedback.

* Move flag_methods to init
- Address setting default flags repeatedly.

* Rename accuracy tests.

* Lint errors resolved.

* Fix model_dir being set to flags.data_dir.

* Fix not fully pulling out flag_methods.

* Use core mirrored strategy in official models (tensorflow#6126); a sketch of the strategy wiring appears after this commit list.

* Imagenet short tests (tensorflow#6132)

* Add short ImageNet tests (taken from seemuch)
- also renamed to match the go-forward naming

* fix method name

* Update doc strings.

* Fix GPU number.

* Point default data_dir to a child folder. (tensorflow#6131)

The failed test was the Python 2 run and was a Kokoro failure.

* Imagenet short tests (tensorflow#6136)

* Add short ImageNet tests (taken from seemuch)
- also renamed to match the go-forward naming

* fix method name

* Update doc strings.

* Fix GPU number.

* Add fill_objects

* Fix calling the wrong class in super().

* fix lint issue.

* Flag (tensorflow#6121)

* Fix the turn_off_ds flag problem

* add param names to all args

* Export benchmark stats using tf.test.Benchmark.report_benchmark() (tensorflow#6103); a sketch of that call appears after this commit list.

* Export benchmark stats using tf.test.Benchmark.report_benchmark()

* Fix python style using pyformat

* Typos. (tensorflow#6120)

* Use log verbosity=2, which logs once per epoch with no progress bars (tensorflow#6142)

* tf_upgrade_v2 on resnet and utils folders.

* tf_upgrade_v2 on resnet and utils folders.
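
Two of the merged changes above reference specific APIs, so brief sketches follow; neither is code from this commit.

The "core mirrored strategy" change refers to tf.distribute.MirroredStrategy rather than the contrib version. A minimal sketch of wiring such a strategy into an Estimator, assuming a TensorFlow release where tf.distribute.MirroredStrategy exists (1.14+/2.x) and using placeholder model_fn/input_fn names:

import tensorflow as tf

# "Core" mirrored strategy, i.e. tf.distribute rather than tf.contrib.distribute.
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(train_distribute=strategy)
# estimator = tf.estimator.Estimator(model_fn=my_model_fn, config=run_config)
# estimator.train(input_fn=my_input_fn)  # my_model_fn / my_input_fn are placeholders

The benchmark-export change uses tf.test.Benchmark.report_benchmark(). A sketch of that call shape with illustrative numbers, not results from this commit:

import tensorflow as tf

class ResnetBenchmarkSketch(tf.test.Benchmark):
  """Illustrative only: shows the report_benchmark() call shape."""

  def benchmark_synthetic_run(self):
    self.report_benchmark(
        wall_time=123.4,                  # seconds, illustrative
        extras={'top_1_accuracy': 0.76})  # illustrative value

if __name__ == '__main__':
  ResnetBenchmarkSketch().benchmark_synthetic_run()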
goldiegadde authored and tfboyd committed Feb 5, 2019
1 parent 722f345 commit d6b2b83
Showing 34 changed files with 266 additions and 251 deletions.
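
The diffs below are the mechanical output of TensorFlow's tf_upgrade_v2 conversion script (typically invoked along the lines of tf_upgrade_v2 --intree official/resnet --outtree official/resnet_v2 --reportfile report.txt; the exact invocation is not recorded in this commit). As a quick stand-alone sanity check, not part of the commit, the renamed endpoints used throughout resolve in an installed TensorFlow (2.x, or a late 1.x release where the v2 names were backported):

import tensorflow as tf

# Each new-style name below replaces an old top-level symbol used in these files.
for fn in (tf.io.gfile.exists,                   # was tf.gfile.Exists
           tf.io.decode_raw,                     # was tf.decode_raw
           tf.io.parse_single_example,           # was tf.parse_single_example
           tf.random.uniform,                    # was tf.random_uniform
           tf.compat.v1.logging.set_verbosity):  # was tf.logging.set_verbosity
  assert callable(fn)
print('v2-style endpoints are available')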
2 changes: 1 addition & 1 deletion official/resnet/cifar10_download_and_extract.py
@@ -60,4 +60,4 @@ def _progress(count, block_size, total_size):

if __name__ == '__main__':
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(argv=[sys.argv[0]] + unparsed)
tf.compat.v1.app.run(argv=[sys.argv[0]] + unparsed)
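
For reference, the converted entry point keeps its 1.x flavour under compat.v1. A minimal stand-alone sketch of the renamed call, with a hypothetical main rather than the downloader above:

import sys
import tensorflow as tf

def main(argv):
  # tf.app.run became tf.compat.v1.app.run; it parses flags, then calls main(argv).
  tf.compat.v1.logging.info('argv: %s', argv)

if __name__ == '__main__':
  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]])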
15 changes: 8 additions & 7 deletions official/resnet/cifar10_main.py
@@ -52,7 +52,7 @@
###############################################################################
def get_filenames(is_training, data_dir):
"""Returns a list of filenames."""
assert tf.gfile.Exists(data_dir), (
assert tf.io.gfile.exists(data_dir), (
'Run cifar10_download_and_extract.py first to download and extract the '
'CIFAR-10 data.')

@@ -68,7 +68,7 @@ def get_filenames(is_training, data_dir):
def parse_record(raw_record, is_training, dtype):
"""Parse CIFAR-10 image and label from a raw record."""
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.decode_raw(raw_record, tf.uint8)
record_vector = tf.io.decode_raw(raw_record, tf.uint8)

# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
@@ -81,7 +81,7 @@ def parse_record(raw_record, is_training, dtype):

# Convert from [depth, height, width] to [height, width, depth], and cast as
# float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)

image = preprocess_image(image, is_training)
image = tf.cast(image, dtype)
@@ -97,7 +97,7 @@ def preprocess_image(image, is_training):
image, HEIGHT + 8, WIDTH + 8)

# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
image = tf.image.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])

# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
@@ -253,8 +253,9 @@ def run_cifar(flags_obj):
Dictionary of results. Including final accuracy.
"""
if flags_obj.image_bytes_as_serving_input:
tf.logging.fatal('--image_bytes_as_serving_input cannot be set to True '
'for CIFAR. This flag is only applicable to ImageNet.')
tf.compat.v1.logging.fatal(
'--image_bytes_as_serving_input cannot be set to True for CIFAR. '
'This flag is only applicable to ImageNet.')
return

input_function = (flags_obj.use_synthetic_data and
@@ -273,6 +274,6 @@ def main(_):


if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
define_cifar_flags()
absl_app.run(main)
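
One pattern visible in this file (and repeated below) is that the converter spells out keyword arguments such as a= and perm=. A tiny stand-alone illustration of the depth-major to height-width-depth transpose performed in parse_record, assuming only core TensorFlow and NumPy:

import numpy as np
import tensorflow as tf

# CIFAR-10 records store pixels depth-major; the parse step flips them to HWC.
depth_major = tf.constant(np.zeros((3, 32, 32), dtype=np.uint8))
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
assert image.shape == (32, 32, 3)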
11 changes: 6 additions & 5 deletions official/resnet/cifar10_test.py
@@ -25,7 +25,7 @@
from official.resnet import cifar10_main
from official.utils.testing import integration

tf.logging.set_verbosity(tf.logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

_BATCH_SIZE = 128
_HEIGHT = 32
@@ -44,7 +44,7 @@ def setUpClass(cls): # pylint: disable=invalid-name

def tearDown(self):
super(BaseTest, self).tearDown()
tf.gfile.DeleteRecursively(self.get_temp_dir())
tf.io.gfile.rmtree(self.get_temp_dir())

def test_dataset_input_fn(self):
fake_data = bytearray()
@@ -62,7 +62,8 @@ def test_dataset_input_fn(self):
filename, cifar10_main._RECORD_BYTES) # pylint: disable=protected-access
fake_dataset = fake_dataset.map(
lambda val: cifar10_main.parse_record(val, False, tf.float32))
image, label = fake_dataset.make_one_shot_iterator().get_next()
image, label = tf.compat.v1.data.make_one_shot_iterator(
fake_dataset).get_next()

self.assertAllEqual(label.shape, ())
self.assertAllEqual(image.shape, (_HEIGHT, _WIDTH, _NUM_CHANNELS))
@@ -79,7 +80,7 @@ def test_dataset_input_fn(self):
def cifar10_model_fn_helper(self, mode, resnet_version, dtype):
input_fn = cifar10_main.get_synth_input_fn(dtype)
dataset = input_fn(True, '', _BATCH_SIZE)
iterator = dataset.make_initializable_iterator()
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
features, labels = iterator.get_next()
spec = cifar10_main.cifar10_model_fn(
features, labels, mode, {
@@ -142,7 +143,7 @@ def _test_cifar10model_shape(self, resnet_version):
model = cifar10_main.Cifar10Model(32, data_format='channels_last',
num_classes=num_classes,
resnet_version=resnet_version)
fake_input = tf.random_uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
fake_input = tf.random.uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
output = model(fake_input, training=True)

self.assertAllEqual(output.shape, (batch_size, num_classes))
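
The substantive change in this test is that Dataset instance methods were swapped for the tf.compat.v1.data helpers. A minimal sketch of the replacement call, which evaluates eagerly in 2.x and builds a graph op in 1.x:

import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
# 1.x style removed above: dataset.make_one_shot_iterator()
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
first_batch = iterator.get_next()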
2 changes: 1 addition & 1 deletion official/resnet/estimator_cifar_benchmark.py
@@ -144,7 +144,7 @@ def _get_model_dir(self, folder_name):
return os.path.join(self.output_dir, folder_name)

def _setup(self):
tf.logging.set_verbosity(tf.logging.DEBUG)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
if EstimatorCifar10BenchmarkTests.local_flags is None:
cifar_main.define_cifar_flags()
# Loads flags to get defaults to then override.
21 changes: 11 additions & 10 deletions official/resnet/imagenet_main.py
@@ -95,22 +95,23 @@ def _parse_example_proto(example_serialized):
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})

features = tf.parse_single_example(example_serialized, feature_map)
features = tf.io.parse_single_example(serialized=example_serialized,
features=feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)

xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
@@ -124,7 +125,7 @@ def _parse_example_proto(example_serialized):
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
bbox = tf.transpose(a=bbox, perm=[0, 2, 1])

return features['image/encoded'], label, bbox

@@ -188,7 +189,7 @@ def input_fn(is_training, data_dir, batch_size, num_epochs=1,
# This number is low enough to not cause too much contention on small systems
# but high enough to provide the benefits of parallelization. You may want
# to increase this number if you have a large number of CPU cores.
dataset = dataset.apply(tf.contrib.data.parallel_interleave(
dataset = dataset.apply(tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset, cycle_length=10))

return resnet_run_loop.process_record_dataset(
@@ -352,6 +353,6 @@ def main(_):


if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
define_imagenet_flags()
absl_app.run(main)
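
Beyond the compat.v1 renames, the notable move in this file is tf.contrib.data.parallel_interleave becoming tf.data.experimental.parallel_interleave. A sketch with illustrative shard paths rather than real ImageNet filenames:

import tensorflow as tf

filenames = tf.data.Dataset.from_tensor_slices(
    ['/tmp/train-00000-of-01024', '/tmp/train-00001-of-01024'])  # illustrative
dataset = filenames.apply(
    tf.data.experimental.parallel_interleave(
        tf.data.TFRecordDataset, cycle_length=10))

Later releases deprecate this transform in favour of Dataset.interleave with num_parallel_calls, but the upgrade script only relocates the contrib symbol.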
6 changes: 3 additions & 3 deletions official/resnet/imagenet_preprocessing.py
@@ -108,7 +108,7 @@ def _central_crop(image, crop_height, crop_width):
Returns:
3-D tensor with cropped image.
"""
shape = tf.shape(image)
shape = tf.shape(input=image)
height, width = shape[0], shape[1]

amount_to_be_cropped_h = (height - crop_height)
@@ -195,7 +195,7 @@ def _aspect_preserving_resize(image, resize_min):
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
shape = tf.shape(input=image)
height, width = shape[0], shape[1]

new_height, new_width = _smallest_size_at_least(height, width, resize_min)
@@ -218,7 +218,7 @@ def _resize_image(image, height, width):
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize_images(
return tf.image.resize(
image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)

12 changes: 6 additions & 6 deletions official/resnet/imagenet_test.py
@@ -24,7 +24,7 @@
from official.resnet import imagenet_main
from official.utils.testing import integration

tf.logging.set_verbosity(tf.logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

_BATCH_SIZE = 32
_LABEL_CLASSES = 1001
@@ -39,7 +39,7 @@ def setUpClass(cls): # pylint: disable=invalid-name

def tearDown(self):
super(BaseTest, self).tearDown()
tf.gfile.DeleteRecursively(self.get_temp_dir())
tf.io.gfile.rmtree(self.get_temp_dir())

def _tensor_shapes_helper(self, resnet_size, resnet_version, dtype, with_gpu):
"""Checks the tensor shapes after each phase of the ResNet model."""
@@ -62,7 +62,7 @@ def reshape(shape):
resnet_version=resnet_version,
dtype=dtype
)
inputs = tf.random_uniform([1, 224, 224, 3])
inputs = tf.random.uniform([1, 224, 224, 3])
output = model(inputs, training=True)

initial_conv = graph.get_tensor_by_name('resnet_model/initial_conv:0')
@@ -189,11 +189,11 @@ def test_tensor_shapes_resnet_200_with_gpu_v2(self):

def resnet_model_fn_helper(self, mode, resnet_version, dtype):
"""Tests that the EstimatorSpec is given the appropriate arguments."""
tf.train.create_global_step()
tf.compat.v1.train.create_global_step()

input_fn = imagenet_main.get_synth_input_fn(dtype)
dataset = input_fn(True, '', _BATCH_SIZE)
iterator = dataset.make_initializable_iterator()
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
features, labels = iterator.get_next()
spec = imagenet_main.imagenet_model_fn(
features, labels, mode, {
@@ -257,7 +257,7 @@ def _test_imagenetmodel_shape(self, resnet_version):
50, data_format='channels_last', num_classes=num_classes,
resnet_version=resnet_version)

fake_input = tf.random_uniform([batch_size, 224, 224, 3])
fake_input = tf.random.uniform([batch_size, 224, 224, 3])
output = model(fake_input, training=True)

self.assertAllEqual(output.shape, (batch_size, num_classes))
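
The model_fn tests still build a v1-style global step; the symbol simply moved under compat.v1. A stand-alone sketch:

import tensorflow as tf

with tf.Graph().as_default():
  # was tf.train.create_global_step()
  global_step = tf.compat.v1.train.create_global_step()
  print(global_step.name)  # typically 'global_step:0'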
2 changes: 1 addition & 1 deletion official/resnet/keras/keras_benchmark.py
@@ -43,7 +43,7 @@ def _get_model_dir(self, folder_name):

def _setup(self):
"""Sets up and resets flags before each test."""
tf.logging.set_verbosity(tf.logging.DEBUG)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
if KerasBenchmark.local_flags is None:
for flag_method in self.flag_methods:
flag_method()
6 changes: 3 additions & 3 deletions official/resnet/keras/keras_cifar_main.py
@@ -81,7 +81,7 @@ def parse_record_keras(raw_record, is_training, dtype):
Tuple with processed image tensor and one-hot-encoded label tensor.
"""
image, label = cifar_main.parse_record(raw_record, is_training, dtype)
label = tf.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
label = tf.compat.v1.sparse_to_dense(label, (cifar_main.NUM_CLASSES,), 1)
return image, label


@@ -98,7 +98,7 @@ def run(flags_obj):
Dictionary of training and eval stats.
"""
if flags_obj.enable_eager:
tf.enable_eager_execution()
tf.compat.v1.enable_eager_execution()

dtype = flags_core.get_tf_dtype(flags_obj)
if dtype == 'fp16':
@@ -194,7 +194,7 @@ def main(_):


if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
cifar_main.define_cifar_flags()
keras_common.define_keras_flags()
absl_app.run(main)
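
tf.sparse_to_dense has no direct v2 endpoint, so it lands under compat.v1. For the specific scalar-label-to-one-hot use in parse_record_keras, tf.one_hot is an equivalent, non-deprecated formulation; this is a reading of the code, not part of the commit:

import tensorflow as tf

label = tf.constant(3)
dense_a = tf.compat.v1.sparse_to_dense(label, (10,), 1)  # as in the diff above
dense_b = tf.one_hot(label, depth=10, dtype=tf.int32)    # equivalent result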
38 changes: 19 additions & 19 deletions official/resnet/keras/keras_common.py
@@ -80,9 +80,10 @@ def on_batch_end(self, batch, logs=None):
if batch != 0:
self.record_batch = True
self.timestamp_log.append(BatchTimestamp(batch, timestamp))
tf.logging.info("BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
"'images_per_second': %f}" %
(batch, elapsed_time, examples_per_second))
tf.compat.v1.logging.info(
"BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
"'images_per_second': %f}" %
(batch, elapsed_time, examples_per_second))


class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
@@ -120,8 +121,9 @@ def on_batch_begin(self, batch, logs=None):
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)


def get_optimizer():
@@ -226,22 +228,20 @@ def get_synth_input_fn(height, width, num_channels, num_classes,
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
# Synthetic input should be within [0, 255].
inputs = tf.truncated_normal(
[height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')

labels = tf.random_uniform(
[1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')

labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(batch_size)
data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data

return input_fn
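
A condensed, stand-alone version of the synthetic-input pattern above, using the renamed random ops and the AUTOTUNE constant that replaced tf.contrib.data.AUTOTUNE; shapes and values are illustrative:

import tensorflow as tf

inputs = tf.random.truncated_normal([32, 32, 3], mean=127., stddev=60.,
                                    dtype=tf.float32, name='synthetic_inputs')
labels = tf.random.uniform([1], minval=0, maxval=9, dtype=tf.int32,
                           name='synthetic_labels')
data = (tf.data.Dataset.from_tensors((inputs, labels))
        .repeat()
        .batch(128)
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE))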
2 changes: 1 addition & 1 deletion official/resnet/keras/keras_common_test.py
@@ -22,7 +22,7 @@

from official.resnet.keras import keras_common

tf.logging.set_verbosity(tf.logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)


class KerasCommonTests(tf.test.TestCase):
4 changes: 2 additions & 2 deletions official/resnet/keras/keras_imagenet_main.py
@@ -88,7 +88,7 @@ def run(flags_obj):
ValueError: If fp16 is passed as it is not currently supported.
"""
if flags_obj.enable_eager:
tf.enable_eager_execution()
tf.compat.v1.enable_eager_execution()

dtype = flags_core.get_tf_dtype(flags_obj)
if dtype == 'fp16':
Expand Down Expand Up @@ -187,7 +187,7 @@ def main(_):


if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
imagenet_main.define_imagenet_flags()
keras_common.define_keras_flags()
absl_app.run(main)
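
The --enable_eager path now goes through compat.v1 as well. A minimal stand-alone sketch of the toggle; in 2.x eager execution is already on, so the call is skipped:

import tensorflow as tf

if not tf.executing_eagerly():
  # was tf.enable_eager_execution(); must run before any graph ops in a 1.x binary
  tf.compat.v1.enable_eager_execution()
print('eager execution:', tf.executing_eagerly())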