
Commit d4eedbb
Merged commit includes the following changes: (tensorflow#8077)
Internal cleanup (py2->py3) plus the following changes:

285513318  by Sergio Guadarrama:

    Adds a script for post-training quantization

284222305  by Sergio Guadarrama:

    Modified squeeze-excite operation to accommodate tensors of undefined (NoneType) H/W.
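
    The squeeze-excite code itself is not part of this excerpt. A minimal
    sketch of the usual fix, assuming the op previously pooled with a static
    kernel: average over the spatial axes with tf.reduce_mean, which stays
    valid when H and W are None at graph-construction time. All names below
    (_squeeze_excite, se_ratio) are illustrative, not the commit's own.

    import tensorflow as tf

    def _squeeze_excite(input_tensor, se_ratio=0.25):
      """Illustrative squeeze-excite block tolerating undefined (None) H/W."""
      channels = input_tensor.shape.as_list()[-1]  # channel count must be static
      # A fixed-size avg_pool kernel breaks when H/W are None; reducing over
      # the spatial axes never materializes a kernel size in the graph.
      squeezed = tf.reduce_mean(
          input_tensor=input_tensor, axis=[1, 2], keepdims=True)  # [N, 1, 1, C]
      hidden = tf.compat.v1.layers.dense(
          squeezed, max(1, int(channels * se_ratio)), activation=tf.nn.relu)
      gate = tf.compat.v1.layers.dense(hidden, channels, activation=tf.sigmoid)
      return input_tensor * gate  # the gate broadcasts across H and W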

282028343  by Sergio Guadarrama:

    Add MobilenetV3 and MobilenetEdgeTPU to the slim/nets_factory.
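
    The factory change is also outside this excerpt. slim's nets_factory maps
    model names to builder functions, so registering a network is essentially
    a dict entry plus a lookup. A hedged sketch, assuming mobilenet_v3 exposes
    large/small builders; the key strings and attribute names are assumptions,
    not verbatim code (nets_factory also keeps a parallel arg_scopes_map,
    omitted here).

    from nets.mobilenet import mobilenet_v3  # path as laid out under research/slim

    networks_map = {
        'mobilenet_v3_large': mobilenet_v3.large,
        'mobilenet_v3_small': mobilenet_v3.small,
    }

    def get_network_fn(name):
      """Resolves a builder by name, as train/eval scripts do via --model_name."""
      if name not in networks_map:
        raise ValueError('Name of network unknown %s' % name)
      return networks_map[name]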

PiperOrigin-RevId: 289455329

Co-authored-by: Sergio Guadarrama <[email protected]>
2 people authored and saberkun committed Jan 22, 2020
1 parent 0e0a94a commit d4eedbb
Showing 58 changed files with 1,436 additions and 1,071 deletions.
32 changes: 22 additions & 10 deletions research/slim/BUILD
@@ -1,6 +1,7 @@
# Description:
# Contains files for loading, training and evaluating TF-Slim-based models.
# load("//devtools/python/blaze:python3.bzl", "py2and3_test")
load("//devtools/python/blaze:pytype.bzl", "pytype_strict_binary")

package(
default_visibility = ["//visibility:public"],
@@ -475,11 +476,10 @@ py_test(
],
)

py_test(
py_test( # py2and3_test
name = "inception_v2_test",
size = "large",
srcs = ["nets/inception_v2_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
@@ -590,14 +590,14 @@ py_library(
],
)

py_test(
py_test( # py2and3_test
name = "mobilenet_v2_test",
srcs = ["nets/mobilenet/mobilenet_v2_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":mobilenet",
":mobilenet_common",
"//third_party/py/six",
# "//tensorflow",
# "//tensorflow/contrib/slim",
],
@@ -755,11 +755,10 @@ py_library(
],
)

py_test(
py_test( # py2and3_test
name = "overfeat_test",
size = "medium",
srcs = ["nets/overfeat_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":overfeat",
@@ -890,11 +889,10 @@ py_library(
],
)

py_test(
py_test( # py2and3_test
name = "vgg_test",
size = "medium",
srcs = ["nets/vgg_test.py"],
python_version = "PY2",
srcs_version = "PY2AND3",
deps = [
":vgg",
@@ -912,11 +910,10 @@ py_library(
],
)

py_test(
py_test( # py2and3_test
name = "nets_factory_test",
size = "large",
srcs = ["nets/nets_factory_test.py"],
python_version = "PY2",
shard_count = 3,
srcs_version = "PY2AND3",
deps = [
@@ -925,9 +922,24 @@ py_test(
],
)

pytype_strict_binary(
name = "post_training_quantization",
srcs = ["nets/post_training_quantization.py"],
python_version = "PY3",
deps = [
":nets_factory",
":preprocessing_factory",
"//third_party/py/absl:app",
"//third_party/py/absl/flags",
# "//tensorflow",
# "//tensorflow_datasets",
],
)
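
The quantization script's body is not included in this excerpt; given the
deps above (nets_factory, preprocessing_factory, absl, and the commented
TensorFlow/TFDS deps), it presumably drives the standard TF Lite
post-training quantization flow. A minimal sketch under that assumption;
the paths, model, input size, and dataset below are placeholders, not the
script's actual flags.

    import tensorflow as tf
    import tensorflow_datasets as tfds

    def representative_dataset():
      # A small calibration set lets the converter pick quantization ranges.
      ds = tfds.load('imagenette', split='train').take(100)  # placeholder dataset
      for example in ds:
        image = tf.image.resize(example['image'], [224, 224])
        image = tf.cast(image, tf.float32)[tf.newaxis] / 255.0
        yield [image.numpy()]

    converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/mobilenet_v3')  # placeholder path
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    with open('/tmp/mobilenet_v3_quant.tflite', 'wb') as f:
      f.write(converter.convert())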

py_library(
name = "train_image_classifier_lib",
srcs = ["train_image_classifier.py"],
srcs_version = "PY2AND3",
deps = [
":dataset_factory",
":model_deploy",

@@ -201,7 +201,7 @@ def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
annotations_index = groundtruth_data['annotations']
annotations_index = {int(k): v for k, v in annotations_index.iteritems()}
annotations_index = {int(k): v for k, v in annotations_index.items()}
# convert 'unicode' key to 'int' key after we parse the json file

for idx, image in enumerate(images):
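
The hunk above is the core py2->py3 change in this file: dict.iteritems()
was removed in Python 3, while dict.items() runs under both (a list of
pairs on py2, a view on py3):

    d = {'1': 'a', '2': 'b'}
    {int(k): v for k, v in d.items()}  # {1: 'a', 2: 'b'} on both py2 and py3
    # d.iteritems()                    # py2 only; AttributeError on py3
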
31 changes: 19 additions & 12 deletions research/slim/nets/alexnet.py
@@ -40,13 +40,16 @@
from tensorflow.contrib import slim as contrib_slim

slim = contrib_slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)

# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
0.0, stddev)


def alexnet_v2_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.constant_initializer(0.1),
biases_initializer=tf.compat.v1.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
@@ -94,7 +97,7 @@ def alexnet_v2(inputs,
or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
with tf.compat.v1.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
@@ -110,9 +113,10 @@ def alexnet_v2(inputs,
net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')

# Use conv2d instead of fully_connected layers.
with slim.arg_scope([slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.compat.v1.constant_initializer(0.1)):
net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
@@ -122,16 +126,19 @@ def alexnet_v2(inputs,
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
net = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.compat.v1.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
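
The alexnet.py hunks above, and the test and cifarnet hunks below, all apply
one mechanical migration pattern across the commit's 58 files: symbols
dropped from the TF2 namespace move under tf.compat.v1, renamed ops move to
their new endpoints, and deprecated keyword arguments are replaced. The
substitutions recurring in this excerpt:

    tf.truncated_normal_initializer  ->  tf.compat.v1.truncated_normal_initializer
    tf.constant_initializer          ->  tf.compat.v1.constant_initializer
    tf.zeros_initializer             ->  tf.compat.v1.zeros_initializer
    tf.variable_scope                ->  tf.compat.v1.variable_scope
    tf.get_variable_scope            ->  tf.compat.v1.get_variable_scope
    tf.global_variables_initializer  ->  tf.compat.v1.global_variables_initializer
    tf.random_uniform                ->  tf.random.uniform
    tf.argmax(x, 1)                  ->  tf.argmax(input=x, axis=1)
    tf.reduce_mean(x, [1, 2], keep_dims=True)
                                     ->  tf.reduce_mean(input_tensor=x, axis=[1, 2], keepdims=True)
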
30 changes: 15 additions & 15 deletions research/slim/nets/alexnet_test.py
@@ -32,7 +32,7 @@ def testBuild(self):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
@@ -43,7 +43,7 @@ def testFullyConvolutional(self):
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
@@ -54,7 +54,7 @@ def testGlobalPool(self):
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
@@ -66,7 +66,7 @@ def testEndPoints(self):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
@@ -87,7 +87,7 @@ def testNoClasses(self):
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
net, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
@@ -110,7 +110,7 @@ def testModelVariables(self):
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
@@ -137,11 +137,11 @@ def testEvaluation(self):
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
eval_inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
predictions = tf.argmax(input=logits, axis=1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

def testTrainEvalWithReuse(self):
@@ -151,29 +151,29 @@ def testTrainEvalWithReuse(self):
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
train_inputs = tf.random.uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
tf.compat.v1.get_variable_scope().reuse_variables()
eval_inputs = tf.random.uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
logits = tf.reduce_mean(input_tensor=logits, axis=[1, 2])
predictions = tf.argmax(input=logits, axis=1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])

def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
inputs = tf.random.uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())

25 changes: 15 additions & 10 deletions research/slim/nets/cifarnet.py
@@ -23,7 +23,9 @@

slim = contrib_slim

trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.compat.v1.truncated_normal_initializer(
stddev=stddev)


def cifarnet(images, num_classes=10, is_training=False,
@@ -61,7 +63,7 @@ def cifarnet(images, num_classes=10, is_training=False,
"""
end_points = {}

with tf.variable_scope(scope, 'CifarNet', [images]):
with tf.compat.v1.variable_scope(scope, 'CifarNet', [images]):
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
end_points['conv1'] = net
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
@@ -82,12 +84,14 @@ def cifarnet(images, num_classes=10, is_training=False,
end_points['fc4'] = net
if not num_classes:
return net, end_points
logits = slim.fully_connected(net, num_classes,
biases_initializer=tf.zeros_initializer(),
weights_initializer=trunc_normal(1/192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')
logits = slim.fully_connected(
net,
num_classes,
biases_initializer=tf.compat.v1.zeros_initializer(),
weights_initializer=trunc_normal(1 / 192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')

end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
@@ -107,11 +111,12 @@ def cifarnet_arg_scope(weight_decay=0.004):
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
weights_initializer=tf.compat.v1.truncated_normal_initializer(
stddev=5e-2),
activation_fn=tf.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=tf.constant_initializer(0.1),
biases_initializer=tf.compat.v1.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu) as sc: