
Merge commit for internal changes
Vijay Vasudevan committed Jun 8, 2016
2 parents b826b79 + 31bf8c1 commit c679c2f
Showing 49 changed files with 1,033 additions and 164 deletions.
496 changes: 496 additions & 0 deletions grpc.BUILD

Large diffs are not rendered by default.

20 changes: 7 additions & 13 deletions tensorflow/contrib/framework/python/ops/variables.py
@@ -229,10 +229,6 @@ def variable(name, shape=None, dtype=dtypes.float32, initializer=None,
collections=collections,
caching_device=caching_device)

# TODO(sguada) move it to ops.GraphKeys or to contrib.framework.GraphKeys
# Collection containing all the variables created using model_variables.
MODEL_VARIABLES = '_model_variables_'


@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
@@ -251,8 +247,8 @@ def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.VARIABLES
and MODEL_VARIABLES collections.
Note that the variable is always also added to the `GraphKeys.VARIABLES`
and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
@@ -263,23 +259,21 @@ def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
The created or existing variable.
"""
collections = list(collections or [])

# Make sure variables are added to tf.GraphKeys.VARIABLES and MODEL_VARIABLES
collections += [ops.GraphKeys.VARIABLES, MODEL_VARIABLES]
collections += [ops.GraphKeys.VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
return variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device)


def add_model_variable(var):
"""Adds a variable to the MODEL_VARIABLES collection.
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(MODEL_VARIABLES):
ops.add_to_collection(MODEL_VARIABLES, var)
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)


def get_variables(scope=None, suffix=None, collection=ops.GraphKeys.VARIABLES):
@@ -310,7 +304,7 @@ def get_model_variables(scope=None, suffix=None):
Returns:
a list of variables in colelction with scope and suffix.
"""
return get_variables(scope, suffix, MODEL_VARIABLES)
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)


def get_local_variables(scope=None, suffix=None):
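As a usage note for the change above (a minimal sketch, not part of the diff; it uses only the contrib API exercised by the tests below): variables created with model_variable are now tracked under the shared tf.GraphKeys.MODEL_VARIABLES key rather than the module-private MODEL_VARIABLES string, so they can be retrieved or registered from anywhere via the standard collection API.

    import tensorflow as tf

    with tf.variable_scope('encoder'):
      # Added to both GraphKeys.VARIABLES and GraphKeys.MODEL_VARIABLES.
      weights = tf.contrib.framework.model_variable('weights', shape=[10, 4])

    # Retrieval uses the standard collection key; no private constant needed.
    assert weights in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)

    # A variable created by other means can still be registered explicitly.
    bias = tf.Variable(tf.zeros([4]), name='bias')
    tf.contrib.framework.add_model_variable(bias)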
4 changes: 4 additions & 0 deletions tensorflow/contrib/framework/python/ops/variables_test.py
@@ -157,6 +157,9 @@ def testCreateVariable(self):
a = tf.contrib.framework.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertTrue(a in tf.get_collection(tf.GraphKeys.VARIABLES))
self.assertFalse(a in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in tf.local_variables())

def testGetVariables(self):
with self.test_session():
@@ -437,6 +440,7 @@ def testNotInLocalVariables(self):
with tf.variable_scope('A'):
a = tf.contrib.framework.model_variable('a', [5])
self.assertTrue(a in tf.all_variables())
self.assertTrue(a in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in tf.local_variables())

def testGetVariablesReturns(self):
75 changes: 46 additions & 29 deletions tensorflow/contrib/layers/python/layers/regularizers.py
@@ -26,112 +26,129 @@
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging

__all__ = ['l1_regularizer', 'l2_regularizer', 'sum_regularizer',
__all__ = ['l1_regularizer',
'l2_regularizer',
'l1_l2_regularizer',
'sum_regularizer',
'apply_regularization']


def l1_regularizer(scale):
def l1_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L1 regularization to weights.
L1 regularization encourages sparsity.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional op_scope name.
Returns:
A function with signature `l1(weights, name=None)` that apply L1
regularization.
A function with signature `l1(weights)` that apply L1 regularization.
Raises:
ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not a
float.
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
return lambda _: None

def l1(weights, name=None):
"""Applies L1 regularization to weights."""
with ops.op_scope([weights], name, 'l1_regularizer') as scope:
with ops.op_scope([weights], scope, 'l1_regularizer') as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.mul(
my_scale,
standard_ops.reduce_sum(standard_ops.abs(weights)),
name=scope)
name=name)

return l1


def l2_regularizer(scale):
def l2_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L2 regularization to weights.
Small values of L2 can help prevent overfitting the training data.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional op_scope name.
Returns:
A function with signature `l2(weights, name=None)` that applies L2
regularization.
A function with signature `l2(weights)` that applies L2 regularization.
Raises:
ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not a
float.
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % (scale,))
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g.' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
return lambda _: None

def l2(weights, name=None):
def l2(weights):
"""Applies l2 regularization to weights."""
with ops.op_scope([weights], name, 'l2_regularizer') as scope:
with ops.op_scope([weights], scope, 'l2_regularizer') as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)
return standard_ops.mul(my_scale, nn.l2_loss(weights), name=name)

return l2


def sum_regularizer(regularizer_list):
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional op_scope name.
Returns:
A function with signature `l1_l2(weights)` that applies a weighted sum of
L1 L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
scope = scope or 'l1_l2_regularizer'
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope)


def sum_regularizer(regularizer_list, scope=None):
"""Returns a function that applies the sum of multiple regularizers.
Args:
regularizer_list: A list of regularizers to apply.
scope: An optional op_scope name
Returns:
A function with signature `sum_reg(weights, name=None)` that applies the
A function with signature `sum_reg(weights)` that applies the
sum of all the input regularizers.
"""
regularizer_list = [reg for reg in regularizer_list if reg is not None]
if not regularizer_list:
return None

def sum_reg(weights, name=None):
def sum_reg(weights):
"""Applies the sum of all the input regularizers."""
with ops.op_scope([weights], name, 'sum_regularizer') as scope:
with ops.op_scope([weights], scope, 'sum_regularizer') as name:
regularizer_tensors = [reg(weights) for reg in regularizer_list]
return math_ops.add_n(regularizer_tensors, name=scope)
return math_ops.add_n(regularizer_tensors, name=name)

return sum_reg

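For orientation, a hedged usage sketch of the regularizer API after this change (scale values are arbitrary; only functions appearing in the diff and its tests are used): the factories now take an optional scope, the returned callables take only the weights tensor, and l1_l2_regularizer is a convenience wrapper that sums an L1 and an L2 penalty via sum_regularizer.

    import tensorflow as tf

    weights = tf.constant(1.0, shape=[5, 5])

    l1 = tf.contrib.layers.l1_regularizer(0.1)
    l2 = tf.contrib.layers.l2_regularizer(0.05, scope='l2_penalty')
    l1_l2 = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.1, scale_l2=0.05)
    total = tf.contrib.layers.sum_regularizer([l1, l2])(weights)

    with tf.Session() as sess:
      # Each regularizer returns a scalar penalty tensor built from `weights`.
      print(sess.run([l1(weights), l1_l2(weights), total]))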
33 changes: 29 additions & 4 deletions tensorflow/contrib/layers/python/layers/regularizers_test.py
@@ -25,8 +25,6 @@
class RegularizerTest(tf.test.TestCase):

def test_l1(self):
with self.assertRaises(ValueError):
tf.contrib.layers.l1_regularizer(2.)
with self.assertRaises(ValueError):
tf.contrib.layers.l1_regularizer(-1.)
with self.assertRaises(ValueError):
@@ -42,8 +40,6 @@ def test_l1(self):
self.assertAllClose(np.abs(values).sum() * .5, result)

def test_l2(self):
with self.assertRaises(ValueError):
tf.contrib.layers.l2_regularizer(2.)
with self.assertRaises(ValueError):
tf.contrib.layers.l2_regularizer(-1.)
with self.assertRaises(ValueError):
@@ -58,6 +54,35 @@ def test_l2(self):

self.assertAllClose(np.power(values, 2).sum() / 2.0 * .42, result)

def test_l1_l2(self):
with self.assertRaises(ValueError):
tf.contrib.layers.l1_l2_regularizer(-1., 0.5)
with self.assertRaises(ValueError):
tf.contrib.layers.l1_l2_regularizer(0.5, -1.)
with self.assertRaises(ValueError):
tf.contrib.layers.l1_l2_regularizer(0, 0.5)
with self.assertRaises(ValueError):
tf.contrib.layers.l1_l2_regularizer(0.5, 0)

with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = tf.contrib.layers.l1_l2_regularizer(1.0, 1.0)(tensor)
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)

def testL1L2RegularizerWithScope(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
with tf.name_scope('foo'):
loss = tf.contrib.layers.l1_l2_regularizer(1.0, 1.0,
scope='l1_l2')(tensor)
self.assertEquals(loss.op.name, 'foo/l1_l2')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)

def test_sum_regularizer(self):
l1_function = tf.contrib.layers.l1_regularizer(.1)
l2_function = tf.contrib.layers.l2_regularizer(.2)
23 changes: 15 additions & 8 deletions tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -179,6 +179,10 @@ def input_fn_eval: # returns x, Y
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not None, the probability we will drop out a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See tf.clip_by_global_norm
for more details.
config: RunConfig object to configure the runtime settings.
"""

def __init__(self,
@@ -189,15 +193,18 @@ def __init__(self,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
config=None):
super(DNNRegressor, self).__init__(model_dir=model_dir,
weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
config=config)
super(DNNRegressor, self).__init__(
model_dir=model_dir,
weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
gradient_clip_norm=gradient_clip_norm,
config=config)

def _get_train_ops(self, features, targets):
"""See base class."""
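As a hypothetical construction showing the new argument (the feature column setup is illustrative and not taken from the diff): gradient_clip_norm is forwarded to the shared base estimator, which clips gradients to the given global norm during training, in the manner of tf.clip_by_global_norm.

    import tensorflow as tf

    # Illustrative input feature; any feature column works here.
    feature_columns = [tf.contrib.layers.real_valued_column('x', dimension=10)]

    estimator = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[64, 32],
        dropout=0.5,
        gradient_clip_norm=5.0)  # clip gradients to global norm 5.0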
(The remaining changed files in this commit are not rendered here.)
