Merge commit for internal changes
Vijay Vasudevan committed Jun 8, 2016
2 parents b2a812b + 915f5ff commit 8ed31ff
Showing 32 changed files with 1,526 additions and 101 deletions.
2 changes: 1 addition & 1 deletion eigen.BUILD
@@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])

archive_dir = "eigen-eigen-62a2305d5734"
archive_dir = "eigen-eigen-5f86b31739cd"

cc_library(
name = "eigen",
1 change: 1 addition & 0 deletions tensorflow/contrib/__init__.py
@@ -19,6 +19,7 @@
from __future__ import print_function

# Add projects here, they will show up under tf.contrib.
+from tensorflow.contrib import bayesflow
from tensorflow.contrib import copy_graph
from tensorflow.contrib import ctc
from tensorflow.contrib import distributions
22 changes: 11 additions & 11 deletions tensorflow/contrib/bayesflow/BUILD
@@ -8,24 +8,24 @@ exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])

-# load("//tensorflow:tensorflow.bzl", "cuda_py_tests")
+load("//tensorflow:tensorflow.bzl", "cuda_py_tests")

py_library(
    name = "bayesflow_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
)

-# cuda_py_tests(
-#     name = "some_bayesflow_test",
-#     size = "small",
-#     srcs = ["python/kernel_tests/some_bayesflow_test.py"],
-#     additional_deps = [
-#         ":bayesflow_py",
-#         "//tensorflow/python:framework_test_lib",
-#         "//tensorflow/python:platform_test",
-#     ],
-# )
+cuda_py_tests(
+    name = "stochastic_graph_test",
+    size = "small",
+    srcs = ["python/kernel_tests/stochastic_graph_test.py"],
+    additional_deps = [
+        ":bayesflow_py",
+        "//tensorflow/python:framework_test_lib",
+        "//tensorflow/python:platform_test",
+    ],
+)

filegroup(
    name = "all_files",
4 changes: 3 additions & 1 deletion tensorflow/contrib/bayesflow/__init__.py
@@ -15,8 +15,10 @@
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph
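A minimal usage sketch of the stochastic_graph API exported here, distilled from the new kernel test below (it assumes the TF 0.x-era contrib interfaces shown in that test; the variable names are illustrative and not part of the commit):

import tensorflow as tf

sg = tf.contrib.bayesflow.stochastic_graph
distributions = tf.contrib.distributions

mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])

# DistributionTensor must be built under a value_type context, which decides
# how the stochastic node is materialized as a Tensor (e.g. a sample or the mean).
with sg.value_type(sg.SampleAndReshapeValue()):
  prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
  likelihood = sg.DistributionTensor(distributions.Normal, mu=prior, sigma=sigma)

# Once converted (tf.identity / tf.convert_to_tensor), it behaves like a Tensor.
sample = tf.identity(likelihood)

with tf.Session() as sess:
  print(sess.run(sample))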
271 changes: 271 additions & 0 deletions tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py
@@ -0,0 +1,271 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

sg = tf.contrib.bayesflow.stochastic_graph
distributions = tf.contrib.distributions


class NormalNotParam(distributions.Normal):

  @property
  def is_reparameterized(self):
    return False


class DistributionTensorTest(tf.test.TestCase):

  def testConstructionAndValue(self):
    with self.test_session() as sess:
      mu = [0.0, 0.1, 0.2]
      sigma = tf.constant([1.1, 1.2, 1.3])
      sigma2 = tf.constant([0.1, 0.2, 0.3])
      with self.assertRaisesRegexp(ValueError, 'No value type currently set'):
        prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)

      prior_0 = sg.DistributionTensor(
          distributions.Normal, mu=mu, sigma=sigma,
          dist_value_type=sg.SampleAndReshapeValue())

      with sg.value_type(sg.SampleAndReshapeValue()):
        prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
        likelihood = sg.DistributionTensor(
            distributions.Normal, mu=prior, sigma=sigma2)

      coll = tf.get_collection(sg.STOCHASTIC_TENSOR_COLLECTION)
      self.assertEqual(coll, [prior_0, prior, likelihood])

      prior_0 = tf.identity(prior_0)
      prior = tf.identity(prior)  # Also works: tf.convert_to_tensor(prior)
      likelihood = tf.identity(likelihood)

      # Mostly a smoke test for now...
      prior_0_val, prior_val, _ = sess.run(
          [prior_0, prior, likelihood])

      self.assertEqual(prior_0_val.shape, prior_val.shape)
      # These are different random samples from the same distribution,
      # so the values should differ.
      self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)

  def testMeanValue(self):
    with self.test_session() as sess:
      mu = [0.0, -1.0, 1.0]
      sigma = tf.constant([1.1, 1.2, 1.3])

      with sg.value_type(sg.MeanValue()):
        prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)

      prior_mean = prior.mean()
      prior_value = prior.value()

      prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
      self.assertAllEqual(prior_mean_val, mu)
      self.assertAllEqual(prior_mean_val, prior_value_val)

  def testSampleAndReshapeValue(self):
    with self.test_session() as sess:
      mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
      sigma = tf.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])

      with sg.value_type(sg.SampleAndReshapeValue()):
        prior_single = sg.DistributionTensor(
            distributions.Normal, mu=mu, sigma=sigma)

      prior_single_value = prior_single.value()
      self.assertEqual(prior_single_value.get_shape(), (2, 3))

      prior_single_value_val = sess.run([prior_single_value])[0]
      self.assertEqual(prior_single_value_val.shape, (2, 3))

      with sg.value_type(sg.SampleAndReshapeValue(n=2)):
        prior_double = sg.DistributionTensor(
            distributions.Normal, mu=mu, sigma=sigma)

      prior_double_value = prior_double.value()
      self.assertEqual(prior_double_value.get_shape(), (4, 3))

      prior_double_value_val = sess.run([prior_double_value])[0]
      self.assertEqual(prior_double_value_val.shape, (4, 3))

  def testDistributionEntropy(self):
    with self.test_session() as sess:
      mu = [0.0, -1.0, 1.0]
      sigma = tf.constant([1.1, 1.2, 1.3])
      with sg.value_type(sg.MeanValue()):
        prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
      entropy = prior.entropy()
      deep_entropy = prior.entropy()
      expected_deep_entropy = distributions.Normal(
          mu=mu, sigma=sigma).entropy()
      entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
      self.assertAllEqual(entropies[2], entropies[0])
      self.assertAllEqual(entropies[1], entropies[0])


class ValueTypeTest(tf.test.TestCase):

  def testValueType(self):
    type_mean = sg.MeanValue()
    type_one = sg.SampleAndReshapeValue()
    with sg.value_type(type_mean):
      self.assertEqual(sg.get_current_value_type(), type_mean)
      with sg.value_type(type_one):
        self.assertEqual(sg.get_current_value_type(), type_one)
      self.assertEqual(sg.get_current_value_type(), type_mean)
    with self.assertRaisesRegexp(ValueError, 'No value type currently set'):
      sg.get_current_value_type()


class TestAdditionalScoreFunctionLosses(tf.test.TestCase):

  def testPathwiseDerivativeDoesNotAddScoreFunctionLosses(self):
    with self.test_session():
      mu = [0.0, 0.1, 0.2]
      sigma = tf.constant([1.1, 1.2, 1.3])
      with sg.value_type(sg.SampleAndReshapeValue()):
        prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
        likelihood = sg.DistributionTensor(
            distributions.Normal, mu=prior, sigma=sigma)
      self.assertTrue(prior.distribution.is_reparameterized)
      self.assertTrue(likelihood.distribution.is_reparameterized)

      loss = tf.square(tf.identity(likelihood) - [0.0, 0.1, 0.2])
      sum_loss = tf.reduce_sum(loss)

      surrogate_from_loss = sg.additional_score_function_losses([loss])
      surrogate_from_sum_loss = sg.additional_score_function_losses([sum_loss])
      surrogate_from_both = sg.additional_score_function_losses(
          [loss, sum_loss])

      # Pathwise derivative terms do not require score function
      # surrogate losses.
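      # (Normal here is reparameterized, so gradients flow through the
      # sampled values themselves and no REINFORCE-style surrogate term
      # is needed.)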
      self.assertEqual(surrogate_from_loss, [])
      self.assertEqual(surrogate_from_sum_loss, [])
      self.assertEqual(surrogate_from_both, [])

  def _testScoreFunction(self, session, losses, expected, xs):
    sf_losses = sg.additional_score_function_losses(losses)
    n = len(expected)
    self.assertEqual(len(expected), len(sf_losses))
    values = session.run(list(expected) + sf_losses)

    # Test forward surrogate losses
    if isinstance(expected, set):
      # Hack: sort the two halves of the values by norm, and compare
      # those
      sorted_expected = sorted(values[:n], key=np.linalg.norm)
      sorted_losses = sorted(values[n:], key=np.linalg.norm)
      self.assertAllClose(sorted_expected, sorted_losses)
    else:
      # Expected losses in a particular order
      self.assertAllClose(values[:n], values[n:])

    # Test backprop
    expected_grads = tf.gradients(ys=losses + list(expected), xs=xs)
    sf_grads = tf.gradients(ys=losses + sf_losses, xs=xs)
    self.assertEqual(len(expected_grads), len(sf_grads))
    n_grad = len(expected_grads)
    grad_values = session.run(expected_grads + sf_grads)
    self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:])

  def testScoreFunction(self):
    with self.test_session() as sess:
      mu = tf.constant([0.0, 0.1, 0.2])
      sigma = tf.constant([1.1, 1.2, 1.3])
      with sg.value_type(sg.SampleAndReshapeValue()):
        prior = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
        likelihood = sg.DistributionTensor(
            NormalNotParam, mu=prior, sigma=sigma)
        prior_2 = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)

      loss = tf.square(tf.identity(likelihood) - mu)
      part_loss = tf.square(tf.identity(prior) - mu)
      sum_loss = tf.reduce_sum(loss)
      loss_nodeps = tf.square(tf.identity(prior_2) - mu)

      # For ground truth, use the stop-gradient versions of the losses
      loss_nograd = tf.stop_gradient(loss)
      loss_nodeps_nograd = tf.stop_gradient(loss_nodeps)
      sum_loss_nograd = tf.stop_gradient(sum_loss)

      # These score functions should ignore prior_2
      self._testScoreFunction(
          session=sess,
          losses=[loss],
          expected=set([
              likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
              prior.distribution.log_pdf(prior.value()) * loss_nograd]),
          xs=[mu, sigma])

      self._testScoreFunction(
          session=sess,
          losses=[loss, part_loss],
          expected=set([
              likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
              (prior.distribution.log_pdf(prior.value())
               * tf.stop_gradient(part_loss + loss))]),
          xs=[mu, sigma])

      self._testScoreFunction(
          session=sess,
          losses=[sum_loss],
          expected=set([
              (likelihood.distribution.log_pdf(likelihood.value())
               * sum_loss_nograd),
              prior.distribution.log_pdf(prior.value()) * sum_loss_nograd]),
          xs=[mu, sigma])

      self._testScoreFunction(
          session=sess,
          losses=[loss, sum_loss],
          expected=set([
              (likelihood.distribution.log_pdf(likelihood.value())
               * tf.stop_gradient(loss + sum_loss)),
              (prior.distribution.log_pdf(prior.value())
               * tf.stop_gradient(loss + sum_loss))]),
          xs=[mu, sigma])

      # These score functions should ignore prior and likelihood
      self._testScoreFunction(
          session=sess,
          losses=[loss_nodeps],
          expected=[prior_2.distribution.log_pdf(prior_2.value())
                    * loss_nodeps_nograd],
          xs=[mu, sigma])

      # These score functions should include all terms selectively
      self._testScoreFunction(
          session=sess,
          losses=[loss, loss_nodeps],
          # We can't guarantee ordering of output losses in this case.
          expected=set(
              [(likelihood.distribution.log_pdf(likelihood.value())
                * loss_nograd),
               prior.distribution.log_pdf(prior.value()) * loss_nograd,
               (prior_2.distribution.log_pdf(prior_2.value())
                * loss_nodeps_nograd)]),
          xs=[mu, sigma])


if __name__ == '__main__':
  tf.test.main()
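As a point of reference, a rough sketch of how additional_score_function_losses might feed a larger training objective, under the same assumptions as the test above (the tf.add_n combination and the gradient call are illustrative, not part of the commit):

import tensorflow as tf

sg = tf.contrib.bayesflow.stochastic_graph
distributions = tf.contrib.distributions

mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])

with sg.value_type(sg.SampleAndReshapeValue()):
  prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)

loss = tf.reduce_sum(tf.square(tf.identity(prior) - mu))

# Score-function surrogate terms for any stochastic nodes that need them;
# empty here because Normal samples are reparameterized.
surrogates = sg.additional_score_function_losses([loss])
total_loss = loss
if surrogates:
  total_loss += tf.add_n(surrogates)

# Gradients of the combined objective, mirroring the test's backprop check.
grads = tf.gradients(ys=[total_loss], xs=[mu, sigma])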
