
Commit

Merge changes from github.
Change: 136750267
Patrick Nguyen authored and tensorflower-gardener committed Oct 20, 2016
1 parent 8532897 commit c5ab3dd
Showing 106 changed files with 1,573 additions and 503 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -61,6 +61,6 @@ Hello, TensorFlow!
* [TensorFlow website](http://tensorflow.org)
* [TensorFlow whitepaper](http://download.tensorflow.org/paper/whitepaper2015.pdf)
* [TensorFlow Model Zoo](https://github.com/tensorflow/models)
* [TensorFlow MOOC on Udacity] (https://www.udacity.com/course/deep-learning--ud730)
* [TensorFlow MOOC on Udacity](https://www.udacity.com/course/deep-learning--ud730)

The TensorFlow community has created amazing things with TensorFlow, please see the [resources section of tensorflow.org](https://www.tensorflow.org/versions/master/resources#community) for an incomplete list.
2 changes: 1 addition & 1 deletion RELEASE.md
@@ -84,7 +84,7 @@ Snyder, @jpangburn, Jules Gagnon-Marchand, Karen Brems, @kborer, Kirill Bobyrev,
Laurent Mazare, Longqi Yang, Malith Yapa, Maniteja Nandana, Martin Englund,
Matthias Winkelmann, @mecab, Mu-Ik Jeon, Nand Dalal, Niels Ole Salscheider,
Nikhil Mishra, Park Jiin, Pieter De Rijk, @raix852, Ritwik Gupta, Sahil Sharma,
@Sangheum, @SergejsRk, Shinichiro Hamaji, Simon Denel, @Steve, @suiyuan2009,
Sangheum Hwang, @SergejsRk, Shinichiro Hamaji, Simon Denel, @Steve, @suiyuan2009,
Tiago Jorge, Tijmen Tieleman, @tvn, @tyfkda, Wang Yang, Wei-Ting Kuo, Wenjian
Huang, Yan Chen, @YenChenLin, Yuan (Terry) Tang, Yuncheng Li, Yunfeng Wang, Zack
Polizzi, @zhongzyd, Ziming Dong, @perhapszzy
34 changes: 29 additions & 5 deletions configure
@@ -8,8 +8,22 @@ pushd `dirname $0` #> /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null

PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_windows() {
# On windows, the shell script is actually running in msys
if [[ "${PLATFORM}" =~ msys_nt* ]]; then
true
else
false
fi
}

function bazel_clean_and_fetch() {
bazel clean --expunge
# bazel clean --expunge currently doesn't work on Windows
# TODO(pcloudy): Re-enable it after bazel clean --expunge is fixed.
if ! is_windows; then
bazel clean --expunge
fi
bazel fetch //tensorflow/...
}

@@ -35,6 +49,12 @@ while true; do
# Retry
done

if is_windows; then
TF_NEED_GCP=0
TF_NEED_HDFS=0
TF_NEED_CUDA=0
fi

while [ "$TF_NEED_GCP" == "" ]; do
read -p "Do you wish to build TensorFlow with "\
"Google Cloud Platform support? [y/N] " INPUT
@@ -89,12 +109,16 @@ fi

## Find swig path
if [ -z "$SWIG_PATH" ]; then
SWIG_PATH=`type -p swig 2> /dev/null`
SWIG_PATH=`type -p swig 2> /dev/null || true`
fi
if [[ ! -e "$SWIG_PATH" ]]; then
echo "Can't find swig. Ensure swig is in \$PATH or set \$SWIG_PATH."
exit 1
fi
# Convert swig path to Windows style before writing into swig_path
if is_windows; then
SWIG_PATH="$(cygpath -m "$SWIG_PATH")"
fi
echo "$SWIG_PATH" > tensorflow/tools/swig/swig_path

# Invoke python_config and set up symlinks to python includes
@@ -104,7 +128,7 @@ echo "$SWIG_PATH" > tensorflow/tools/swig/swig_path
# git hash propagation
GEN_GIT_SOURCE=tensorflow/tools/git/gen_git_source.py
chmod a+x ${GEN_GIT_SOURCE}
${PYTHON_BIN_PATH} ${GEN_GIT_SOURCE} --configure ${SOURCE_BASE_DIR}
"${PYTHON_BIN_PATH}" ${GEN_GIT_SOURCE} --configure "${SOURCE_BASE_DIR}"

## Set up Cuda-related environment settings

@@ -255,8 +279,8 @@ while true; do
CUDA_DNN_LIB_PATH="lib64/libcudnn.so${TF_CUDNN_EXT}"
CUDA_DNN_LIB_ALT_PATH="libcudnn.so${TF_CUDNN_EXT}"
elif [ "$OSNAME" == "Darwin" ]; then
CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}.dylib"
CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}.dylib"
CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}"
CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}"
fi

if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" -o -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then
7 changes: 7 additions & 0 deletions tensorflow/BUILD
@@ -44,6 +44,12 @@ config_setting(
visibility = ["//visibility:public"],
)

config_setting(
name = "windows",
values = {"cpu": "x64_windows_msvc"},
visibility = ["//visibility:public"],
)

config_setting(
name = "ios",
values = {
@@ -109,6 +115,7 @@ filegroup(
"//tensorflow/contrib/ndlstm:all_files",
"//tensorflow/contrib/opt:all_files",
"//tensorflow/contrib/rnn:all_files",
"//tensorflow/contrib/seq2seq:all_files",
"//tensorflow/contrib/session_bundle:all_files",
"//tensorflow/contrib/session_bundle/example:all_files",
"//tensorflow/contrib/slim:all_files",
2 changes: 0 additions & 2 deletions tensorflow/c/c_api.h
@@ -965,8 +965,6 @@ typedef struct TF_Library TF_Library;
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
// Expects the symbols "RegisterOps", "RegisterKernels", and "GetOpList", to be
// defined in the library.
//
// On success, place OK in status and return the newly created library handle.
// The caller owns the library handle.
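
For orientation, the comment above documents the C API's dynamic-library loading entry point; from Python the same mechanism is exposed as tf.load_op_library. A minimal sketch, assuming a custom op library zero_out.so built from the standard "adding an op" tutorial (the library name and its zero_out op are assumptions, not part of this commit):

import tensorflow as tf

# Resolve and load the shared library via the platform-specific mechanism
# described in the C API comment above.
zero_out_module = tf.load_op_library('./zero_out.so')

with tf.Session() as sess:
    # Ops defined in the library become Python functions on the returned module.
    print(sess.run(zero_out_module.zero_out([[1, 2], [3, 4]])))
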
1 change: 1 addition & 0 deletions tensorflow/contrib/BUILD
@@ -32,6 +32,7 @@ py_library(
"//tensorflow/contrib/opt:opt_py",
"//tensorflow/contrib/quantization:quantization_py",
"//tensorflow/contrib/rnn:rnn_py",
"//tensorflow/contrib/seq2seq:seq2seq_py",
"//tensorflow/contrib/slim",
"//tensorflow/contrib/slim:nets",
"//tensorflow/contrib/tensor_forest:tensor_forest_py",
1 change: 1 addition & 0 deletions tensorflow/contrib/__init__.py
@@ -37,6 +37,7 @@
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import rnn
from tensorflow.contrib import seq2seq
from tensorflow.contrib import slim
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
5 changes: 4 additions & 1 deletion tensorflow/contrib/cmake/CMakeLists.txt
@@ -48,14 +48,17 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_definitions(-DEIGEN_AVOID_STL_ARRAY)
if(WIN32)
add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC -D__VERSION__=\"MSVC\")
set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} /MP)
# Suppress warnings to reduce build log size.
add_definitions(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
endif()

if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} "-fno-exceptions -std=c++11")
endif()

# External dependencies
include(zlib)
include(gif)
include(png)
include(jpeg)
2 changes: 1 addition & 1 deletion tensorflow/contrib/cmake/external/grpc.cmake
@@ -19,7 +19,7 @@ endif()

ExternalProject_Add(grpc
PREFIX grpc
DEPENDS protobuf
DEPENDS protobuf zlib
GIT_REPOSITORY ${GRPC_URL}
GIT_TAG ${GRPC_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
2 changes: 2 additions & 0 deletions tensorflow/contrib/cmake/external/png.cmake
@@ -19,6 +19,7 @@ set(png_HEADERS

ExternalProject_Add(png
PREFIX png
DEPENDS zlib
URL ${png_URL}
URL_HASH ${png_HASH}
INSTALL_DIR ${png_INSTALL}
@@ -28,6 +29,7 @@ ExternalProject_Add(png
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DCMAKE_INSTALL_PREFIX:STRING=${png_INSTALL}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DZLIB_ROOT:STRING=${ZLIB_INSTALL}
)

## put png includes in the directory where they are expected
2 changes: 2 additions & 0 deletions tensorflow/contrib/cmake/external/protobuf.cmake
@@ -15,6 +15,7 @@ endif()

ExternalProject_Add(protobuf
PREFIX protobuf
DEPENDS zlib
GIT_REPOSITORY ${PROTOBUF_URL}
GIT_TAG ${PROTOBUF_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
@@ -29,4 +30,5 @@ ExternalProject_Add(protobuf
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DZLIB_ROOT:STRING=${ZLIB_INSTALL}
)
46 changes: 46 additions & 0 deletions tensorflow/contrib/cmake/external/zlib.cmake
@@ -0,0 +1,46 @@
include (ExternalProject)

set(zlib_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/external/zlib_archive)
set(ZLIB_URL https://github.com/madler/zlib)
set(ZLIB_BUILD ${CMAKE_CURRENT_BINARY_DIR}/zlib/src/zlib)
set(ZLIB_INSTALL ${CMAKE_CURRENT_BINARY_DIR}/zlib/install)
set(ZLIB_TAG 50893291621658f355bc5b4d450a8d06a563053d)

if(WIN32)
set(zlib_STATIC_LIBRARIES
${CMAKE_CURRENT_BINARY_DIR}/zlib/install/lib/zlib.lib)
else()
set(zlib_STATIC_LIBRARIES
${CMAKE_CURRENT_BINARY_DIR}/zlib/install/lib/libz.a)
endif()

set(ZLIB_HEADERS
"${ZLIB_INSTALL}/include/zconf.h"
"${ZLIB_INSTALL}/include/zlib.h"
)

ExternalProject_Add(zlib
PREFIX zlib
GIT_REPOSITORY ${ZLIB_URL}
GIT_TAG ${ZLIB_TAG}
INSTALL_DIR ${ZLIB_INSTALL}
BUILD_IN_SOURCE 1
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_INSTALL_PREFIX:STRING=${ZLIB_INSTALL}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
)

# put zlib includes in the directory where they are expected
add_custom_target(zlib_create_destination_dir
COMMAND ${CMAKE_COMMAND} -E make_directory ${zlib_INCLUDE_DIR}
DEPENDS zlib)

add_custom_target(zlib_copy_headers_to_destination
DEPENDS zlib_create_destination_dir)

foreach(header_file ${ZLIB_HEADERS})
add_custom_command(TARGET zlib_copy_headers_to_destination PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${zlib_INCLUDE_DIR})
endforeach()
3 changes: 0 additions & 3 deletions tensorflow/contrib/cmake/tf_python.cmake
@@ -41,9 +41,6 @@ if(NOT NUMPY_INCLUDE_DIR)
endif(${NUMPY_NOT_FOUND})
endif(NOT NUMPY_INCLUDE_DIR)

# 3. Resolve the installed version of zlib (for libz.so).
find_package(ZLIB REQUIRED)


########################################################
# Build the Python directory structure.
tensorflow/contrib/distributions/python/kernel_tests/beta_test.py
@@ -17,7 +17,7 @@
from __future__ import print_function

import numpy as np
from scipy import stats
from scipy import stats, special
import tensorflow as tf


@@ -308,6 +308,40 @@ def testBetaWithSoftplusAB(self):
self.assertAllClose(tf.nn.softplus(a).eval(), dist.a.eval())
self.assertAllClose(tf.nn.softplus(b).eval(), dist.b.eval())

def testBetaBetaKL(self):
with self.test_session() as sess:
for shape in [(10,), (4,5)]:
a1 = 6.0*np.random.random(size=shape) + 1e-4
b1 = 6.0*np.random.random(size=shape) + 1e-4
a2 = 6.0*np.random.random(size=shape) + 1e-4
b2 = 6.0*np.random.random(size=shape) + 1e-4
# Take inverse softplus of values to test BetaWithSoftplusAB
a1_sp = np.log(np.exp(a1) - 1.0)
b1_sp = np.log(np.exp(b1) - 1.0)
a2_sp = np.log(np.exp(a2) - 1.0)
b2_sp = np.log(np.exp(b2) - 1.0)

d1 = tf.contrib.distributions.Beta(a=a1, b=b1)
d2 = tf.contrib.distributions.Beta(a=a2, b=b2)
d1_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a1_sp, b=b1_sp)
d2_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a2_sp, b=b2_sp)

kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1)
+ (a1 - a2)*special.digamma(a1)
+ (b1 - b2)*special.digamma(b1)
+ (a2 - a1 + b2 - b1)*special.digamma(a1 + b1))

for dist1 in [d1, d1_sp]:
for dist2 in [d2, d2_sp]:
kl = tf.contrib.distributions.kl(dist1, dist2)
kl_val = sess.run(kl)
self.assertEqual(kl.get_shape(), shape)
self.assertAllClose(kl_val, kl_expected)

# Make sure KL(d1||d1) is 0
kl_same = sess.run(tf.contrib.distributions.kl(d1, d1))
self.assertAllClose(kl_same, np.zeros_like(kl_expected))


if __name__ == "__main__":
tf.test.main()
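
The inverse-softplus values above follow from softplus(x) = \ln(1 + e^{x}); solving y = \ln(1 + e^{x}) for x gives

x = \ln(e^{y} - 1),

which is what a1_sp = np.log(np.exp(a1) - 1.0) computes, so BetaWithSoftplusAB(a=a1_sp, b=b1_sp) parameterizes the same distribution as Beta(a=a1, b=b1).
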
tensorflow/contrib/distributions/python/kernel_tests/categorical_test.py
@@ -222,5 +222,34 @@ def testMode(self):
dist = tf.contrib.distributions.Categorical(tf.log(histograms) - 50.)
self.assertAllEqual(dist.mode().eval(), [[1, 0]])

def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)

with self.test_session() as sess:
for categories in [2, 4]:
for batch_size in [1, 10]:
a_logits = np.random.randn(batch_size, categories)
b_logits = np.random.randn(batch_size, categories)

a = tf.contrib.distributions.Categorical(logits=a_logits)
b = tf.contrib.distributions.Categorical(logits=b_logits)

kl = tf.contrib.distributions.kl(a, b)
kl_val = sess.run(kl)
# Make sure KL(a||a) is 0
kl_same = sess.run(tf.contrib.distributions.kl(a, a))

prob_a = np_softmax(a_logits)
prob_b = np_softmax(b_logits)
kl_expected = np.sum(
prob_a * (np.log(prob_a) - np.log(prob_b)), axis=-1)

self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
self.assertAllClose(kl_same, np.zeros_like(kl_expected))


if __name__ == "__main__":
tf.test.main()
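
The reference value built from np_softmax and kl_expected above is the standard discrete KL divergence between the two softmax distributions,

p_a(i) = \frac{e^{\ell^{(a)}_i}}{\sum_j e^{\ell^{(a)}_j}}, \qquad
KL(a \,\|\, b) = \sum_i p_a(i)\,(\ln p_a(i) - \ln p_b(i)),

evaluated row-wise over the batch, which is why kl.get_shape() comes out as (batch_size,).
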
37 changes: 37 additions & 0 deletions tensorflow/contrib/distributions/python/ops/beta.py
@@ -22,6 +22,7 @@

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -299,3 +300,39 @@ allow_nan_stats=allow_nan_stats,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters


def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batched KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
inputs = [d1.a, d1.b, d1.a_b_sum, d2.a_b_sum]
with ops.name_scope(name, "kl_beta_beta", inputs):
# ln(B(a', b') / B(a, b))
log_betas = (math_ops.lgamma(d2.a) + math_ops.lgamma(d2.b)
- math_ops.lgamma(d2.a_b_sum) + math_ops.lgamma(d1.a_b_sum)
- math_ops.lgamma(d1.a) - math_ops.lgamma(d1.b))
# (a - a')*psi(a) + (b - b')*psi(b) + (a' - a + b' - b)*psi(a + b)
digammas = ((d1.a - d2.a)*math_ops.digamma(d1.a)
+ (d1.b - d2.b)*math_ops.digamma(d1.b)
+ (d2.a_b_sum - d1.a_b_sum)*math_ops.digamma(d1.a_b_sum))
return log_betas + digammas


# Register KL divergences.
kl_classes = [
Beta,
BetaWithSoftplusAB,
]

for beta_aa in kl_classes:
for beta_bb in kl_classes:
kullback_leibler.RegisterKL(beta_aa, beta_bb)(_kl_beta_beta)
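
For reference, the closed form implemented by _kl_beta_beta (and cross-checked against scipy in beta_test.py above) is, with B the Beta function and \psi the digamma function,

KL(\mathrm{Beta}(a_1, b_1) \,\|\, \mathrm{Beta}(a_2, b_2))
  = \ln B(a_2, b_2) - \ln B(a_1, b_1)
  + (a_1 - a_2)\,\psi(a_1) + (b_1 - b_2)\,\psi(b_1)
  + (a_2 - a_1 + b_2 - b_1)\,\psi(a_1 + b_1).

The registration loop above makes this kernel available for every pairing of Beta and BetaWithSoftplusAB. A minimal usage sketch (parameter values are illustrative, not from this commit), using only the tf.contrib.distributions API exercised by the new tests:

import tensorflow as tf

d1 = tf.contrib.distributions.Beta(a=[1.0, 2.0], b=[2.0, 3.0])
# BetaWithSoftplusAB applies softplus to its inputs, so these are pre-softplus values.
d2 = tf.contrib.distributions.BetaWithSoftplusAB(a=[0.5, 0.5], b=[1.0, 1.0])

# kl() dispatches on the registered (type(d1), type(d2)) pair and ends up in _kl_beta_beta.
kl = tf.contrib.distributions.kl(d1, d2)

with tf.Session() as sess:
    print(sess.run(kl))  # batchwise KL, shape (2,)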