From ca4cee0a7a78f4613636bee50260d7af2865f0be Mon Sep 17 00:00:00 2001 From: Vijay Vasudevan Date: Sat, 7 Nov 2015 10:49:41 -0800 Subject: [PATCH] TensorFlow: Upstream latest commits to git. Changes: - Updates to Documentation, README.md, installation instructions, anchor links, etc. - Adds Readme for embedding directory. Base CL: 107308461 --- README.md | 17 ++++- tensorflow/g3doc/api_docs/python/client.md | 73 +++++++++++++++++++ tensorflow/g3doc/api_docs/python/index.md | 1 + tensorflow/g3doc/get_started/index.md | 4 +- tensorflow/g3doc/get_started/os_setup.md | 34 +-------- .../g3doc/how_tos/adding_an_op/index.md | 30 ++++---- tensorflow/g3doc/how_tos/graph_viz/index.md | 2 +- tensorflow/g3doc/how_tos/index.md | 8 +- .../g3doc/how_tos/new_data_formats/index.md | 2 +- .../g3doc/how_tos/reading_data/index.md | 9 ++- .../summaries_and_tensorboard/index.md | 2 +- tensorflow/g3doc/resources/index.md | 4 +- tensorflow/g3doc/tutorials/deep_cnn/index.md | 14 ++-- tensorflow/g3doc/tutorials/seq2seq/index.md | 3 +- tensorflow/models/embedding/README.md | 53 ++++++++++++++ tensorflow/models/embedding/word2vec.py | 5 +- .../models/embedding/word2vec_optimized.py | 6 +- tensorflow/python/client/client_lib.py | 1 + tensorflow/python/client/session.py | 51 ++++++++++--- .../python/framework/gen_docs_combined.py | 3 +- tensorflow/tools/docker/Dockerfile.cpu | 14 ++-- 21 files changed, 242 insertions(+), 94 deletions(-) create mode 100644 tensorflow/models/embedding/README.md diff --git a/README.md b/README.md index 05bfec84315d2e..102f3742e8c425 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,21 @@ organization for the purposes of conducting machine learning and deep neural networks research. The system is general enough to be applicable in a wide variety of other domains, as well. + +**Note: Currently we do not accept pull requests on github -- see +[CONTRIBUTING.md](CONTRIBUTING.md) for information on how to contribute code +changes to TensorFlow through +[tensorflow.googlesource.com](https://tensorflow.googlesource.com/tensorflow)** + +**We use [github issues](https://github.com/tensorflow/tensorflow/issues) for +tracking requests and bugs, but please see +[Community](resources/index.md#community) for general questions and +discussion.** + # Download and Setup -For detailed installation instructions, see +To install TensorFlow using a binary package, see the instructions below. For +more detailed installation instructions, including installing from source, see [here](tensorflow/g3doc/get_started/os_setup.md). ## Binary Installation @@ -32,7 +44,8 @@ Install TensorFlow: # For CPU-only version $ sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl -# For GPU-enabled version +# For GPU-enabled version. See detailed install instructions +# for GPU configuration information. 
$ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl ``` diff --git a/tensorflow/g3doc/api_docs/python/client.md b/tensorflow/g3doc/api_docs/python/client.md index 3ab0af0abf9fd9..8db13549b3132d 100644 --- a/tensorflow/g3doc/api_docs/python/client.md +++ b/tensorflow/g3doc/api_docs/python/client.md @@ -5,6 +5,7 @@ ## Contents * [Session management](#AUTOGENERATED-session-management) * [class tf.Session](#Session) + * [class tf.InteractiveSession](#InteractiveSession) * [tf.get_default_session()](#get_default_session) * [Error classes](#AUTOGENERATED-error-classes) * [class tf.OpError](#OpError) @@ -257,6 +258,78 @@ thread's function. +- - - + +### class tf.InteractiveSession
{#InteractiveSession}
+ +A TensorFlow `Session` for use in interactive contexts, such as a shell. + +The only difference with a regular `Session` is that an `InteractiveSession` +installs itself as the default session on construction. +The methods [`Tensor.eval()`](framework.md#Tensor.eval) and +[`Operation.run()`](framework.md#Operation.run) will use that session +to run ops. + +This is convenient in interactive shells and [IPython +notebooks](http://ipython.org), as it avoids having to pass an explicit +`Session` object to run ops. + +For example: + +```python +sess = tf.InteractiveSession() +a = tf.constant(5.0) +b = tf.constant(6.0) +c = a * b +# We can just use 'c.eval()' without passing 'sess' +print c.eval() +sess.close() +``` + +Note that a regular session installs itself as the default session when it +is created in a `with` statement. The common usage in non-interactive +programs is to follow that pattern: + +```python +a = tf.constant(5.0) +b = tf.constant(6.0) +c = a * b +with tf.Session(): + # We can also use 'c.eval()' here. + print c.eval() +``` + +- - - + +#### tf.InteractiveSession.__init__(target='', graph=None) {#InteractiveSession.__init__} + +Creates a new interactive TensorFlow session. + +If no `graph` argument is specified when constructing the session, +the default graph will be launched in the session. If you are +using more than one graph (created with `tf.Graph()` in the same +process, you will have to use different sessions for each graph, +but each graph can be used in multiple sessions. In this case, it +is often clearer to pass the graph to be launched explicitly to +the session constructor. + +##### Args: + + +* target: (Optional.) The execution engine to connect to. + Defaults to using an in-process engine. At present, no value + other than the empty string is supported. +* graph: (Optional.) The `Graph` to be launched (described above). + + +- - - + +#### tf.InteractiveSession.close() {#InteractiveSession.close} + +Closes an `InteractiveSession`. + + + - - - diff --git a/tensorflow/g3doc/api_docs/python/index.md b/tensorflow/g3doc/api_docs/python/index.md index dd47b703fc53a5..f905cd0990409c 100644 --- a/tensorflow/g3doc/api_docs/python/index.md +++ b/tensorflow/g3doc/api_docs/python/index.md @@ -299,6 +299,7 @@ * [`DeadlineExceededError`](client.md#DeadlineExceededError) * [`FailedPreconditionError`](client.md#FailedPreconditionError) * [`get_default_session`](client.md#get_default_session) + * [`InteractiveSession`](client.md#InteractiveSession) * [`InternalError`](client.md#InternalError) * [`InvalidArgumentError`](client.md#InvalidArgumentError) * [`NotFoundError`](client.md#NotFoundError) diff --git a/tensorflow/g3doc/get_started/index.md b/tensorflow/g3doc/get_started/index.md index f61fd4c1e9505c..bc48f11c1829c8 100644 --- a/tensorflow/g3doc/get_started/index.md +++ b/tensorflow/g3doc/get_started/index.md @@ -54,10 +54,10 @@ suggest skimming blue, then red.
[image table markup elided from this hunk]
Images licensed CC BY-SA 4.0; original by W. Carter
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md index 0917f16832ddf7..bae56f81793a95 100644 --- a/tensorflow/g3doc/get_started/os_setup.md +++ b/tensorflow/g3doc/get_started/os_setup.md @@ -88,35 +88,8 @@ ImportError: libcudart.so.7.0: cannot open shared object file: No such file or d you most likely need to set your `LD_LIBRARY_PATH` to point to the location of your CUDA libraries. -### Train the MNIST neural net model - -```sh -$ python tensorflow/models/image/mnist/convolutional.py -Succesfully downloaded train-images-idx3-ubyte.gz 9912422 bytes. -Succesfully downloaded train-labels-idx1-ubyte.gz 28881 bytes. -Succesfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes. -Succesfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes. -Extracting data/train-images-idx3-ubyte.gz -Extracting data/train-labels-idx1-ubyte.gz -Extracting data/t10k-images-idx3-ubyte.gz -Extracting data/t10k-labels-idx1-ubyte.gz -can't determine number of CPU cores: assuming 4 -I tensorflow/core/common_runtime/local_device.cc:25] Local device intra op -parallelism threads: 3 -can't determine number of CPU cores: assuming 4 -I tensorflow/core/common_runtime/local_session.cc:45] Local session inter op -parallelism threads: 4 -Initialized! -Epoch 0.00 -Minibatch loss: 12.054, learning rate: 0.010000 -Minibatch error: 90.6% -Validation error: 84.6% -... -... - -``` - -## Installing from sources {#source} + +## Installing from sources ### Clone the TensorFlow repository @@ -260,7 +233,8 @@ Notes : You need to install Follow installation instructions [here](http://docs.scipy.org/doc/numpy/user/install.html). -### Create the pip package and install {#create-pip} + +### Create the pip package and install ```sh $ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package diff --git a/tensorflow/g3doc/how_tos/adding_an_op/index.md b/tensorflow/g3doc/how_tos/adding_an_op/index.md index 8702569f75a48e..ee1f05029b5758 100644 --- a/tensorflow/g3doc/how_tos/adding_an_op/index.md +++ b/tensorflow/g3doc/how_tos/adding_an_op/index.md @@ -1,4 +1,4 @@ -# Adding a New Op to TensorFlow +# Adding a New Op PREREQUISITES: @@ -27,27 +27,28 @@ to: ## Contents -* [Define the Op's interface](#define_interface) +* [Define the Op's interface](#AUTOGENERATED-define-the-op-s-interface) * [Implement the kernel for the Op](#AUTOGENERATED-implement-the-kernel-for-the-op) * [Generate the client wrapper](#AUTOGENERATED-generate-the-client-wrapper) * [The Python Op wrapper](#AUTOGENERATED-the-python-op-wrapper) * [The C++ Op wrapper](#AUTOGENERATED-the-c---op-wrapper) * [Verify it works](#AUTOGENERATED-verify-it-works) -* [Validation](#validation) +* [Validation](#AUTOGENERATED-validation) * [Op registration](#AUTOGENERATED-op-registration) * [Attrs](#AUTOGENERATED-attrs) * [Attr types](#AUTOGENERATED-attr-types) - * [Polymorphism](#polymorphism) + * [Polymorphism](#AUTOGENERATED-polymorphism) * [Inputs and Outputs](#AUTOGENERATED-inputs-and-outputs) * [Backwards compatibility](#AUTOGENERATED-backwards-compatibility) -* [GPU Support](#mult-archs) +* [GPU Support](#AUTOGENERATED-gpu-support) * [Implement the gradient in Python](#AUTOGENERATED-implement-the-gradient-in-python) * [Implement a shape function in Python](#AUTOGENERATED-implement-a-shape-function-in-python) -## Define the Op's interface
{#define_interface}
+
+## Define the Op's interface {#AUTOGENERATED-define-the-op-s-interface}
You define the interface of an Op by registering it with the TensorFlow system. In the registration, you specify the name of your Op, its inputs (types and @@ -210,7 +211,7 @@ Then run your test: $ bazel test tensorflow/python:zero_out_op_test ``` -## Validation
{#validation}
+## Validation {#AUTOGENERATED-validation}
The example above assumed that the Op applied to a tensor of any shape. What if it only applied to vectors? That means adding a check to the above OpKernel @@ -445,8 +446,8 @@ REGISTER_OP("AttrDefaultExampleForAllTypes") Note in particular that the values of type `type` use [the `DT_*` names for the types](../../resources/dims_types.md#data-types). -### Polymorphism
{#polymorphism}
-#### Type Polymorphism {#type-polymorphism}
+### Polymorphism {#AUTOGENERATED-polymorphism}
+#### Type Polymorphism For ops that can take different types as input or produce different output types, you can specify [an attr](#attrs) in @@ -466,7 +467,8 @@ REGISTER\_OP("ZeroOut") Your Op registration now specifies that the input's type must be `float`, or `int32`, and that its output will be the same type, since both have type `T`. -> A note on naming:{#naming} Inputs, outputs, and attrs generally should be + +> A note on naming: Inputs, outputs, and attrs generally should be > given snake_case names. The one exception is attrs that are used as the type > of an input or in the type of an input. Those attrs can be inferred when the > op is added to the graph and so don't appear in the op's function. For @@ -482,7 +484,7 @@ Your Op registration now specifies that the input's type must be `float`, or > name: A name for the operation (optional). > > Returns: -> A `Tensor`. Has the same type as `x`. +> A `Tensor`. Has the same type as `to_zero`. > """ > ``` > @@ -674,7 +676,8 @@ TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL); #undef REGISTER_KERNEL ``` -#### List Inputs and Outputs {#list-input-output} + +#### List Inputs and Outputs In addition to being able to accept or produce different types, ops can consume or produce a variable number of tensors. @@ -894,7 +897,8 @@ There are several ways to preserve backwards-compatibility. If you cannot make your change to an operation backwards compatible, then create a new operation with a new name with the new semantics. -## GPU Support
{#mult-archs}
+
+## GPU Support {#AUTOGENERATED-gpu-support}
You can implement different OpKernels and register one for CPU and another for GPU, just like you can [register kernels for different types](#polymorphism). diff --git a/tensorflow/g3doc/how_tos/graph_viz/index.md b/tensorflow/g3doc/how_tos/graph_viz/index.md index f0a1fc2fe75986..7e3e3fde60a656 100644 --- a/tensorflow/g3doc/how_tos/graph_viz/index.md +++ b/tensorflow/g3doc/how_tos/graph_viz/index.md @@ -1,4 +1,4 @@ -# TensorBoard: Visualizing Your Graph +# TensorBoard: Graph Visualization TensorFlow computation graphs are powerful but complicated. The graph visualization can help you understand and debug them. Here's an example of the visualization at work. diff --git a/tensorflow/g3doc/how_tos/index.md b/tensorflow/g3doc/how_tos/index.md index f5c74715e8c326..d04ee2a2db5c2d 100644 --- a/tensorflow/g3doc/how_tos/index.md +++ b/tensorflow/g3doc/how_tos/index.md @@ -18,7 +18,7 @@ example. [View Tutorial](../tutorials/mnist/tf/index.md) -## TensorBoard: Visualizing Your Training +## TensorBoard: Visualizing Learning TensorBoard is a useful tool for visualizing the training and evaluation of your model(s). This tutorial describes how to build and run TensorBoard as well @@ -28,7 +28,7 @@ TensorBoard uses for display. [View Tutorial](summaries_and_tensorboard/index.md) -## TensorBoard: Visualizing Your Graph +## TensorBoard: Graph Visualization This tutorial describes how to use the graph visualizer in TensorBoard to help you understand the dataflow graph and debug it. @@ -60,7 +60,7 @@ compose in your graph, but here are the details of how to add you own custom Op. [View Tutorial](adding_an_op/index.md) -## New Data Formats +## Custom Data Readers If you have a sizable custom data set, you may want to consider extending TensorFlow to read your data directly in it's native format. Here's how. @@ -68,7 +68,7 @@ TensorFlow to read your data directly in it's native format. Here's how. [View Tutorial](new_data_formats/index.md) -## Using One or More GPUs +## Using GPUs This tutorial describes how to construct and execute models on GPU(s). 
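For readers skimming the index above, the core of the GPU how-to is explicit device placement. The sketch below is illustrative only — it is not part of this patch and assumes a machine with at least one GPU visible as `/gpu:0`; an op with no GPU kernel registered must be placed on the CPU instead, which is what the GPU Support section of the adding-an-op guide addresses.

```python
import tensorflow as tf

# Pin these ops to the first GPU; an op with no GPU kernel would need to
# be placed on '/cpu:0' instead.
with tf.device('/gpu:0'):
    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    b = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    c = tf.matmul(a, b)

with tf.Session() as sess:
    print sess.run(c)
```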
diff --git a/tensorflow/g3doc/how_tos/new_data_formats/index.md b/tensorflow/g3doc/how_tos/new_data_formats/index.md index b1b09fe1ff1d71..a8fa7c42d4ed9f 100644 --- a/tensorflow/g3doc/how_tos/new_data_formats/index.md +++ b/tensorflow/g3doc/how_tos/new_data_formats/index.md @@ -1,4 +1,4 @@ -# Extending TF: Supporting new data formats +# Custom Data Readers PREREQUISITES: diff --git a/tensorflow/g3doc/how_tos/reading_data/index.md b/tensorflow/g3doc/how_tos/reading_data/index.md index 2b305f9333b3a8..b37d3042e7ccbb 100644 --- a/tensorflow/g3doc/how_tos/reading_data/index.md +++ b/tensorflow/g3doc/how_tos/reading_data/index.md @@ -10,13 +10,13 @@ There are three main methods of getting data into a TensorFlow program: ## Contents -* [Feeding](#Feeding) +* [Feeding](#AUTOGENERATED-feeding) * [Reading from files](#AUTOGENERATED-reading-from-files) * [Filenames, shuffling, and epoch limits](#AUTOGENERATED-filenames--shuffling--and-epoch-limits) * [File formats](#AUTOGENERATED-file-formats) * [Preprocessing](#AUTOGENERATED-preprocessing) * [Batching](#AUTOGENERATED-batching) - * [Creating threads to prefetch using `QueueRunner` objects](#QueueRunner) + * [Creating threads to prefetch using `QueueRunner` objects](#AUTOGENERATED-creating-threads-to-prefetch-using--queuerunner--objects) * [Filtering records or producing multiple examples per record](#AUTOGENERATED-filtering-records-or-producing-multiple-examples-per-record) * [Sparse input data](#AUTOGENERATED-sparse-input-data) * [Preloaded data](#AUTOGENERATED-preloaded-data) @@ -25,7 +25,7 @@ There are three main methods of getting data into a TensorFlow program: -## Feeding
{#Feeding}
+## Feeding {#AUTOGENERATED-feeding}
TensorFlow's feed mechanism lets you inject data into any Tensor in a computation graph. A python computation can thus feed data directly into the @@ -267,7 +267,8 @@ summary to the graph that indicates how full the example queue is. If you have enough reading threads, that summary will stay above zero. You can [view your summaries as training progresses using TensorBoard](../summaries_and_tensorboard/index.md). -### Creating threads to prefetch using `QueueRunner` objects
{#QueueRunner}
+
+### Creating threads to prefetch using `QueueRunner` objects {#AUTOGENERATED-creating-threads-to-prefetch-using--queuerunner--objects}
The short version: many of the `tf.train` functions listed above add [`QueueRunner`](../../api_docs/python/train.md#QueueRunner) objects to your diff --git a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md index 65731877c10089..eb22df184dbb7a 100644 --- a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md +++ b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md @@ -1,4 +1,4 @@ -# TensorBoard: Visualizing Your Training +# TensorBoard: Visualizing Learning The computations you'll use TensorBoard for - like training a massive deep neural network - can be complex and confusing. To make it easier to diff --git a/tensorflow/g3doc/resources/index.md b/tensorflow/g3doc/resources/index.md index 5e279e49450694..0519c97b530d71 100644 --- a/tensorflow/g3doc/resources/index.md +++ b/tensorflow/g3doc/resources/index.md @@ -11,8 +11,8 @@ implementation can be found in out white paper: ### Citation If you use TensorFlow in your research and would like to cite the TensorFlow -system, we suggest you cite the paper above. You can use this [BibTeX -entry](bib.md). As the project progresses, we +system, we suggest you cite the paper above. +You can use this [BibTeX entry](bib.md). As the project progresses, we may update the suggested citation with new papers. diff --git a/tensorflow/g3doc/tutorials/deep_cnn/index.md b/tensorflow/g3doc/tutorials/deep_cnn/index.md index 929e1b30474504..906093009efb12 100644 --- a/tensorflow/g3doc/tutorials/deep_cnn/index.md +++ b/tensorflow/g3doc/tutorials/deep_cnn/index.md @@ -33,11 +33,11 @@ experiments. The CIFAR-10 tutorial demonstrates several important constructs for designing larger and more sophisticated models in TensorFlow: -* Core mathematical components including -[convolution](../../api_docs/python/nn.md#conv2d), -[rectified linear activations](../../api_docs/python/nn.md#relu), -[max pooling](../../api_docs/python/nn.md#max_pool) and -[local response normalization](../../api_docs/python/nn.md#local_response_normalization). +* Core mathematical components including[convolution]( +../../api_docs/python/nn.md#conv2d), [rectified linear activations]( +../../api_docs/python/nn.md#relu), [max pooling]( +../../api_docs/python/nn.md#max_pool) and [local response normalization]( +../../api_docs/python/nn.md#local_response_normalization). * [Visualization](../../how_tos/summaries_and_tensorboard/index.md) of network activity during training including input images, losses and distributions of activations and gradients. @@ -341,8 +341,8 @@ all learned model parameters with the moving average version. This substitution boosts model performance at evaluation time. > **EXERCISE:** Employing averaged parameters may boost predictive performance -by about 3% as measured by precision@1. Edit `cifar10_eval.py` to not employ the -averaged parameters for the model and verify that the predictive performance +by about 3% as measured by precision @ 1. Edit `cifar10_eval.py` to not employ +the averaged parameters for the model and verify that the predictive performance drops. diff --git a/tensorflow/g3doc/tutorials/seq2seq/index.md b/tensorflow/g3doc/tutorials/seq2seq/index.md index 3eec2a2ba8e340..b91688691dff62 100644 --- a/tensorflow/g3doc/tutorials/seq2seq/index.md +++ b/tensorflow/g3doc/tutorials/seq2seq/index.md @@ -230,7 +230,8 @@ with encoder inputs representing `[PAD PAD "." "go" "I"]` and decoder inputs `[GO "Je" "vais" "." EOS PAD PAD PAD PAD PAD]`. 
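To make the padding scheme above concrete, here is a small pure-Python sketch that reproduces the example pair. The helper and the token constants are illustrative only — the tutorial's actual code works on vocabulary ids rather than token strings — and the `(5, 10)` bucket matches the sizes implied by the example: the encoder input is the reversed source sentence left-padded with PAD, and the decoder input is GO plus the target plus EOS, right-padded with PAD.

```python
PAD, GO, EOS = "PAD", "GO", "EOS"

def pad_to_bucket(source_tokens, target_tokens, bucket):
    """Pad one (source, target) sentence pair into a fixed-size bucket."""
    encoder_size, decoder_size = bucket
    # Encoder input: reversed source, left-padded with PAD to encoder_size.
    encoder_inputs = [PAD] * (encoder_size - len(source_tokens)) + list(reversed(source_tokens))
    # Decoder input: GO + target + EOS, right-padded with PAD to decoder_size.
    decoder_inputs = [GO] + list(target_tokens) + [EOS]
    decoder_inputs += [PAD] * (decoder_size - len(decoder_inputs))
    return encoder_inputs, decoder_inputs

# Reproduces the example above: ["I", "go", "."] -> ["Je", "vais", "."].
enc, dec = pad_to_bucket(["I", "go", "."], ["Je", "vais", "."], (5, 10))
print enc  # ['PAD', 'PAD', '.', 'go', 'I']
print dec  # ['GO', 'Je', 'vais', '.', 'EOS', 'PAD', 'PAD', 'PAD', 'PAD', 'PAD']
```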
-## Let's Run It {#run_it} + +## Let's Run It To train the model described above, we need to a large English-French corpus. We will use the *10^9-French-English corpus* from the diff --git a/tensorflow/models/embedding/README.md b/tensorflow/models/embedding/README.md new file mode 100644 index 00000000000000..5aac00e5d745a2 --- /dev/null +++ b/tensorflow/models/embedding/README.md @@ -0,0 +1,53 @@ +This directory contains models for unsupervised training of word embeddings +using the model described in: + +(Mikolov, et. al.) [Efficient Estimation of Word Representations in Vector Space](http://arxiv.org/abs/1301.3781), +ICLR 2013. + +Detailed instructions on how to get started and use them are available in the +tutorials. Brief instructions are below. + +* [Word2Vec Tutorial](http://tensorflow.org/tutorials/word2vec/) + +To download the example text and evaluation data: + +```shell +wget http://mattmahoney.net/dc/text8.zip -O text8.gz +gzip -d text8.gz -f +wget https://word2vec.googlecode.com/svn/trunk/questions-words.txt +``` + +To build everything under models/embedding/...: + +```shell +bazel build -c opt tensorflow/models/embedding:all +``` + +To run the code manually: + +```shell +bazel-bin/tensorflow/models/embedding/word2vec_optimized \ + --train_data=text8 \ + --eval_data=questions-words.txt \ + --save_path=/tmp/ +``` + +To run the code via bazel: + +```shell +bazel run -c opt tensorflow/models/embedding/word2vec_optimized -- \ + --train_data=text8 \ + --eval_data=questions-words.txt \ + --save_path=/tmp/ +``` + +Here is a short overview of what is in this directory. + +File | What's in it? +--- | --- +`word2vec.py` | A version of word2vec implemented using Tensorflow ops and minibatching. +`word2vec_test.py` | Integration test for word2vec. +`word2vec_optimized.py` | A version of word2vec implemented using C ops that does no minibatching. +`word2vec_optimized_test.py` | Integration test for word2vec_optimized. +`word2vec_kernels.cc` | Kernels for the custom input and training ops. +`word2vec_ops.cc` | The declarations of the custom ops. diff --git a/tensorflow/models/embedding/word2vec.py b/tensorflow/models/embedding/word2vec.py index a31de44a1d1167..12fb994d984923 100644 --- a/tensorflow/models/embedding/word2vec.py +++ b/tensorflow/models/embedding/word2vec.py @@ -14,6 +14,7 @@ * skipgram custom op that does input processing. """ +import os import sys import threading import time @@ -357,7 +358,7 @@ def build_graph(self): def save_vocab(self): """Save the vocabulary to a file so the model can be reloaded.""" opts = self._options - with open(opts.save_path + "/vocab.txt", "w") as f: + with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f: for i in xrange(opts.vocab_size): f.write(opts.vocab_words[i] + " " + str(opts.vocab_counts[i]) + "\n") @@ -493,7 +494,7 @@ def main(_): model.eval() # Eval analogies. # Perform a final save. model.saver.save(session, - opts.save_path + "model", + os.path.join(opts.save_path, "model.ckpt"), global_step=model.global_step) if FLAGS.interactive: # E.g., diff --git a/tensorflow/models/embedding/word2vec_optimized.py b/tensorflow/models/embedding/word2vec_optimized.py index 4d69601576c712..38fac1651db28f 100644 --- a/tensorflow/models/embedding/word2vec_optimized.py +++ b/tensorflow/models/embedding/word2vec_optimized.py @@ -13,6 +13,7 @@ true SGD. 
""" +import os import sys import threading import time @@ -217,7 +218,7 @@ def build_graph(self): def save_vocab(self): """Save the vocabulary to a file so the model can be reloaded.""" opts = self._options - with open(opts.save_path + "/vocab.txt", "w") as f: + with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f: for i in xrange(opts.vocab_size): f.write(opts.vocab_words[i] + " " + str(opts.vocab_counts[i]) + "\n") @@ -396,7 +397,8 @@ def main(_): model.train() # Process one epoch model.eval() # Eval analogies. # Perform a final save. - model.saver.save(session, opts.save_path + "model", global_step=model.step) + model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"), + global_step=model.step) if FLAGS.interactive: # E.g., # [0]: model.Analogy('france', 'paris', 'russia') diff --git a/tensorflow/python/client/client_lib.py b/tensorflow/python/client/client_lib.py index 9148ed17c0143a..4c7caa8a24dda6 100644 --- a/tensorflow/python/client/client_lib.py +++ b/tensorflow/python/client/client_lib.py @@ -7,6 +7,7 @@ ## Session management @@Session +@@InteractiveSession @@get_default_session diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py index 7da9b41cf42df0..571030de67c96f 100644 --- a/tensorflow/python/client/session.py +++ b/tensorflow/python/client/session.py @@ -524,32 +524,61 @@ def __exit__(self, exec_type, exec_value, exec_tb): class InteractiveSession(BaseSession): """A TensorFlow `Session` for use in interactive contexts, such as a shell. - In some cases, such as interactive shells and IPython notebooks, it is - useful to be able to define a `Session` without using a with block: this - style enables statements to be executed immediately, rather than at the - termination of the block. In that case, it must be closed using - `Session.close()`. For example: + The only difference with a regular `Session` is that an `InteractiveSession` + installs itself as the default session on construction. + The methods [`Tensor.eval()`](framework.md#Tensor.eval) and + [`Operation.run()`](framework.md#Operation.run) will use that session + to run ops. + + This is convenient in interactive shells and [IPython + notebooks](http://ipython.org), as it avoids having to pass an explicit + `Session` object to run ops. + + For example: ```python - sess = InteractiveSession() + sess = tf.InteractiveSession() a = tf.constant(5.0) b = tf.constant(6.0) c = a * b - print c.run() + # We can just use 'c.eval()' without passing 'sess' + print c.eval() sess.close() ``` + Note that a regular session installs itself as the default session when it + is created in a `with` statement. The common usage in non-interactive + programs is to follow that pattern: + + ```python + a = tf.constant(5.0) + b = tf.constant(6.0) + c = a * b + with tf.Session(): + # We can also use 'c.eval()' here. + print c.eval() + ``` + @@__init__ @@close """ def __init__(self, target='', graph=None): - """Initializes an `InteractiveSession` object similar to `Session`. + """Creates a new interactive TensorFlow session. + + If no `graph` argument is specified when constructing the session, + the default graph will be launched in the session. If you are + using more than one graph (created with `tf.Graph()` in the same + process, you will have to use different sessions for each graph, + but each graph can be used in multiple sessions. In this case, it + is often clearer to pass the graph to be launched explicitly to + the session constructor. Args: - target: Optional. 
The TensorFlow execution engine to connect to. - graph: Optional. The `Graph` object to be used. If this argument is None, - the default graph will be used. + target: (Optional.) The execution engine to connect to. + Defaults to using an in-process engine. At present, no value + other than the empty string is supported. + graph: (Optional.) The `Graph` to be launched (described above). """ super(InteractiveSession, self).__init__(target, graph) self._default_session = self.as_default() diff --git a/tensorflow/python/framework/gen_docs_combined.py b/tensorflow/python/framework/gen_docs_combined.py index 8256acc514049d..ed9c2e24a87ab9 100644 --- a/tensorflow/python/framework/gen_docs_combined.py +++ b/tensorflow/python/framework/gen_docs_combined.py @@ -79,8 +79,7 @@ def library(name, title, module=None, **args): "all_candidate_sampler", "embedding_lookup_sparse"], prefix=PREFIX_TEXT), - library('client', "Running Graphs", client_lib, - exclude_symbols=["InteractiveSession"]), + library('client', "Running Graphs", client_lib), library("train", "Training", tf.train, exclude_symbols=["Feature", "Features", "BytesList", "FloatList", "Int64List", "Example", "InferenceExample", diff --git a/tensorflow/tools/docker/Dockerfile.cpu b/tensorflow/tools/docker/Dockerfile.cpu index da0a7abb18bdb5..21d7b55c702ba0 100644 --- a/tensorflow/tools/docker/Dockerfile.cpu +++ b/tensorflow/tools/docker/Dockerfile.cpu @@ -56,17 +56,13 @@ RUN mkdir /bazel && \ # Download and build TensorFlow. WORKDIR /tensorflow -# We can't clone the TF git repo yet, because of permissions issues. -# RUN git clone https://tensorflow.googlesource.com/ -# Instead, we manually copy it in: +# Add in the source tree COPY tensorflow /tensorflow # Now we build RUN bazel clean && \ - bazel build -c opt tensorflow/tools/docker:simple_console + bazel build -c opt tensorflow/tools/pip_package:build_pip_package && \ + bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/pip && \ + pip install /tmp/pip/tensorflow-*.whl -ENV PYTHONPATH=/tensorflow/bazel-bin/tensorflow/tools/docker/simple_console.runfiles/:$PYTHONPATH - -# We want to start Jupyter in the directory with our getting started -# tutorials. -WORKDIR /notebooks +WORKDIR /root
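Since the Dockerfile now builds the pip package and installs it into the image, a quick smoke test inside the resulting container can confirm the install works. The snippet below is a hypothetical check, not part of this patch:

```python
import tensorflow as tf

# Build and run a trivial graph to confirm the pip-installed package loads.
sess = tf.Session()
hello = tf.constant('Hello, TensorFlow!')
print sess.run(hello)
print sess.run(tf.constant(6) * tf.constant(7))  # expect 42
sess.close()
```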