Initial infrastructure setup and Python 2&3 fixes.
Ludwig Schubert committed Jan 31, 2018
1 parent f6b802b commit a6e81c3
Showing 21 changed files with 297 additions and 128 deletions.
15 changes: 15 additions & 0 deletions .editorconfig
@@ -0,0 +1,15 @@
# http://editorconfig.org

root = true

[*]
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
end_of_line = lf
charset = utf-8

# Docstrings and comments use max_line_length = 79
[*.py]
max_line_length = 79
6 changes: 6 additions & 0 deletions .gitignore
@@ -1 +1,7 @@
**.pyc
dist
.eggs
.cache
.tox
.pytest_cache
*.egg-info
10 changes: 10 additions & 0 deletions MANIFEST
@@ -0,0 +1,10 @@
# file GENERATED by distutils, do NOT edit
setup.cfg
setup.py
lucid/__init__.py
lucid/optvis/__init__.py
lucid/optvis/objectives.py
lucid/optvis/param.py
lucid/optvis/render.py
lucid/optvis/resize_bilinear_nd.py
lucid/optvis/transform.py
1 change: 1 addition & 0 deletions MANIFEST.in
@@ -0,0 +1 @@
include tox.ini
16 changes: 0 additions & 16 deletions lucid/misc/__init__.py
@@ -1,16 +0,0 @@
# Copyright 2018 The Deepviz Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import show
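Dropping the implicit relative import here is part of the Python 2 & 3 work: "import show" only resolves relative to the package under Python 2. For illustration, the 2-and-3-compatible spelling would be the absolute form (assuming the show module stays under lucid.misc; this commit simply removes the import):

from lucid.misc import show  # absolute import, valid under both Python 2 and 3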
79 changes: 79 additions & 0 deletions lucid/misc/uio.py
@@ -0,0 +1,79 @@
"""UnifiedIO provides wrappers around IO functions.
This is meant to make it transparent which data store we are working with.
"""

from __future__ import absolute_import, division, print_function
from future.standard_library import install_aliases
install_aliases()

import os
import re
from urllib.parse import urlparse, urljoin
from future.moves.urllib.request import urlopen
from tensorflow import gfile
from tempfile import gettempdir


def read(url):
"""Read from any URL.
Internally differentiates between URLs supported by tf.gfile, such as URLs
with the Google Cloud Storage scheme ('gs://...') or local paths, and HTTP
URLs. This way users don't need to know about the underlying fetch mechanism.
Args:
url: a URL including scheme or a local path
Returns:
All bytes from the specified resource, if it could be reached.
"""
scheme = urlparse(url).scheme
if scheme in ('http', 'https'):
return read_web_url(url)
elif scheme == 'gs':
return read_gcs_url(url)
else:
return read_path(url)


RESERVED_PATH_CHARS = re.compile("[^a-zA-Z0-9]")
def read_and_cache(url):
local_name = RESERVED_PATH_CHARS.sub('_', url)
local_path = os.path.join(gettempdir(), local_name)
if os.path.exists(local_path):
print("Trying cached file '{}'.".format(local_path))
return read_path(local_path)
else:
print("Caching URL '{}' locally at '{}'.".format(url, local_path))
result = read(url)
save(result, local_path)
return result


def read_web_url(url):
print('read_web_url', url)
return urlopen(url).read()


def read_gcs_url(url):
# TODO: transparently allow authenticated access through storage API
_, resource_name = url.split('://')
base_url = 'https://storage.googleapis.com/'
url = urljoin(base_url, resource_name)
return read_web_url(url)


def read_path(path):
with gfile.Open(path, 'rb') as handle:
result = handle.read()
return result


def save(bytes, url):
assert not is_web_url(url)
save_to_path(bytes, url)


def save_to_path(bytes, path):
with gfile.Open(path, 'wb') as handle:
handle.write(bytes)
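For orientation, a minimal usage sketch of the new module (not part of the diff; the HTTPS URL is a placeholder, while the gs:// path is the one used by vision_models.py below):

from lucid.misc import uio

# read() dispatches on the URL scheme, so callers need only one entry point.
page_bytes = uio.read('https://example.com/model.pb')    # HTTP(S): fetched with urlopen
model_bytes = uio.read('gs://modelzoo/InceptionV1.pb')   # GCS: rewritten to storage.googleapis.com
local_bytes = uio.read('/tmp/model.pb')                  # local path: read through tf.gfile

# read_and_cache() stores a copy under the system temp dir, keyed by a
# sanitized version of the URL, so repeated calls hit the local cache.
cached_bytes = uio.read_and_cache('gs://modelzoo/InceptionV1.pb')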
18 changes: 0 additions & 18 deletions lucid/modelzoo/__init__.py
@@ -1,18 +0,0 @@
# Copyright 2018 The Deepviz Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================



from vision_models import *
16 changes: 3 additions & 13 deletions lucid/modelzoo/util.py
@@ -13,25 +13,15 @@
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import, division, print_function

import urllib2
import tensorflow as tf
from lucid.misc import uio


def read_resource(path):
if '://' in path:
protocol, resource = path.split('://')
if protocol == 'gs':
url = 'https://storage.googleapis.com/' + resource
else:
url = resource
return urllib2.urlopen(url).read()
else:
return tf.gfile.GFile(path).read()

def load_graphdef(model_url, reset_device=True):
"""Load GraphDef from a binary proto file."""
graph_def_str = read_resource(model_url)
graph_def_str = uio.read_and_cache(model_url)
graph_def = tf.GraphDef.FromString(graph_def_str)
if reset_device:
for n in graph_def.node:
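With the hand-rolled read_resource helper gone, load_graphdef fetches through uio.read_and_cache, so a model proto downloaded once is reused from the temp-dir cache on later calls. A minimal sketch, reusing the gs:// path from vision_models.py:

import tensorflow as tf
from lucid.modelzoo.util import load_graphdef

# The first call downloads and caches the proto; later calls read the cached copy.
graph_def = load_graphdef('gs://modelzoo/InceptionV1.pb')
assert isinstance(graph_def, tf.GraphDef)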
13 changes: 7 additions & 6 deletions lucid/modelzoo/vision_base.py
@@ -13,10 +13,10 @@
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import, division, print_function

import tensorflow as tf

from util import load_graphdef, forget_xy
from lucid.modelzoo.util import load_graphdef, forget_xy

class Model(object):
"""Base pretrained model importer."""
@@ -31,6 +31,9 @@ def __init__(self):
def load_graphdef(self):
self.graph_def = load_graphdef(self.model_path)

def post_import(self, scope):
pass

def create_input(self, t_input=None, forget_xy_shape=True):
"""Create input tensor."""
if t_input is None:
@@ -52,7 +55,5 @@ def import_graph(self, t_input=None, scope='import', forget_xy_shape=True):
'importing multiple instances of the model.') % scope
t_input, t_prep_input = self.create_input(t_input, forget_xy_shape)
tf.import_graph_def(
self.graph_def, {self.input_name: t_prep_input},
name=scope)


self.graph_def, {self.input_name: t_prep_input}, name=scope)
self.post_import(scope)
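The new post_import hook gives subclasses a place to add nodes right after tf.import_graph_def; the base implementation is a no-op. A sketch with a hypothetical subclass (the class name and bucket path are illustrative, not part of this commit):

from lucid.modelzoo.vision_base import Model

class MyFrozenModel(Model):
  model_path = 'gs://my-bucket/my_model.pb'  # placeholder path
  image_shape = [224, 224, 3]
  image_value_range = (0, 1)
  input_name = 'input:0'

  def post_import(self, scope):
    # Called by import_graph() once the GraphDef is imported under `scope`,
    # e.g. to add derived tensors such as pre-activation concats.
    pass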
23 changes: 22 additions & 1 deletion lucid/modelzoo/vision_models.py
@@ -13,11 +13,32 @@
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import, division, print_function

import tensorflow as tf
from lucid.modelzoo.vision_base import Model


def populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph."""
graph = tf.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]
pre_relus = []
for tower in op.inputs[1:]:
if tower.op.type == 'Relu':
tower = tower.op.inputs[0]
pre_relus.append(tower)
concat_name = scope + '/' + name + '_pre_relu'
_ = tf.concat(pre_relus, -1, name=concat_name)

from vision_base import Model

class InceptionV1(Model):
model_path = 'gs://modelzoo/InceptionV1.pb'
image_shape = [224, 224, 3]
image_value_range = (-117, 255-117)
input_name = 'input:0'

def post_import(self, scope):
populate_inception_bottlenecks(scope)
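Putting the pieces together, a short usage sketch (assuming create_input builds a placeholder when no input tensor is passed; the mixed4a_pre_relu tensor name is an illustrative example of the bottlenecks added by post_import):

import tensorflow as tf
from lucid.modelzoo.vision_models import InceptionV1

model = InceptionV1()
model.load_graphdef()    # fetched and cached via uio.read_and_cache

with tf.Graph().as_default() as graph:
  model.import_graph()   # imports under scope 'import' and runs populate_inception_bottlenecks
  # The added concat ops expose pre-activation bottlenecks, for example:
  t_pre_relu = graph.get_tensor_by_name('import/mixed4a_pre_relu:0')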
20 changes: 0 additions & 20 deletions lucid/optvis/__init__.py
@@ -1,20 +0,0 @@
# Copyright 2018 The Deepviz Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================


import objectives
import param
import transform
import render
6 changes: 4 additions & 2 deletions lucid/optvis/objectives.py
@@ -29,6 +29,8 @@
actually constructed within its graph/session.
"""

from __future__ import absolute_import, division, print_function

from decorator import decorator
import numpy as np
import tensorflow as tf
@@ -134,9 +136,9 @@ def inner(T):
layer = T(layer_name)
shape = tf.shape(layer)
if x is None:
x_ = shape[1] / 2
x_ = shape[1] // 2
if y is None:
y_ = shape[2] / 2
y_ = shape[2] // 2
if batch is None:
return layer[:, x_, y_, channel_n]
else:
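The change from / to // above is one of the Python 3 fixes named in the commit title: with from __future__ import division in effect, / is true division and yields a float, which cannot be used to index into the layer tensor; // keeps the computed center coordinates integral. A plain-Python illustration of the difference:

from __future__ import division

width = 7
print(width / 2)   # 3.5 -- true division, a float
print(width // 2)  # 3   -- floor division, usable as an index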
2 changes: 1 addition & 1 deletion lucid/optvis/param.py
@@ -28,7 +28,7 @@
import numpy as np
import tensorflow as tf

from resize_bilinear_nd import resize_bilinear_nd
from lucid.optvis.resize_bilinear_nd import resize_bilinear_nd


# def fft_tensor(shape, scale_freqs=True):
[Diffs for the remaining changed files were not loaded.]