Merge pull request BVLC#2667 from BVLC/tutorial
bundle CVPR15 tutorial notebooks
shelhamer committed Jun 30, 2015
2 parents 1d6cac2 + 7003d1b commit fd44a91
Showing 22 changed files with 25,367 additions and 22,875 deletions.
13,187 changes: 13,187 additions & 0 deletions examples/00-classification.ipynb

Large diffs are not rendered by default.

5,196 changes: 5,196 additions & 0 deletions examples/01-learning-lenet.ipynb

Large diffs are not rendered by default.

5,771 changes: 5,771 additions & 0 deletions examples/02-brewing-logreg.ipynb

Large diffs are not rendered by default.

947 changes: 947 additions & 0 deletions examples/03-fine-tuning.ipynb

Large diffs are not rendered by default.

3,342 changes: 0 additions & 3,342 deletions examples/classification.ipynb

This file was deleted.

2 changes: 1 addition & 1 deletion examples/detection.ipynb
@@ -8385,7 +8385,7 @@
    "pygments_lexer": "ipython2",
    "version": "2.7.9"
   },
-  "priority": 3
+  "priority": 6
  },
 "nbformat": 4,
 "nbformat_minor": 0
13,214 changes: 0 additions & 13,214 deletions examples/filter_visualization.ipynb

This file was deleted.

8 changes: 8 additions & 0 deletions examples/finetune_flickr_style/assemble_data.py
@@ -9,6 +9,7 @@
 import argparse
 import numpy as np
 import pandas as pd
+from skimage import io
 import multiprocessing

 # Flickr returns a special image if the request is unavailable.
@@ -27,6 +28,7 @@ def download_image(args_tuple):
         urllib.urlretrieve(url, filename)
         with open(filename) as f:
             assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
+        test_read_image = io.imread(filename)
         return True
     except KeyboardInterrupt:
         raise Exception()  # multiprocessing doesn't catch keyboard exceptions
@@ -48,6 +50,10 @@ def download_image(args_tuple):
         '-w', '--workers', type=int, default=-1,
         help="num workers used to download images. -x uses (all - x) cores [-1 default]."
     )
+    parser.add_argument(
+        '-l', '--labels', type=int, default=0,
+        help="if set to a positive value, only sample images from the first number of labels."
+    )

     args = parser.parse_args()
     np.random.seed(args.seed)
@@ -56,6 +62,8 @@ def download_image(args_tuple):
     csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
     df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
     df = df.iloc[np.random.permutation(df.shape[0])]
+    if args.labels > 0:
+        df = df.loc[df['label'] < args.labels]
     if args.images > 0 and args.images < df.shape[0]:
         df = df.iloc[:args.images]

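Together these additions let the Flickr style scraper be capped by label as well as by image count. An illustrative invocation (the flag values are arbitrary, and -i/--images is assumed to be the pre-existing image cap that args.images refers to):

python examples/finetune_flickr_style/assemble_data.py --workers=-1 --images=2000 --labels=5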
6,290 changes: 0 additions & 6,290 deletions examples/hdf5_classification.ipynb

This file was deleted.

54 changes: 54 additions & 0 deletions examples/hdf5_classification/nonlinear_auto_test.prototxt
@@ -0,0 +1,54 @@
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
54 changes: 54 additions & 0 deletions examples/hdf5_classification/nonlinear_auto_train.prototxt
@@ -0,0 +1,54 @@
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
15 changes: 15 additions & 0 deletions examples/hdf5_classification/nonlinear_solver.prototxt
@@ -0,0 +1,15 @@
train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt"
test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt"
test_iter: 250
test_interval: 1000
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 5000
display: 1000
max_iter: 10000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "examples/hdf5_classification/data/train"
solver_mode: CPU
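The HDF5Data layers above point source at a plain text file listing HDF5 file paths, one per line, and each HDF5 file must carry datasets named "data" and "label". A minimal sketch of preparing such inputs with h5py (the sklearn toy data and file names are illustrative; the bundled 02-brewing-logreg notebook does the real version):

import h5py
import numpy as np
from sklearn.datasets import make_classification

# Toy 2-class data with 4 features; any float32 array of shape (N, d) works.
X, y = make_classification(n_samples=10000, n_features=4, n_classes=2)
X, y = X.astype(np.float32), y.astype(np.float32)

# The HDF5Data layer reads datasets named "data" and "label" from each file.
with h5py.File('examples/hdf5_classification/data/train.h5', 'w') as f:
    f['data'] = X
    f['label'] = y

# The layer's "source" is a list of HDF5 paths, one per line.
with open('examples/hdf5_classification/data/train.txt', 'w') as f:
    f.write('examples/hdf5_classification/data/train.h5\n')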
examples/hdf5_classification/nonlinear_train_val.prototxt
@@ -8,7 +8,7 @@ layer {
     phase: TRAIN
   }
   hdf5_data_param {
-    source: "hdf5_classification/data/train.txt"
+    source: "examples/hdf5_classification/data/train.txt"
     batch_size: 10
   }
 }
@@ -21,7 +21,7 @@ layer {
     phase: TEST
   }
   hdf5_data_param {
-    source: "hdf5_classification/data/test.txt"
+    source: "examples/hdf5_classification/data/test.txt"
     batch_size: 10
   }
 }
@@ -41,8 +41,7 @@ layer {
   inner_product_param {
     num_output: 40
     weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
     }
     bias_filler {
       type: "constant"
@@ -72,8 +71,7 @@ layer {
   inner_product_param {
     num_output: 2
     weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
     }
     bias_filler {
       type: "constant"
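Both weight_filler changes here swap a hand-tuned Gaussian (std 0.01) for "xavier", which ties initialization to each layer's fan-in: by default Caffe's xavier filler draws weights uniformly from [-a, a] with a = sqrt(3 / fan_in), keeping activation magnitudes roughly stable across layers with no per-layer std left to tune.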
5 changes: 3 additions & 2 deletions examples/hdf5_classification/solver.prototxt
@@ -1,4 +1,5 @@
-net: "hdf5_classification/train_val.prototxt"
+train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
+test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
 test_iter: 250
 test_interval: 1000
 base_lr: 0.01
@@ -10,5 +11,5 @@ max_iter: 10000
 momentum: 0.9
 weight_decay: 0.0005
 snapshot: 10000
-snapshot_prefix: "hdf5_classification/data/train"
+snapshot_prefix: "examples/hdf5_classification/data/train"
 solver_mode: CPU
14 changes: 0 additions & 14 deletions examples/hdf5_classification/solver2.prototxt

This file was deleted.

7 changes: 3 additions & 4 deletions examples/hdf5_classification/train_val.prototxt
@@ -8,7 +8,7 @@ layer {
     phase: TRAIN
   }
   hdf5_data_param {
-    source: "hdf5_classification/data/train.txt"
+    source: "examples/hdf5_classification/data/train.txt"
     batch_size: 10
   }
 }
@@ -21,7 +21,7 @@ layer {
     phase: TEST
   }
   hdf5_data_param {
-    source: "hdf5_classification/data/test.txt"
+    source: "examples/hdf5_classification/data/test.txt"
     batch_size: 10
   }
 }
@@ -41,8 +41,7 @@ layer {
   inner_product_param {
     num_output: 2
     weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
     }
     bias_filler {
       type: "constant"
24 changes: 24 additions & 0 deletions examples/mnist/lenet_auto_solver.prototxt
@@ -0,0 +1,24 @@
# The train/test net protocol buffer definition
train_net: "examples/mnist/lenet_auto_train.prototxt"
test_net: "examples/mnist/lenet_auto_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "examples/mnist/lenet"
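This solver is written to be driven from Python, as in the bundled 01-learning-lenet notebook. A minimal sketch of stepping it with pycaffe (it assumes the lenet_auto_train/test prototxts named above have already been written out, which the notebook does with caffe.NetSpec):

import caffe

caffe.set_mode_cpu()
solver = caffe.SGDSolver('examples/mnist/lenet_auto_solver.prototxt')

solver.step(100)                       # run 100 SGD iterations of the train net
print(solver.net.blobs['loss'].data)   # train loss; 'loss' is the top name in the notebook's net
solver.test_nets[0].forward()          # one forward batch through the test net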
2 changes: 1 addition & 1 deletion examples/net_surgery.ipynb
@@ -6884,7 +6884,7 @@
   }
  ],
  "metadata": {
-  "description": "How to do net surgery and manually change model parameters, making a fully-convolutional classifier for dense feature extraction.",
+  "description": "How to do net surgery and manually change model parameters for custom use.",
   "example_name": "Editing model parameters",
   "include_in_docs": true,
   "kernelspec": {
37 changes: 37 additions & 0 deletions examples/pycaffe/layers/pyloss.py
@@ -0,0 +1,37 @@
import caffe
import numpy as np


class EuclideanLossLayer(caffe.Layer):
    """
    Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
    to demonstrate the class interface for developing layers in Python.
    """

    def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")

    def reshape(self, bottom, top):
        # check input dimensions match
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # difference is shape of inputs
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # loss output is scalar
        top[0].reshape(1)

    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.

    def backward(self, top, propagate_down, bottom):
        for i in range(2):
            if not propagate_down[i]:
                continue
            if i == 0:
                sign = 1
            else:
                sign = -1
            bottom[i].diff[...] = sign * self.diff / bottom[i].num
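For reference, forward computes E = 1/(2N) * sum_n ||x1_n - x2_n||^2 over the batch (N = bottom[0].num), so the gradients are dE/dx1 = (x1 - x2) / N and dE/dx2 = -(x1 - x2) / N. The sign variable in backward selects between the two, and propagate_down lets Caffe skip any input that needs no gradient.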
60 changes: 60 additions & 0 deletions examples/pycaffe/linreg.prototxt
@@ -0,0 +1,60 @@
name: 'LinearRegressionExample'
# define a simple network for linear regression on dummy data
# that computes the loss by a PythonLayer.
layer {
  type: 'DummyData'
  name: 'x'
  top: 'x'
  dummy_data_param {
    shape: { dim: 10 dim: 3 dim: 2 }
    data_filler: { type: 'gaussian' }
  }
}
layer {
  type: 'DummyData'
  name: 'y'
  top: 'y'
  dummy_data_param {
    shape: { dim: 10 dim: 3 dim: 2 }
    data_filler: { type: 'gaussian' }
  }
}
# include InnerProduct layers for parameters
# so the net will need backward
layer {
  type: 'InnerProduct'
  name: 'ipx'
  top: 'ipx'
  bottom: 'x'
  inner_product_param {
    num_output: 10
    weight_filler { type: 'xavier' }
  }
}
layer {
  type: 'InnerProduct'
  name: 'ipy'
  top: 'ipy'
  bottom: 'y'
  inner_product_param {
    num_output: 10
    weight_filler { type: 'xavier' }
  }
}
layer {
  type: 'Python'
  name: 'loss'
  top: 'loss'
  bottom: 'ipx'
  bottom: 'ipy'
  python_param {
    # the module name -- usually the filename -- that needs to be in $PYTHONPATH
    module: 'pyloss'
    # the layer name -- the class name in the module
    layer: 'EuclideanLossLayer'
  }
  # set loss weight so Caffe knows this is a loss layer.
  # since PythonLayer inherits directly from Layer, this isn't automatically
  # known to Caffe
  loss_weight: 1
}
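To exercise the Python layer end to end, something like the following should work. It is a sketch that assumes Caffe was built with WITH_PYTHON_LAYER := 1 and is run from the repository root so the paths resolve:

import sys
sys.path.append('examples/pycaffe/layers')  # so python_param module 'pyloss' imports

import caffe

caffe.set_mode_cpu()
net = caffe.Net('examples/pycaffe/linreg.prototxt', caffe.TEST)

out = net.forward()   # DummyData fills x/y; 'loss' comes from EuclideanLossLayer
print(out['loss'])
net.backward()        # runs EuclideanLossLayer.backward along with the rest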