[examples] sequence and revise notebooks
- combine classification + filter visualization
- order by classification, learning LeNet, brewing logreg, and
  fine-tuning to flickr style
- improve flow of content in classification + filter visualization
- include solver needed for learning LeNet
- edit notebook descriptions for site catalogue
shelhamer committed Jun 30, 2015
1 parent b8cc297 commit a6cb8ec
Showing 19 changed files with 22,296 additions and 26,009 deletions.
13,187 changes: 13,187 additions & 0 deletions examples/00-classification.ipynb

Large diffs are not rendered by default.

4,414 changes: 2,231 additions & 2,183 deletions examples/python_solving.ipynb → examples/01-learning-lenet.ipynb

Large diffs are not rendered by default.

5,771 changes: 5,771 additions & 0 deletions examples/02-brewing-logreg.ipynb

Large diffs are not rendered by default.

947 changes: 947 additions & 0 deletions examples/03-fine-tuning.ipynb

Large diffs are not rendered by default.

951 changes: 0 additions & 951 deletions examples/Finetune with Flickr Style Data.ipynb

This file was deleted.

3,342 changes: 0 additions & 3,342 deletions examples/classification.ipynb

This file was deleted.

2 changes: 1 addition & 1 deletion examples/detection.ipynb
@@ -8385,7 +8385,7 @@
"pygments_lexer": "ipython2",
"version": "2.7.9"
},
"priority": 3
"priority": 6
},
"nbformat": 4,
"nbformat_minor": 0
13,214 changes: 0 additions & 13,214 deletions examples/filter_visualization.ipynb

This file was deleted.

6,290 changes: 0 additions & 6,290 deletions examples/hdf5_classification.ipynb

This file was deleted.

54 changes: 54 additions & 0 deletions examples/hdf5_classification/nonlinear_auto_test.prototxt
@@ -0,0 +1,54 @@
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
54 changes: 54 additions & 0 deletions examples/hdf5_classification/nonlinear_auto_train.prototxt
@@ -0,0 +1,54 @@
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
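The two *_auto_* definitions above are the sort of nets that are generated programmatically rather than written by hand, which is why the brewing-logreg notebook needs them checked in under fixed names. As a rough sketch (assuming pycaffe's NetSpec interface; the helper name and the write-out step are illustrative, with the layer parameters mirroring the files above):

from caffe import layers as L
import caffe

def nonlinear_net(hdf5_list, batch_size):
    # HDF5 input -> ip1 (40 units) -> ReLU -> ip2 (2 units) -> accuracy + softmax loss
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5_list, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=2, weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()

with open('examples/hdf5_classification/nonlinear_auto_train.prototxt', 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))
with open('examples/hdf5_classification/nonlinear_auto_test.prototxt', 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))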
15 changes: 15 additions & 0 deletions examples/hdf5_classification/nonlinear_solver.prototxt
@@ -0,0 +1,15 @@
train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt"
test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt"
test_iter: 250
test_interval: 1000
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 5000
display: 1000
max_iter: 10000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "examples/hdf5_classification/data/train"
solver_mode: CPU
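With the train/test nets and this solver definition in place, training can be driven from Python. A minimal sketch of that usage, assuming the standard pycaffe solver interface and that the paths above resolve relative to the Caffe root:

import caffe

caffe.set_mode_cpu()  # matches solver_mode: CPU above

# load the solver (SGD by default) and run the full max_iter optimization
solver = caffe.get_solver('examples/hdf5_classification/nonlinear_solver.prototxt')
solver.solve()

# average the test net's accuracy blob over the 250 test batches (test_iter: 250)
accuracy = 0.0
for _ in range(250):
    solver.test_nets[0].forward()
    accuracy += solver.test_nets[0].blobs['accuracy'].data
print('test accuracy: {:.3f}'.format(accuracy / 250))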
10 changes: 4 additions & 6 deletions examples/hdf5_classification/nonlinear_train_val.prototxt
@@ -8,7 +8,7 @@ layer {
    phase: TRAIN
  }
  hdf5_data_param {
-    source: "hdf5_classification/data/train.txt"
+    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
@@ -21,7 +21,7 @@ layer {
    phase: TEST
  }
  hdf5_data_param {
-    source: "hdf5_classification/data/test.txt"
+    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
@@ -41,8 +41,7 @@ layer {
  inner_product_param {
    num_output: 40
    weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
    }
    bias_filler {
      type: "constant"
@@ -72,8 +71,7 @@ layer {
  inner_product_param {
    num_output: 2
    weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
    }
    bias_filler {
      type: "constant"
5 changes: 3 additions & 2 deletions examples/hdf5_classification/solver.prototxt
@@ -1,4 +1,5 @@
net: "hdf5_classification/train_val.prototxt"
train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
test_iter: 250
test_interval: 1000
base_lr: 0.01
@@ -10,5 +11,5 @@ max_iter: 10000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "hdf5_classification/data/train"
snapshot_prefix: "examples/hdf5_classification/data/train"
solver_mode: CPU
14 changes: 0 additions & 14 deletions examples/hdf5_classification/solver2.prototxt

This file was deleted.

7 changes: 3 additions & 4 deletions examples/hdf5_classification/train_val.prototxt
@@ -8,7 +8,7 @@ layer {
    phase: TRAIN
  }
  hdf5_data_param {
-    source: "hdf5_classification/data/train.txt"
+    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
@@ -21,7 +21,7 @@ layer {
    phase: TEST
  }
  hdf5_data_param {
-    source: "hdf5_classification/data/test.txt"
+    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
@@ -41,8 +41,7 @@ layer {
  inner_product_param {
    num_output: 2
    weight_filler {
-      type: "gaussian"
-      std: 0.01
+      type: "xavier"
    }
    bias_filler {
      type: "constant"
24 changes: 24 additions & 0 deletions examples/mnist/lenet_auto_solver.prototxt
@@ -0,0 +1,24 @@
# The train/test net protocol buffer definition
train_net: "examples/mnist/lenet_auto_train.prototxt"
test_net: "examples/mnist/lenet_auto_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "examples/mnist/lenet"
2 changes: 1 addition & 1 deletion examples/net_surgery.ipynb
@@ -6884,7 +6884,7 @@
}
],
"metadata": {
"description": "How to do net surgery and manually change model parameters, making a fully-convolutional classifier for dense feature extraction.",
"description": "How to do net surgery and manually change model parameters for custom use.",
"example_name": "Editing model parameters",
"include_in_docs": true,
"kernelspec": {
2 changes: 1 addition & 1 deletion examples/siamese/mnist_siamese.ipynb
@@ -1902,7 +1902,7 @@
"pygments_lexer": "ipython2",
"version": "2.7.9"
},
"priority": 6
"priority": 7
},
"nbformat": 4,
"nbformat_minor": 0
