diff --git a/examples/cifar10/cifar10_full.prototxt b/examples/cifar10/cifar10_full.prototxt index 8bbd30004fd..c16f7dca49f 100644 --- a/examples/cifar10/cifar10_full.prototxt +++ b/examples/cifar10/cifar10_full.prototxt @@ -6,13 +6,17 @@ input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 32 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -20,9 +24,9 @@ layers { stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -31,31 +35,35 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -63,15 +71,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -80,21 +88,21 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -104,15 +112,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -121,22 +129,26 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 250 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 250 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 10 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip1" top: "prob" } diff --git a/examples/cifar10/cifar10_full_train_test.prototxt b/examples/cifar10/cifar10_full_train_test.prototxt index 38cc04f4a68..d45fc61e120 100644 --- a/examples/cifar10/cifar10_full_train_test.prototxt +++ b/examples/cifar10/cifar10_full_train_test.prototxt @@ -1,41 +1,49 @@ name: "CIFAR10_full" -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_train_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TRAIN } } -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TEST + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_test_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: 
"examples/cifar10/mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -50,9 +58,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -61,31 +69,35 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -100,15 +112,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -117,21 +129,21 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -148,15 +160,15 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -165,15 +177,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 250 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 250 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 10 weight_filler { @@ -185,17 +201,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip1" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip1" bottom: "label" top: "loss" diff --git a/examples/cifar10/cifar10_quick.prototxt b/examples/cifar10/cifar10_quick.prototxt index 505158f7a34..1ad190e185f 100644 --- a/examples/cifar10/cifar10_quick.prototxt +++ b/examples/cifar10/cifar10_quick.prototxt @@ -4,13 +4,17 @@ input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 32 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -18,9 +22,9 @@ layers { stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -29,19 +33,23 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } 
convolution_param { num_output: 32 pad: 2 @@ -49,15 +57,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -66,13 +74,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "pool2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 64 pad: 2 @@ -80,15 +92,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -97,31 +109,39 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 64 } } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip2" top: "prob" } diff --git a/examples/cifar10/cifar10_quick_train_test.prototxt b/examples/cifar10/cifar10_quick_train_test.prototxt index 074bb0011e3..2317739353e 100644 --- a/examples/cifar10/cifar10_quick_train_test.prototxt +++ b/examples/cifar10/cifar10_quick_train_test.prototxt @@ -1,41 +1,49 @@ name: "CIFAR10_quick" -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_train_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TRAIN } } -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TEST + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_test_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -50,9 +58,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -61,19 +69,23 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -88,15 +100,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -105,13 +117,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: 
"Convolution" bottom: "pool2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 64 pad: 2 @@ -126,15 +142,15 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -143,13 +159,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 64 weight_filler { @@ -161,13 +181,17 @@ layers { } } } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -179,17 +203,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip2" bottom: "label" top: "loss" diff --git a/examples/feature_extraction/imagenet_val.prototxt b/examples/feature_extraction/imagenet_val.prototxt index 83fe8c1a08d..b0a1cefa00e 100644 --- a/examples/feature_extraction/imagenet_val.prototxt +++ b/examples/feature_extraction/imagenet_val.prototxt @@ -1,24 +1,24 @@ name: "CaffeNet" -layers { +layer { name: "data" - type: IMAGE_DATA + type: "ImageData" top: "data" top: "label" + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } image_data_param { source: "examples/_temp/file_list.txt" batch_size: 50 new_height: 256 new_width: 256 } - transform_param { - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: false - } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -27,15 +27,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -44,9 +44,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -55,9 +55,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -67,15 +67,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -84,9 +84,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -95,9 +95,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -106,15 +106,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -124,15 +124,15 @@ layers { 
group: 2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -142,15 +142,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -159,79 +159,79 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8" inner_product_param { num_output: 1000 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8" top: "prob" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "prob" bottom: "label" top: "accuracy" } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8" bottom: "label" top: "loss" diff --git a/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt b/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt index 5cd605bbf11..9dd2120acad 100644 --- a/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt +++ b/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt @@ -1,9 +1,17 @@ name: "CaffeNet" -layers { +layer { name: "data" - type: WINDOW_DATA + type: "WindowData" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } window_data_param { source: "examples/finetune_pascal_detection/window_file_2007_trainval.txt" batch_size: 128 @@ -13,18 +21,20 @@ layers { context_pad: 16 crop_mode: "warp" } +} +layer { + name: "data" + type: "WindowData" + top: "data" + top: "label" + include { + phase: TEST + } transform_param { mirror: true crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" } - include: { phase: TRAIN } -} -layers { - name: "data" - type: WINDOW_DATA - top: "data" - top: "label" window_data_param { source: "examples/finetune_pascal_detection/window_file_2007_test.txt" batch_size: 128 @@ -34,22 +44,20 @@ layers { context_pad: 16 crop_mode: "warp" } - transform_param { - mirror: true - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -64,15 +72,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + 
type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -81,9 +89,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -92,15 +100,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -116,15 +128,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -133,9 +145,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -144,15 +156,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -167,21 +183,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -197,21 +217,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -227,15 +251,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -244,15 +268,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -265,30 +293,34 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -301,30 +333,34 @@ layers { } } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + 
type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8_pascal" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8_pascal" - blobs_lr: 10 - blobs_lr: 20 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } inner_product_param { num_output: 21 weight_filler { @@ -337,17 +373,19 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8_pascal" bottom: "label" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc8_pascal" bottom: "label" top: "accuracy" - include { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/hdf5_classification/train_val.prototxt b/examples/hdf5_classification/train_val.prototxt index b55b6644b17..b9ccc1a93ec 100644 --- a/examples/hdf5_classification/train_val.prototxt +++ b/examples/hdf5_classification/train_val.prototxt @@ -1,35 +1,43 @@ name: "LogisticRegressionNet" -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TRAIN + } hdf5_data_param { source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } - include: { phase: TRAIN } } -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TEST + } hdf5_data_param { source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } - include: { phase: TEST } } -layers { +layer { name: "fc1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "data" top: "fc1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 2 weight_filler { @@ -42,18 +50,20 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc1" bottom: "label" top: "loss" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc1" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/hdf5_classification/train_val2.prototxt b/examples/hdf5_classification/train_val2.prototxt index b6a75650ad3..f9ef731fff9 100644 --- a/examples/hdf5_classification/train_val2.prototxt +++ b/examples/hdf5_classification/train_val2.prototxt @@ -1,35 +1,43 @@ name: "LogisticRegressionNet" -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TRAIN + } hdf5_data_param { source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } - include: { phase: TRAIN } } -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TEST + } hdf5_data_param { source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } - include: { phase: TEST } } -layers { +layer { name: "fc1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "data" top: "fc1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 40 weight_filler { @@ -42,21 +50,25 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "fc1" top: "fc1" } -layers { +layer { name: "fc2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc1" top: "fc2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 
0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 2 weight_filler { @@ -69,18 +81,20 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc2" bottom: "label" top: "loss" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/imagenet/bvlc_caffenet_full_conv.prototxt b/examples/imagenet/bvlc_caffenet_full_conv.prototxt index 395b0f0162f..7b22bfa1404 100644 --- a/examples/imagenet/bvlc_caffenet_full_conv.prototxt +++ b/examples/imagenet/bvlc_caffenet_full_conv.prototxt @@ -5,9 +5,9 @@ input_dim: 1 input_dim: 3 input_dim: 451 input_dim: 451 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -16,15 +16,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -33,9 +33,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -44,9 +44,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -56,15 +56,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -73,9 +73,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -84,9 +84,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -95,15 +95,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -113,15 +113,15 @@ layers { group: 2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -131,15 +131,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -148,9 +148,9 @@ layers { stride: 2 } } -layers { +layer { name: "fc6-conv" - type: CONVOLUTION + type: "Convolution" bottom: "pool5" top: "fc6-conv" convolution_param { @@ -158,24 +158,24 @@ layers { kernel_size: 6 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6-conv" top: "fc6-conv" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6-conv" top: "fc6-conv" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7-conv" - type: CONVOLUTION + type: "Convolution" bottom: "fc6-conv" top: "fc7-conv" convolution_param { @@ -183,24 +183,24 @@ layers { kernel_size: 1 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7-conv" top: 
"fc7-conv" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7-conv" top: "fc7-conv" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8-conv" - type: CONVOLUTION + type: "Convolution" bottom: "fc7-conv" top: "fc8-conv" convolution_param { @@ -208,9 +208,9 @@ layers { kernel_size: 1 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8-conv" top: "prob" } diff --git a/examples/mnist/lenet.prototxt b/examples/mnist/lenet.prototxt index 491fad1b1c0..cb42610fe1e 100644 --- a/examples/mnist/lenet.prototxt +++ b/examples/mnist/lenet.prototxt @@ -4,13 +4,17 @@ input_dim: 64 input_dim: 1 input_dim: 28 input_dim: 28 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -23,9 +27,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -34,13 +38,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -53,9 +61,9 @@ layers { } } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -64,13 +72,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -81,19 +93,23 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -104,9 +120,9 @@ layers { } } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip2" top: "prob" } diff --git a/examples/mnist/lenet_train_test.prototxt b/examples/mnist/lenet_train_test.prototxt index 2bd960b56aa..b18fc26cfd8 100644 --- a/examples/mnist/lenet_train_test.prototxt +++ b/examples/mnist/lenet_train_test.prototxt @@ -1,42 +1,49 @@ name: "LeNet" -layers { +layer { name: "mnist" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/mnist/mnist_train_lmdb" - backend: LMDB - batch_size: 64 + include { + phase: TRAIN } transform_param { scale: 0.00390625 } - include: { phase: TRAIN } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } } -layers { +layer { name: "mnist" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/mnist/mnist_test_lmdb" - backend: LMDB - batch_size: 100 + include { + phase: TEST } transform_param { scale: 0.00390625 } - include: { phase: TEST } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } } - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -49,9 +56,9 @@ layers { } } } -layers { +layer 
{ name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -60,13 +67,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -79,9 +90,9 @@ layers { } } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -90,13 +101,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -107,19 +122,23 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -130,17 +149,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip2" bottom: "label" top: "loss" diff --git a/examples/mnist/mnist_autoencoder.prototxt b/examples/mnist/mnist_autoencoder.prototxt index 0b33781a16f..563c7c91e52 100644 --- a/examples/mnist/mnist_autoencoder.prototxt +++ b/examples/mnist/mnist_autoencoder.prototxt @@ -1,67 +1,73 @@ name: "MNISTAutoencoder" -layers { - top: "data" +layer { name: "data" - type: DATA - data_param { - source: "examples/mnist/mnist_train_lmdb" - backend: LMDB - batch_size: 100 + type: "Data" + top: "data" + include { + phase: TRAIN } transform_param { scale: 0.0039215684 } - include: { phase: TRAIN } -} -layers { - top: "data" - name: "data" - type: DATA data_param { source: "examples/mnist/mnist_train_lmdb" - backend: LMDB batch_size: 100 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + include { + phase: TEST + stage: "test-on-train" } transform_param { scale: 0.0039215684 } - include: { - phase: TEST - stage: 'test-on-train' + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 100 + backend: LMDB } } -layers { - top: "data" +layer { name: "data" - type: DATA - data_param { - source: "examples/mnist/mnist_test_lmdb" - backend: LMDB - batch_size: 100 + type: "Data" + top: "data" + include { + phase: TEST + stage: "test-on-test" } transform_param { scale: 0.0039215684 } - include: { - phase: TEST - stage: 'test-on-test' + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB } } -layers { +layer { + name: "flatdata" + type: "Flatten" bottom: "data" top: "flatdata" - name: "flatdata" - type: FLATTEN } -layers { +layer { + name: "encode1" + type: "InnerProduct" bottom: "data" top: "encode1" - name: "encode1" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -75,21 +81,25 @@ layers { } } } -layers { +layer { + name: "encode1neuron" + type: "Sigmoid" bottom: "encode1" top: "encode1neuron" - name: "encode1neuron" - 
type: SIGMOID } -layers { +layer { + name: "encode2" + type: "InnerProduct" bottom: "encode1neuron" top: "encode2" - name: "encode2" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 500 weight_filler { @@ -103,21 +113,25 @@ layers { } } } -layers { +layer { + name: "encode2neuron" + type: "Sigmoid" bottom: "encode2" top: "encode2neuron" - name: "encode2neuron" - type: SIGMOID } -layers { +layer { + name: "encode3" + type: "InnerProduct" bottom: "encode2neuron" top: "encode3" - name: "encode3" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 250 weight_filler { @@ -131,21 +145,25 @@ layers { } } } -layers { +layer { + name: "encode3neuron" + type: "Sigmoid" bottom: "encode3" top: "encode3neuron" - name: "encode3neuron" - type: SIGMOID } -layers { +layer { + name: "encode4" + type: "InnerProduct" bottom: "encode3neuron" top: "encode4" - name: "encode4" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 30 weight_filler { @@ -159,15 +177,19 @@ layers { } } } -layers { +layer { + name: "decode4" + type: "InnerProduct" bottom: "encode4" top: "decode4" - name: "decode4" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 250 weight_filler { @@ -181,21 +203,25 @@ layers { } } } -layers { +layer { + name: "decode4neuron" + type: "Sigmoid" bottom: "decode4" top: "decode4neuron" - name: "decode4neuron" - type: SIGMOID } -layers { +layer { + name: "decode3" + type: "InnerProduct" bottom: "decode4neuron" top: "decode3" - name: "decode3" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 500 weight_filler { @@ -209,21 +235,25 @@ layers { } } } -layers { +layer { + name: "decode3neuron" + type: "Sigmoid" bottom: "decode3" top: "decode3neuron" - name: "decode3neuron" - type: SIGMOID } -layers { +layer { + name: "decode2" + type: "InnerProduct" bottom: "decode3neuron" top: "decode2" - name: "decode2" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -237,21 +267,25 @@ layers { } } } -layers { +layer { + name: "decode2neuron" + type: "Sigmoid" bottom: "decode2" top: "decode2neuron" - name: "decode2neuron" - type: SIGMOID } -layers { +layer { + name: "decode1" + type: "InnerProduct" bottom: "decode2neuron" top: "decode1" - name: "decode1" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 784 weight_filler { @@ -265,25 +299,25 @@ layers { } } } -layers { +layer { + name: "loss" + type: "SigmoidCrossEntropyLoss" bottom: "decode1" bottom: "flatdata" top: "cross_entropy_loss" - name: "loss" - type: 
SIGMOID_CROSS_ENTROPY_LOSS loss_weight: 1 } -layers { +layer { + name: "decode1neuron" + type: "Sigmoid" bottom: "decode1" top: "decode1neuron" - name: "decode1neuron" - type: SIGMOID } -layers { +layer { + name: "loss" + type: "EuclideanLoss" bottom: "decode1neuron" bottom: "flatdata" top: "l2_error" - name: "loss" - type: EUCLIDEAN_LOSS loss_weight: 0 } diff --git a/examples/siamese/mnist_siamese.prototxt b/examples/siamese/mnist_siamese.prototxt index 8dd42e9c1b5..0e903f85909 100644 --- a/examples/siamese/mnist_siamese.prototxt +++ b/examples/siamese/mnist_siamese.prototxt @@ -4,23 +4,26 @@ input_dim: 10000 input_dim: 1 input_dim: 28 input_dim: 28 - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -29,22 +32,26 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 stride: 1 } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -53,42 +60,53 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 } } - -layers { +layer { name: "feat" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2" top: "feat" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 2 } diff --git a/examples/siamese/mnist_siamese_train_test.prototxt b/examples/siamese/mnist_siamese_train_test.prototxt index 92361c31dc7..8ff864f556f 100644 --- a/examples/siamese/mnist_siamese_train_test.prototxt +++ b/examples/siamese/mnist_siamese_train_test.prototxt @@ -1,50 +1,60 @@ name: "mnist_siamese_train_test" -layers { +layer { name: "pair_data" - type: DATA + type: "Data" top: "pair_data" top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } data_param { source: "examples/siamese/mnist_siamese_train_leveldb" - scale: 0.00390625 batch_size: 64 } - include: { phase: TRAIN } } -layers { +layer { name: "pair_data" - type: DATA + type: "Data" top: "pair_data" top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } data_param { source: "examples/siamese/mnist_siamese_test_leveldb" - scale: 0.00390625 batch_size: 100 } - include: { phase: TEST } } -layers { - name: "slice_pair" - type: SLICE - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } } - - - - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: 
"data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -56,12 +66,10 @@ layers { type: "constant" } } - param: "conv1_w" - param: "conv1_b" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -70,13 +78,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -88,12 +102,10 @@ layers { type: "constant" } } - param: "conv2_w" - param: "conv2_b" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -102,13 +114,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -118,22 +136,26 @@ layers { type: "constant" } } - param: "ip1_w" - param: "ip1_b" } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -143,17 +165,20 @@ layers { type: "constant" } } - param: "ip2_w" - param: "ip2_b" } - -layers { +layer { name: "feat" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2" top: "feat" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } inner_product_param { num_output: 2 weight_filler { @@ -163,19 +188,20 @@ layers { type: "constant" } } - param: "feat_w" - param: "feat_b" } - - - -layers { +layer { name: "conv1_p" - type: CONVOLUTION + type: "Convolution" bottom: "data_p" top: "conv1_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -187,12 +213,10 @@ layers { type: "constant" } } - param: "conv1_w" - param: "conv1_b" } -layers { +layer { name: "pool1_p" - type: POOLING + type: "Pooling" bottom: "conv1_p" top: "pool1_p" pooling_param { @@ -201,13 +225,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv2_p" - type: CONVOLUTION + type: "Convolution" bottom: "pool1_p" top: "conv2_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -219,12 +249,10 @@ layers { type: "constant" } } - param: "conv2_w" - param: "conv2_b" } -layers { +layer { name: "pool2_p" - type: POOLING + type: "Pooling" bottom: "conv2_p" top: "pool2_p" pooling_param { @@ -233,13 +261,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2_p" top: "ip1_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -249,22 +283,26 @@ layers { type: "constant" } } - param: "ip1_w" - param: "ip1_b" } 
-layers { +layer { name: "relu1_p" - type: RELU + type: "ReLU" bottom: "ip1_p" top: "ip1_p" } -layers { +layer { name: "ip2_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1_p" top: "ip2_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -274,17 +312,20 @@ layers { type: "constant" } } - param: "ip2_w" - param: "ip2_b" } - -layers { +layer { name: "feat_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2_p" top: "feat_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } inner_product_param { num_output: 2 weight_filler { @@ -294,20 +335,15 @@ layers { type: "constant" } } - param: "feat_w" - param: "feat_b" } - - - -layers { - name: "loss" - type: CONTRASTIVE_LOSS - contrastive_loss_param { - margin: 1.0 - } - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } } diff --git a/models/bvlc_alexnet/deploy.prototxt b/models/bvlc_alexnet/deploy.prototxt index d010753f3fc..ced055b85d0 100644 --- a/models/bvlc_alexnet/deploy.prototxt +++ b/models/bvlc_alexnet/deploy.prototxt @@ -4,241 +4,273 @@ input_dim: 10 input_dim: 3 input_dim: 227 input_dim: 227 -layers { +layer { name: "conv1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 stride: 4 } - bottom: "data" - top: "conv1" } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" + bottom: "conv1" + top: "norm1" lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } - bottom: "conv1" - top: "norm1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" + bottom: "norm1" + top: "pool1" pooling_param { pool: MAX kernel_size: 3 stride: 2 } - bottom: "norm1" - top: "pool1" } -layers { +layer { name: "conv2" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 kernel_size: 5 group: 2 } - bottom: "pool1" - top: "conv2" } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" + bottom: "conv2" + top: "norm2" lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } - bottom: "conv2" - top: "norm2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" + bottom: "norm2" + top: "pool2" pooling_param { pool: MAX kernel_size: 3 stride: 2 } - bottom: "norm2" - top: "pool2" } -layers { +layer { name: "conv3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 kernel_size: 3 } - bottom: "pool2" - top: "conv3" } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - 
type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 kernel_size: 3 group: 2 } - bottom: "conv3" - top: "conv4" } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 kernel_size: 3 group: 2 } - bottom: "conv4" - top: "conv5" } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" + bottom: "conv5" + top: "pool5" pooling_param { pool: MAX kernel_size: 3 stride: 2 } - bottom: "conv5" - top: "pool5" } -layers { +layer { name: "fc6" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 } - bottom: "pool5" - top: "fc6" } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" + bottom: "fc6" + top: "fc6" dropout_param { dropout_ratio: 0.5 } - bottom: "fc6" - top: "fc6" } -layers { +layer { name: "fc7" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 } - bottom: "fc6" - top: "fc7" } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" + bottom: "fc7" + top: "fc7" dropout_param { dropout_ratio: 0.5 } - bottom: "fc7" - top: "fc7" } -layers { +layer { name: "fc8" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 } - bottom: "fc7" - top: "fc8" } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8" top: "prob" } diff --git a/models/bvlc_alexnet/train_val.prototxt b/models/bvlc_alexnet/train_val.prototxt index 717b6fa447c..588b4ea7cb5 100644 --- a/models/bvlc_alexnet/train_val.prototxt +++ b/models/bvlc_alexnet/train_val.prototxt @@ -1,47 +1,55 @@ name: "AlexNet" -layers { +layer { name: "data" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/imagenet/ilsvrc12_train_lmdb" - backend: LMDB - batch_size: 256 + include { + phase: TRAIN } transform_param { + mirror: true crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: true } - include: { phase: TRAIN } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 256 + backend: LMDB + } } -layers { +layer { name: "data" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/imagenet/ilsvrc12_val_lmdb" - backend: LMDB - batch_size: 50 + include 
{ + phase: TEST } transform_param { + mirror: false crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: false } - include: { phase: TEST } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -56,15 +64,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "conv1" top: "norm1" lrn_param { @@ -73,9 +81,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "norm1" top: "pool1" pooling_param { @@ -84,15 +92,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -108,15 +120,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "conv2" top: "norm2" lrn_param { @@ -125,9 +137,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "norm2" top: "pool2" pooling_param { @@ -136,15 +148,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "pool2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -159,21 +175,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -189,21 +209,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -219,15 +243,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -236,15 +260,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -257,30 +285,34 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + 
type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -293,30 +325,34 @@ layers { } } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -329,17 +365,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc8" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8" bottom: "label" top: "loss" diff --git a/models/bvlc_googlenet/deploy.prototxt b/models/bvlc_googlenet/deploy.prototxt index e31a4c9cd00..4648bf26efc 100644 --- a/models/bvlc_googlenet/deploy.prototxt +++ b/models/bvlc_googlenet/deploy.prototxt @@ -4,15 +4,19 @@ input_dim: 10 input_dim: 3 input_dim: 224 input_dim: 224 -layers { +layer { + name: "conv1/7x7_s2" + type: "Convolution" bottom: "data" top: "conv1/7x7_s2" - name: "conv1/7x7_s2" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 3 @@ -28,43 +32,47 @@ layers { } } } -layers { +layer { + name: "conv1/relu_7x7" + type: "ReLU" bottom: "conv1/7x7_s2" top: "conv1/7x7_s2" - name: "conv1/relu_7x7" - type: RELU } -layers { +layer { + name: "pool1/3x3_s2" + type: "Pooling" bottom: "conv1/7x7_s2" top: "pool1/3x3_s2" - name: "pool1/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "pool1/norm1" + type: "LRN" bottom: "pool1/3x3_s2" top: "pool1/norm1" - name: "pool1/norm1" - type: LRN lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } -layers { +layer { + name: "conv2/3x3_reduce" + type: "Convolution" bottom: "pool1/norm1" top: "conv2/3x3_reduce" - name: "conv2/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -78,21 +86,25 @@ layers { } } } -layers { +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" bottom: "conv2/3x3_reduce" top: "conv2/3x3_reduce" - name: "conv2/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "conv2/3x3" + type: "Convolution" bottom: "conv2/3x3_reduce" top: "conv2/3x3" - name: "conv2/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 pad: 1 @@ -107,43 +119,47 @@ layers { } } } -layers { +layer { + name: "conv2/relu_3x3" + type: "ReLU" 
bottom: "conv2/3x3" top: "conv2/3x3" - name: "conv2/relu_3x3" - type: RELU } -layers { +layer { + name: "conv2/norm2" + type: "LRN" bottom: "conv2/3x3" top: "conv2/norm2" - name: "conv2/norm2" - type: LRN lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } -layers { +layer { + name: "pool2/3x3_s2" + type: "Pooling" bottom: "conv2/norm2" top: "pool2/3x3_s2" - name: "pool2/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_3a/1x1" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/1x1" - name: "inception_3a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -157,21 +173,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" bottom: "inception_3a/1x1" top: "inception_3a/1x1" - name: "inception_3a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/3x3_reduce" - name: "inception_3a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 1 @@ -185,21 +205,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_3a/3x3_reduce" top: "inception_3a/3x3_reduce" - name: "inception_3a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_3a/3x3" + type: "Convolution" bottom: "inception_3a/3x3_reduce" top: "inception_3a/3x3" - name: "inception_3a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 1 @@ -214,21 +238,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" bottom: "inception_3a/3x3" top: "inception_3a/3x3" - name: "inception_3a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/5x5_reduce" - name: "inception_3a/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 16 kernel_size: 1 @@ -242,21 +270,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_3a/5x5_reduce" top: "inception_3a/5x5_reduce" - name: "inception_3a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_3a/5x5" + type: "Convolution" bottom: "inception_3a/5x5_reduce" top: "inception_3a/5x5" - name: "inception_3a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 pad: 2 @@ -271,17 +303,17 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" bottom: "inception_3a/5x5" top: "inception_3a/5x5" - name: "inception_3a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_3a/pool" + type: "Pooling" bottom: "pool2/3x3_s2" top: "inception_3a/pool" - name: "inception_3a/pool" - type: POOLING pooling_param { pool: 
MAX kernel_size: 3 @@ -289,15 +321,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_3a/pool_proj" + type: "Convolution" bottom: "inception_3a/pool" top: "inception_3a/pool_proj" - name: "inception_3a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -311,30 +347,34 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" bottom: "inception_3a/pool_proj" top: "inception_3a/pool_proj" - name: "inception_3a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_3a/output" + type: "Concat" bottom: "inception_3a/1x1" bottom: "inception_3a/3x3" bottom: "inception_3a/5x5" bottom: "inception_3a/pool_proj" top: "inception_3a/output" - name: "inception_3a/output" - type: CONCAT } -layers { +layer { + name: "inception_3b/1x1" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/1x1" - name: "inception_3b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -348,21 +388,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" bottom: "inception_3b/1x1" top: "inception_3b/1x1" - name: "inception_3b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/3x3_reduce" - name: "inception_3b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -376,21 +420,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_3b/3x3_reduce" top: "inception_3b/3x3_reduce" - name: "inception_3b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_3b/3x3" + type: "Convolution" bottom: "inception_3b/3x3_reduce" top: "inception_3b/3x3" - name: "inception_3b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 pad: 1 @@ -405,21 +453,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" bottom: "inception_3b/3x3" top: "inception_3b/3x3" - name: "inception_3b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/5x5_reduce" - name: "inception_3b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -433,21 +485,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_3b/5x5_reduce" top: "inception_3b/5x5_reduce" - name: "inception_3b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_3b/5x5" + type: "Convolution" bottom: "inception_3b/5x5_reduce" top: "inception_3b/5x5" - name: "inception_3b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 pad: 2 @@ -462,17 +518,17 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" bottom: "inception_3b/5x5" top: "inception_3b/5x5" - name: "inception_3b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_3b/pool" + type: "Pooling" bottom: "inception_3a/output" top: "inception_3b/pool" - name: "inception_3b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -480,15 +536,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_3b/pool_proj" + type: "Convolution" bottom: "inception_3b/pool" top: "inception_3b/pool_proj" - name: "inception_3b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -502,41 +562,45 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" bottom: "inception_3b/pool_proj" top: "inception_3b/pool_proj" - name: "inception_3b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_3b/output" + type: "Concat" bottom: "inception_3b/1x1" bottom: "inception_3b/3x3" bottom: "inception_3b/5x5" bottom: "inception_3b/pool_proj" top: "inception_3b/output" - name: "inception_3b/output" - type: CONCAT } -layers { +layer { + name: "pool3/3x3_s2" + type: "Pooling" bottom: "inception_3b/output" top: "pool3/3x3_s2" - name: "pool3/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_4a/1x1" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/1x1" - name: "inception_4a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 kernel_size: 1 @@ -550,21 +614,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" bottom: "inception_4a/1x1" top: "inception_4a/1x1" - name: "inception_4a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/3x3_reduce" - name: "inception_4a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 1 @@ -578,21 +646,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4a/3x3_reduce" top: "inception_4a/3x3_reduce" - name: "inception_4a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4a/3x3" + type: "Convolution" bottom: "inception_4a/3x3_reduce" top: "inception_4a/3x3" - name: "inception_4a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 208 pad: 1 @@ -607,21 +679,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" bottom: "inception_4a/3x3" top: "inception_4a/3x3" - name: "inception_4a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/5x5_reduce" - name: "inception_4a/5x5_reduce" - type: 
CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 16 kernel_size: 1 @@ -635,21 +711,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4a/5x5_reduce" top: "inception_4a/5x5_reduce" - name: "inception_4a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4a/5x5" + type: "Convolution" bottom: "inception_4a/5x5_reduce" top: "inception_4a/5x5" - name: "inception_4a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 48 pad: 2 @@ -664,17 +744,17 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" bottom: "inception_4a/5x5" top: "inception_4a/5x5" - name: "inception_4a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4a/pool" + type: "Pooling" bottom: "pool3/3x3_s2" top: "inception_4a/pool" - name: "inception_4a/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -682,15 +762,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4a/pool_proj" + type: "Convolution" bottom: "inception_4a/pool" top: "inception_4a/pool_proj" - name: "inception_4a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -704,30 +788,34 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" bottom: "inception_4a/pool_proj" top: "inception_4a/pool_proj" - name: "inception_4a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4a/output" + type: "Concat" bottom: "inception_4a/1x1" bottom: "inception_4a/3x3" bottom: "inception_4a/5x5" bottom: "inception_4a/pool_proj" top: "inception_4a/output" - name: "inception_4a/output" - type: CONCAT } -layers { +layer { + name: "inception_4b/1x1" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/1x1" - name: "inception_4b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 160 kernel_size: 1 @@ -741,21 +829,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" bottom: "inception_4b/1x1" top: "inception_4b/1x1" - name: "inception_4b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/3x3_reduce" - name: "inception_4b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 112 kernel_size: 1 @@ -769,21 +861,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4b/3x3_reduce" top: "inception_4b/3x3_reduce" - name: "inception_4b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4b/3x3" + type: "Convolution" bottom: "inception_4b/3x3_reduce" top: "inception_4b/3x3" - name: "inception_4b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 224 pad: 1 @@ -798,21 +894,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" bottom: "inception_4b/3x3" top: "inception_4b/3x3" - name: "inception_4b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/5x5_reduce" - name: "inception_4b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 24 kernel_size: 1 @@ -826,21 +926,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4b/5x5_reduce" top: "inception_4b/5x5_reduce" - name: "inception_4b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4b/5x5" + type: "Convolution" bottom: "inception_4b/5x5_reduce" top: "inception_4b/5x5" - name: "inception_4b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -855,17 +959,17 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" bottom: "inception_4b/5x5" top: "inception_4b/5x5" - name: "inception_4b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4b/pool" + type: "Pooling" bottom: "inception_4a/output" top: "inception_4b/pool" - name: "inception_4b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -873,15 +977,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4b/pool_proj" + type: "Convolution" bottom: "inception_4b/pool" top: "inception_4b/pool_proj" - name: "inception_4b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -895,30 +1003,34 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" bottom: "inception_4b/pool_proj" top: "inception_4b/pool_proj" - name: "inception_4b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4b/output" + type: "Concat" bottom: "inception_4b/1x1" bottom: "inception_4b/3x3" bottom: "inception_4b/5x5" bottom: "inception_4b/pool_proj" top: "inception_4b/output" - name: "inception_4b/output" - type: CONCAT } -layers { +layer { + name: "inception_4c/1x1" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/1x1" - name: "inception_4c/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -932,21 +1044,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" bottom: "inception_4c/1x1" top: "inception_4c/1x1" - name: "inception_4c/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/3x3_reduce" - name: "inception_4c/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { 
num_output: 128 kernel_size: 1 @@ -960,21 +1076,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4c/3x3_reduce" top: "inception_4c/3x3_reduce" - name: "inception_4c/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4c/3x3" + type: "Convolution" bottom: "inception_4c/3x3_reduce" top: "inception_4c/3x3" - name: "inception_4c/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -989,21 +1109,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" bottom: "inception_4c/3x3" top: "inception_4c/3x3" - name: "inception_4c/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/5x5_reduce" - name: "inception_4c/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 24 kernel_size: 1 @@ -1017,21 +1141,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4c/5x5_reduce" top: "inception_4c/5x5_reduce" - name: "inception_4c/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4c/5x5" + type: "Convolution" bottom: "inception_4c/5x5_reduce" top: "inception_4c/5x5" - name: "inception_4c/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -1046,17 +1174,17 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" bottom: "inception_4c/5x5" top: "inception_4c/5x5" - name: "inception_4c/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4c/pool" + type: "Pooling" bottom: "inception_4b/output" top: "inception_4c/pool" - name: "inception_4c/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1064,15 +1192,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4c/pool_proj" + type: "Convolution" bottom: "inception_4c/pool" top: "inception_4c/pool_proj" - name: "inception_4c/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -1086,30 +1218,34 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" bottom: "inception_4c/pool_proj" top: "inception_4c/pool_proj" - name: "inception_4c/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4c/output" + type: "Concat" bottom: "inception_4c/1x1" bottom: "inception_4c/3x3" bottom: "inception_4c/5x5" bottom: "inception_4c/pool_proj" top: "inception_4c/output" - name: "inception_4c/output" - type: CONCAT } -layers { +layer { + name: "inception_4d/1x1" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/1x1" - name: "inception_4d/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 112 kernel_size: 1 @@ -1123,21 +1259,25 @@ layers { } } } -layers { 
+layer { + name: "inception_4d/relu_1x1" + type: "ReLU" bottom: "inception_4d/1x1" top: "inception_4d/1x1" - name: "inception_4d/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/3x3_reduce" - name: "inception_4d/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 144 kernel_size: 1 @@ -1151,21 +1291,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4d/3x3_reduce" top: "inception_4d/3x3_reduce" - name: "inception_4d/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4d/3x3" + type: "Convolution" bottom: "inception_4d/3x3_reduce" top: "inception_4d/3x3" - name: "inception_4d/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 288 pad: 1 @@ -1180,21 +1324,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" bottom: "inception_4d/3x3" top: "inception_4d/3x3" - name: "inception_4d/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/5x5_reduce" - name: "inception_4d/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1208,21 +1356,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4d/5x5_reduce" top: "inception_4d/5x5_reduce" - name: "inception_4d/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4d/5x5" + type: "Convolution" bottom: "inception_4d/5x5_reduce" top: "inception_4d/5x5" - name: "inception_4d/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -1237,17 +1389,17 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" bottom: "inception_4d/5x5" top: "inception_4d/5x5" - name: "inception_4d/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4d/pool" + type: "Pooling" bottom: "inception_4c/output" top: "inception_4d/pool" - name: "inception_4d/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1255,15 +1407,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4d/pool_proj" + type: "Convolution" bottom: "inception_4d/pool" top: "inception_4d/pool_proj" - name: "inception_4d/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -1277,30 +1433,34 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" bottom: "inception_4d/pool_proj" top: "inception_4d/pool_proj" - name: "inception_4d/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4d/output" + type: "Concat" bottom: "inception_4d/1x1" bottom: "inception_4d/3x3" bottom: "inception_4d/5x5" bottom: 
"inception_4d/pool_proj" top: "inception_4d/output" - name: "inception_4d/output" - type: CONCAT } -layers { +layer { + name: "inception_4e/1x1" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/1x1" - name: "inception_4e/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 kernel_size: 1 @@ -1314,21 +1474,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_1x1" + type: "ReLU" bottom: "inception_4e/1x1" top: "inception_4e/1x1" - name: "inception_4e/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/3x3_reduce" - name: "inception_4e/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 160 kernel_size: 1 @@ -1342,21 +1506,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4e/3x3_reduce" top: "inception_4e/3x3_reduce" - name: "inception_4e/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4e/3x3" + type: "Convolution" bottom: "inception_4e/3x3_reduce" top: "inception_4e/3x3" - name: "inception_4e/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 320 pad: 1 @@ -1371,21 +1539,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" bottom: "inception_4e/3x3" top: "inception_4e/3x3" - name: "inception_4e/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/5x5_reduce" - name: "inception_4e/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1399,21 +1571,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4e/5x5_reduce" top: "inception_4e/5x5_reduce" - name: "inception_4e/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4e/5x5" + type: "Convolution" bottom: "inception_4e/5x5_reduce" top: "inception_4e/5x5" - name: "inception_4e/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 2 @@ -1428,17 +1604,17 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_5x5" + type: "ReLU" bottom: "inception_4e/5x5" top: "inception_4e/5x5" - name: "inception_4e/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4e/pool" + type: "Pooling" bottom: "inception_4d/output" top: "inception_4e/pool" - name: "inception_4e/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1446,15 +1622,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4e/pool_proj" + type: "Convolution" bottom: "inception_4e/pool" top: "inception_4e/pool_proj" - name: "inception_4e/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1468,41 +1648,45 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" bottom: "inception_4e/pool_proj" top: "inception_4e/pool_proj" - name: "inception_4e/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4e/output" + type: "Concat" bottom: "inception_4e/1x1" bottom: "inception_4e/3x3" bottom: "inception_4e/5x5" bottom: "inception_4e/pool_proj" top: "inception_4e/output" - name: "inception_4e/output" - type: CONCAT } -layers { +layer { + name: "pool4/3x3_s2" + type: "Pooling" bottom: "inception_4e/output" top: "pool4/3x3_s2" - name: "pool4/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_5a/1x1" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/1x1" - name: "inception_5a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 kernel_size: 1 @@ -1516,21 +1700,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" bottom: "inception_5a/1x1" top: "inception_5a/1x1" - name: "inception_5a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/3x3_reduce" - name: "inception_5a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 160 kernel_size: 1 @@ -1544,21 +1732,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_5a/3x3_reduce" top: "inception_5a/3x3_reduce" - name: "inception_5a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_5a/3x3" + type: "Convolution" bottom: "inception_5a/3x3_reduce" top: "inception_5a/3x3" - name: "inception_5a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 320 pad: 1 @@ -1573,21 +1765,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" bottom: "inception_5a/3x3" top: "inception_5a/3x3" - name: "inception_5a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/5x5_reduce" - name: "inception_5a/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1601,21 +1797,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_5a/5x5_reduce" top: "inception_5a/5x5_reduce" - name: "inception_5a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_5a/5x5" + type: "Convolution" bottom: "inception_5a/5x5_reduce" top: "inception_5a/5x5" - name: "inception_5a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 2 @@ 
-1630,17 +1830,17 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" bottom: "inception_5a/5x5" top: "inception_5a/5x5" - name: "inception_5a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_5a/pool" + type: "Pooling" bottom: "pool4/3x3_s2" top: "inception_5a/pool" - name: "inception_5a/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1648,15 +1848,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_5a/pool_proj" + type: "Convolution" bottom: "inception_5a/pool" top: "inception_5a/pool_proj" - name: "inception_5a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1670,30 +1874,34 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" bottom: "inception_5a/pool_proj" top: "inception_5a/pool_proj" - name: "inception_5a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_5a/output" + type: "Concat" bottom: "inception_5a/1x1" bottom: "inception_5a/3x3" bottom: "inception_5a/5x5" bottom: "inception_5a/pool_proj" top: "inception_5a/output" - name: "inception_5a/output" - type: CONCAT } -layers { +layer { + name: "inception_5b/1x1" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/1x1" - name: "inception_5b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 kernel_size: 1 @@ -1707,21 +1915,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" bottom: "inception_5b/1x1" top: "inception_5b/1x1" - name: "inception_5b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/3x3_reduce" - name: "inception_5b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 kernel_size: 1 @@ -1735,21 +1947,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_5b/3x3_reduce" top: "inception_5b/3x3_reduce" - name: "inception_5b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_5b/3x3" + type: "Convolution" bottom: "inception_5b/3x3_reduce" top: "inception_5b/3x3" - name: "inception_5b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -1764,21 +1980,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" bottom: "inception_5b/3x3" top: "inception_5b/3x3" - name: "inception_5b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_5b/5x5_reduce" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/5x5_reduce" - name: "inception_5b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 48 kernel_size: 1 @@ -1792,21 +2012,25 @@ layers { } } } -layers { +layer { + name: 
"inception_5b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_5b/5x5_reduce" top: "inception_5b/5x5_reduce" - name: "inception_5b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_5b/5x5" + type: "Convolution" bottom: "inception_5b/5x5_reduce" top: "inception_5b/5x5" - name: "inception_5b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 2 @@ -1821,17 +2045,17 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" bottom: "inception_5b/5x5" top: "inception_5b/5x5" - name: "inception_5b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_5b/pool" + type: "Pooling" bottom: "inception_5a/output" top: "inception_5b/pool" - name: "inception_5b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1839,15 +2063,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_5b/pool_proj" + type: "Convolution" bottom: "inception_5b/pool" top: "inception_5b/pool_proj" - name: "inception_5b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1861,50 +2089,54 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" bottom: "inception_5b/pool_proj" top: "inception_5b/pool_proj" - name: "inception_5b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_5b/output" + type: "Concat" bottom: "inception_5b/1x1" bottom: "inception_5b/3x3" bottom: "inception_5b/5x5" bottom: "inception_5b/pool_proj" top: "inception_5b/output" - name: "inception_5b/output" - type: CONCAT } -layers { +layer { + name: "pool5/7x7_s1" + type: "Pooling" bottom: "inception_5b/output" top: "pool5/7x7_s1" - name: "pool5/7x7_s1" - type: POOLING pooling_param { pool: AVE kernel_size: 7 stride: 1 } } -layers { +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" bottom: "pool5/7x7_s1" top: "pool5/7x7_s1" - name: "pool5/drop_7x7_s1" - type: DROPOUT dropout_param { dropout_ratio: 0.4 } } -layers { +layer { + name: "loss3/classifier" + type: "InnerProduct" bottom: "pool5/7x7_s1" top: "loss3/classifier" - name: "loss3/classifier" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -1916,9 +2148,9 @@ layers { } } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "loss3/classifier" top: "prob" -} \ No newline at end of file +} diff --git a/models/bvlc_googlenet/train_val.prototxt b/models/bvlc_googlenet/train_val.prototxt index cd8f38abdc8..79ede2b9d9c 100644 --- a/models/bvlc_googlenet/train_val.prototxt +++ b/models/bvlc_googlenet/train_val.prototxt @@ -1,14 +1,9 @@ name: "GoogleNet" -layers { +layer { + name: "data" + type: "Data" top: "data" top: "label" - name: "data" - type: DATA - data_param { - source: "examples/imagenet/ilsvrc12_train_lmdb" - batch_size: 32 - backend: LMDB - } include { phase: TRAIN } @@ -19,17 +14,17 @@ layers { mean_value: 117 mean_value: 123 } -} -layers { - top: "data" - top: "label" - name: "data" - type: DATA data_param { - source: "examples/imagenet/ilsvrc12_val_lmdb" - batch_size: 50 + source: "examples/imagenet/ilsvrc12_train_lmdb" + 
batch_size: 32 backend: LMDB } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" include { phase: TEST } @@ -40,16 +35,25 @@ layers { mean_value: 117 mean_value: 123 } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } } -layers { +layer { + name: "conv1/7x7_s2" + type: "Convolution" bottom: "data" top: "conv1/7x7_s2" - name: "conv1/7x7_s2" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 3 @@ -65,43 +69,47 @@ layers { } } } -layers { +layer { + name: "conv1/relu_7x7" + type: "ReLU" bottom: "conv1/7x7_s2" top: "conv1/7x7_s2" - name: "conv1/relu_7x7" - type: RELU } -layers { +layer { + name: "pool1/3x3_s2" + type: "Pooling" bottom: "conv1/7x7_s2" top: "pool1/3x3_s2" - name: "pool1/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "pool1/norm1" + type: "LRN" bottom: "pool1/3x3_s2" top: "pool1/norm1" - name: "pool1/norm1" - type: LRN lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } -layers { +layer { + name: "conv2/3x3_reduce" + type: "Convolution" bottom: "pool1/norm1" top: "conv2/3x3_reduce" - name: "conv2/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -115,21 +123,25 @@ layers { } } } -layers { +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" bottom: "conv2/3x3_reduce" top: "conv2/3x3_reduce" - name: "conv2/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "conv2/3x3" + type: "Convolution" bottom: "conv2/3x3_reduce" top: "conv2/3x3" - name: "conv2/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 pad: 1 @@ -144,43 +156,47 @@ layers { } } } -layers { +layer { + name: "conv2/relu_3x3" + type: "ReLU" bottom: "conv2/3x3" top: "conv2/3x3" - name: "conv2/relu_3x3" - type: RELU } -layers { +layer { + name: "conv2/norm2" + type: "LRN" bottom: "conv2/3x3" top: "conv2/norm2" - name: "conv2/norm2" - type: LRN lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } -layers { +layer { + name: "pool2/3x3_s2" + type: "Pooling" bottom: "conv2/norm2" top: "pool2/3x3_s2" - name: "pool2/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_3a/1x1" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/1x1" - name: "inception_3a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -194,21 +210,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" bottom: "inception_3a/1x1" top: "inception_3a/1x1" - name: "inception_3a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/3x3_reduce" - name: "inception_3a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { 
num_output: 96 kernel_size: 1 @@ -222,21 +242,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_3a/3x3_reduce" top: "inception_3a/3x3_reduce" - name: "inception_3a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_3a/3x3" + type: "Convolution" bottom: "inception_3a/3x3_reduce" top: "inception_3a/3x3" - name: "inception_3a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 1 @@ -251,21 +275,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" bottom: "inception_3a/3x3" top: "inception_3a/3x3" - name: "inception_3a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" bottom: "pool2/3x3_s2" top: "inception_3a/5x5_reduce" - name: "inception_3a/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 16 kernel_size: 1 @@ -279,21 +307,25 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_3a/5x5_reduce" top: "inception_3a/5x5_reduce" - name: "inception_3a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_3a/5x5" + type: "Convolution" bottom: "inception_3a/5x5_reduce" top: "inception_3a/5x5" - name: "inception_3a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 pad: 2 @@ -308,17 +340,17 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" bottom: "inception_3a/5x5" top: "inception_3a/5x5" - name: "inception_3a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_3a/pool" + type: "Pooling" bottom: "pool2/3x3_s2" top: "inception_3a/pool" - name: "inception_3a/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -326,15 +358,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_3a/pool_proj" + type: "Convolution" bottom: "inception_3a/pool" top: "inception_3a/pool_proj" - name: "inception_3a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -348,30 +384,34 @@ layers { } } } -layers { +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" bottom: "inception_3a/pool_proj" top: "inception_3a/pool_proj" - name: "inception_3a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_3a/output" + type: "Concat" bottom: "inception_3a/1x1" bottom: "inception_3a/3x3" bottom: "inception_3a/5x5" bottom: "inception_3a/pool_proj" top: "inception_3a/output" - name: "inception_3a/output" - type: CONCAT } -layers { +layer { + name: "inception_3b/1x1" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/1x1" - name: "inception_3b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -385,21 +425,25 @@ layers { } } } -layers { +layer { + name: 
"inception_3b/relu_1x1" + type: "ReLU" bottom: "inception_3b/1x1" top: "inception_3b/1x1" - name: "inception_3b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/3x3_reduce" - name: "inception_3b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -413,21 +457,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_3b/3x3_reduce" top: "inception_3b/3x3_reduce" - name: "inception_3b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_3b/3x3" + type: "Convolution" bottom: "inception_3b/3x3_reduce" top: "inception_3b/3x3" - name: "inception_3b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 pad: 1 @@ -442,21 +490,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" bottom: "inception_3b/3x3" top: "inception_3b/3x3" - name: "inception_3b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" bottom: "inception_3a/output" top: "inception_3b/5x5_reduce" - name: "inception_3b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -470,21 +522,25 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_3b/5x5_reduce" top: "inception_3b/5x5_reduce" - name: "inception_3b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_3b/5x5" + type: "Convolution" bottom: "inception_3b/5x5_reduce" top: "inception_3b/5x5" - name: "inception_3b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 pad: 2 @@ -499,17 +555,17 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" bottom: "inception_3b/5x5" top: "inception_3b/5x5" - name: "inception_3b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_3b/pool" + type: "Pooling" bottom: "inception_3a/output" top: "inception_3b/pool" - name: "inception_3b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -517,15 +573,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_3b/pool_proj" + type: "Convolution" bottom: "inception_3b/pool" top: "inception_3b/pool_proj" - name: "inception_3b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -539,41 +599,45 @@ layers { } } } -layers { +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" bottom: "inception_3b/pool_proj" top: "inception_3b/pool_proj" - name: "inception_3b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_3b/output" + type: "Concat" bottom: "inception_3b/1x1" bottom: "inception_3b/3x3" bottom: "inception_3b/5x5" bottom: "inception_3b/pool_proj" top: 
"inception_3b/output" - name: "inception_3b/output" - type: CONCAT } -layers { +layer { + name: "pool3/3x3_s2" + type: "Pooling" bottom: "inception_3b/output" top: "pool3/3x3_s2" - name: "pool3/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_4a/1x1" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/1x1" - name: "inception_4a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 kernel_size: 1 @@ -587,21 +651,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" bottom: "inception_4a/1x1" top: "inception_4a/1x1" - name: "inception_4a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/3x3_reduce" - name: "inception_4a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 1 @@ -615,21 +683,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4a/3x3_reduce" top: "inception_4a/3x3_reduce" - name: "inception_4a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4a/3x3" + type: "Convolution" bottom: "inception_4a/3x3_reduce" top: "inception_4a/3x3" - name: "inception_4a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 208 pad: 1 @@ -644,21 +716,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" bottom: "inception_4a/3x3" top: "inception_4a/3x3" - name: "inception_4a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" bottom: "pool3/3x3_s2" top: "inception_4a/5x5_reduce" - name: "inception_4a/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 16 kernel_size: 1 @@ -672,21 +748,25 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4a/5x5_reduce" top: "inception_4a/5x5_reduce" - name: "inception_4a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4a/5x5" + type: "Convolution" bottom: "inception_4a/5x5_reduce" top: "inception_4a/5x5" - name: "inception_4a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 48 pad: 2 @@ -701,17 +781,17 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" bottom: "inception_4a/5x5" top: "inception_4a/5x5" - name: "inception_4a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4a/pool" + type: "Pooling" bottom: "pool3/3x3_s2" top: "inception_4a/pool" - name: "inception_4a/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -719,15 +799,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4a/pool_proj" + type: "Convolution" bottom: "inception_4a/pool" top: 
"inception_4a/pool_proj" - name: "inception_4a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -741,41 +825,45 @@ layers { } } } -layers { +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" bottom: "inception_4a/pool_proj" top: "inception_4a/pool_proj" - name: "inception_4a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4a/output" + type: "Concat" bottom: "inception_4a/1x1" bottom: "inception_4a/3x3" bottom: "inception_4a/5x5" bottom: "inception_4a/pool_proj" top: "inception_4a/output" - name: "inception_4a/output" - type: CONCAT } -layers { +layer { + name: "loss1/ave_pool" + type: "Pooling" bottom: "inception_4a/output" top: "loss1/ave_pool" - name: "loss1/ave_pool" - type: POOLING pooling_param { pool: AVE kernel_size: 5 stride: 3 } } -layers { +layer { + name: "loss1/conv" + type: "Convolution" bottom: "loss1/ave_pool" top: "loss1/conv" - name: "loss1/conv" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -789,21 +877,25 @@ layers { } } } -layers { +layer { + name: "loss1/relu_conv" + type: "ReLU" bottom: "loss1/conv" top: "loss1/conv" - name: "loss1/relu_conv" - type: RELU } -layers { +layer { + name: "loss1/fc" + type: "InnerProduct" bottom: "loss1/conv" top: "loss1/fc" - name: "loss1/fc" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1024 weight_filler { @@ -816,30 +908,34 @@ layers { } } } -layers { +layer { + name: "loss1/relu_fc" + type: "ReLU" bottom: "loss1/fc" top: "loss1/fc" - name: "loss1/relu_fc" - type: RELU } -layers { +layer { + name: "loss1/drop_fc" + type: "Dropout" bottom: "loss1/fc" top: "loss1/fc" - name: "loss1/drop_fc" - type: DROPOUT dropout_param { dropout_ratio: 0.7 } } -layers { +layer { + name: "loss1/classifier" + type: "InnerProduct" bottom: "loss1/fc" top: "loss1/classifier" - name: "loss1/classifier" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -852,46 +948,50 @@ layers { } } } -layers { +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" bottom: "loss1/classifier" bottom: "label" top: "loss1/loss1" - name: "loss1/loss" - type: SOFTMAX_LOSS loss_weight: 0.3 } -layers { +layer { + name: "loss1/top-1" + type: "Accuracy" bottom: "loss1/classifier" bottom: "label" top: "loss1/top-1" - name: "loss1/top-1" - type: ACCURACY include { phase: TEST } } -layers { +layer { + name: "loss1/top-5" + type: "Accuracy" bottom: "loss1/classifier" bottom: "label" top: "loss1/top-5" - name: "loss1/top-5" - type: ACCURACY - accuracy_param { - top_k: 5 - } include { phase: TEST } + accuracy_param { + top_k: 5 + } } -layers { +layer { + name: "inception_4b/1x1" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/1x1" - name: "inception_4b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { 
num_output: 160 kernel_size: 1 @@ -905,21 +1005,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" bottom: "inception_4b/1x1" top: "inception_4b/1x1" - name: "inception_4b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/3x3_reduce" - name: "inception_4b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 112 kernel_size: 1 @@ -933,21 +1037,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4b/3x3_reduce" top: "inception_4b/3x3_reduce" - name: "inception_4b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4b/3x3" + type: "Convolution" bottom: "inception_4b/3x3_reduce" top: "inception_4b/3x3" - name: "inception_4b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 224 pad: 1 @@ -962,21 +1070,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" bottom: "inception_4b/3x3" top: "inception_4b/3x3" - name: "inception_4b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" bottom: "inception_4a/output" top: "inception_4b/5x5_reduce" - name: "inception_4b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 24 kernel_size: 1 @@ -990,21 +1102,25 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4b/5x5_reduce" top: "inception_4b/5x5_reduce" - name: "inception_4b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4b/5x5" + type: "Convolution" bottom: "inception_4b/5x5_reduce" top: "inception_4b/5x5" - name: "inception_4b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -1019,17 +1135,17 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" bottom: "inception_4b/5x5" top: "inception_4b/5x5" - name: "inception_4b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4b/pool" + type: "Pooling" bottom: "inception_4a/output" top: "inception_4b/pool" - name: "inception_4b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1037,15 +1153,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4b/pool_proj" + type: "Convolution" bottom: "inception_4b/pool" top: "inception_4b/pool_proj" - name: "inception_4b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -1059,30 +1179,34 @@ layers { } } } -layers { +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" bottom: "inception_4b/pool_proj" top: "inception_4b/pool_proj" - name: "inception_4b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4b/output" + type: "Concat" bottom: 
"inception_4b/1x1" bottom: "inception_4b/3x3" bottom: "inception_4b/5x5" bottom: "inception_4b/pool_proj" top: "inception_4b/output" - name: "inception_4b/output" - type: CONCAT } -layers { +layer { + name: "inception_4c/1x1" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/1x1" - name: "inception_4c/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1096,21 +1220,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" bottom: "inception_4c/1x1" top: "inception_4c/1x1" - name: "inception_4c/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/3x3_reduce" - name: "inception_4c/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1124,21 +1252,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4c/3x3_reduce" top: "inception_4c/3x3_reduce" - name: "inception_4c/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4c/3x3" + type: "Convolution" bottom: "inception_4c/3x3_reduce" top: "inception_4c/3x3" - name: "inception_4c/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -1153,21 +1285,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" bottom: "inception_4c/3x3" top: "inception_4c/3x3" - name: "inception_4c/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" bottom: "inception_4b/output" top: "inception_4c/5x5_reduce" - name: "inception_4c/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 24 kernel_size: 1 @@ -1181,21 +1317,25 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4c/5x5_reduce" top: "inception_4c/5x5_reduce" - name: "inception_4c/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4c/5x5" + type: "Convolution" bottom: "inception_4c/5x5_reduce" top: "inception_4c/5x5" - name: "inception_4c/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -1210,17 +1350,17 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" bottom: "inception_4c/5x5" top: "inception_4c/5x5" - name: "inception_4c/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4c/pool" + type: "Pooling" bottom: "inception_4b/output" top: "inception_4c/pool" - name: "inception_4c/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1228,15 +1368,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4c/pool_proj" + type: "Convolution" bottom: "inception_4c/pool" top: "inception_4c/pool_proj" - name: "inception_4c/pool_proj" - type: 
CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -1250,30 +1394,34 @@ layers { } } } -layers { +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" bottom: "inception_4c/pool_proj" top: "inception_4c/pool_proj" - name: "inception_4c/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4c/output" + type: "Concat" bottom: "inception_4c/1x1" bottom: "inception_4c/3x3" bottom: "inception_4c/5x5" bottom: "inception_4c/pool_proj" top: "inception_4c/output" - name: "inception_4c/output" - type: CONCAT } -layers { +layer { + name: "inception_4d/1x1" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/1x1" - name: "inception_4d/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 112 kernel_size: 1 @@ -1287,21 +1435,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" bottom: "inception_4d/1x1" top: "inception_4d/1x1" - name: "inception_4d/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/3x3_reduce" - name: "inception_4d/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 144 kernel_size: 1 @@ -1315,21 +1467,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4d/3x3_reduce" top: "inception_4d/3x3_reduce" - name: "inception_4d/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4d/3x3" + type: "Convolution" bottom: "inception_4d/3x3_reduce" top: "inception_4d/3x3" - name: "inception_4d/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 288 pad: 1 @@ -1344,21 +1500,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" bottom: "inception_4d/3x3" top: "inception_4d/3x3" - name: "inception_4d/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" bottom: "inception_4c/output" top: "inception_4d/5x5_reduce" - name: "inception_4d/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1372,21 +1532,25 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4d/5x5_reduce" top: "inception_4d/5x5_reduce" - name: "inception_4d/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4d/5x5" + type: "Convolution" bottom: "inception_4d/5x5_reduce" top: "inception_4d/5x5" - name: "inception_4d/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 pad: 2 @@ -1401,17 +1565,17 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" 
bottom: "inception_4d/5x5" top: "inception_4d/5x5" - name: "inception_4d/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4d/pool" + type: "Pooling" bottom: "inception_4c/output" top: "inception_4d/pool" - name: "inception_4d/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1419,15 +1583,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4d/pool_proj" + type: "Convolution" bottom: "inception_4d/pool" top: "inception_4d/pool_proj" - name: "inception_4d/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 64 kernel_size: 1 @@ -1441,41 +1609,45 @@ layers { } } } -layers { +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" bottom: "inception_4d/pool_proj" top: "inception_4d/pool_proj" - name: "inception_4d/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4d/output" + type: "Concat" bottom: "inception_4d/1x1" bottom: "inception_4d/3x3" bottom: "inception_4d/5x5" bottom: "inception_4d/pool_proj" top: "inception_4d/output" - name: "inception_4d/output" - type: CONCAT } -layers { +layer { + name: "loss2/ave_pool" + type: "Pooling" bottom: "inception_4d/output" top: "loss2/ave_pool" - name: "loss2/ave_pool" - type: POOLING pooling_param { pool: AVE kernel_size: 5 stride: 3 } } -layers { +layer { + name: "loss2/conv" + type: "Convolution" bottom: "loss2/ave_pool" top: "loss2/conv" - name: "loss2/conv" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1489,21 +1661,25 @@ layers { } } } -layers { +layer { + name: "loss2/relu_conv" + type: "ReLU" bottom: "loss2/conv" top: "loss2/conv" - name: "loss2/relu_conv" - type: RELU } -layers { +layer { + name: "loss2/fc" + type: "InnerProduct" bottom: "loss2/conv" top: "loss2/fc" - name: "loss2/fc" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1024 weight_filler { @@ -1516,30 +1692,34 @@ layers { } } } -layers { +layer { + name: "loss2/relu_fc" + type: "ReLU" bottom: "loss2/fc" top: "loss2/fc" - name: "loss2/relu_fc" - type: RELU } -layers { +layer { + name: "loss2/drop_fc" + type: "Dropout" bottom: "loss2/fc" top: "loss2/fc" - name: "loss2/drop_fc" - type: DROPOUT dropout_param { dropout_ratio: 0.7 } } -layers { +layer { + name: "loss2/classifier" + type: "InnerProduct" bottom: "loss2/fc" top: "loss2/classifier" - name: "loss2/classifier" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -1552,46 +1732,50 @@ layers { } } } -layers { +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" bottom: "loss2/classifier" bottom: "label" top: "loss2/loss1" - name: "loss2/loss" - type: SOFTMAX_LOSS loss_weight: 0.3 } -layers { +layer { + name: "loss2/top-1" + type: "Accuracy" bottom: "loss2/classifier" bottom: "label" top: "loss2/top-1" - name: "loss2/top-1" - type: ACCURACY include { phase: TEST } } -layers { +layer { + name: "loss2/top-5" + type: "Accuracy" bottom: "loss2/classifier" bottom: "label" top: "loss2/top-5" - name: 
"loss2/top-5" - type: ACCURACY - accuracy_param { - top_k: 5 - } include { phase: TEST } + accuracy_param { + top_k: 5 + } } -layers { +layer { + name: "inception_4e/1x1" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/1x1" - name: "inception_4e/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 kernel_size: 1 @@ -1605,21 +1789,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_1x1" + type: "ReLU" bottom: "inception_4e/1x1" top: "inception_4e/1x1" - name: "inception_4e/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/3x3_reduce" - name: "inception_4e/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 160 kernel_size: 1 @@ -1633,21 +1821,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" bottom: "inception_4e/3x3_reduce" top: "inception_4e/3x3_reduce" - name: "inception_4e/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_4e/3x3" + type: "Convolution" bottom: "inception_4e/3x3_reduce" top: "inception_4e/3x3" - name: "inception_4e/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 320 pad: 1 @@ -1662,21 +1854,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" bottom: "inception_4e/3x3" top: "inception_4e/3x3" - name: "inception_4e/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" bottom: "inception_4d/output" top: "inception_4e/5x5_reduce" - name: "inception_4e/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1690,21 +1886,25 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" bottom: "inception_4e/5x5_reduce" top: "inception_4e/5x5_reduce" - name: "inception_4e/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_4e/5x5" + type: "Convolution" bottom: "inception_4e/5x5_reduce" top: "inception_4e/5x5" - name: "inception_4e/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 2 @@ -1719,17 +1919,17 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_5x5" + type: "ReLU" bottom: "inception_4e/5x5" top: "inception_4e/5x5" - name: "inception_4e/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_4e/pool" + type: "Pooling" bottom: "inception_4d/output" top: "inception_4e/pool" - name: "inception_4e/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1737,15 +1937,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_4e/pool_proj" + type: "Convolution" bottom: "inception_4e/pool" top: "inception_4e/pool_proj" - name: "inception_4e/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - 
weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1759,41 +1963,45 @@ layers { } } } -layers { +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" bottom: "inception_4e/pool_proj" top: "inception_4e/pool_proj" - name: "inception_4e/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_4e/output" + type: "Concat" bottom: "inception_4e/1x1" bottom: "inception_4e/3x3" bottom: "inception_4e/5x5" bottom: "inception_4e/pool_proj" top: "inception_4e/output" - name: "inception_4e/output" - type: CONCAT } -layers { +layer { + name: "pool4/3x3_s2" + type: "Pooling" bottom: "inception_4e/output" top: "pool4/3x3_s2" - name: "pool4/3x3_s2" - type: POOLING pooling_param { pool: MAX kernel_size: 3 stride: 2 } } -layers { +layer { + name: "inception_5a/1x1" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/1x1" - name: "inception_5a/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 kernel_size: 1 @@ -1807,21 +2015,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" bottom: "inception_5a/1x1" top: "inception_5a/1x1" - name: "inception_5a/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/3x3_reduce" - name: "inception_5a/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 160 kernel_size: 1 @@ -1835,21 +2047,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" bottom: "inception_5a/3x3_reduce" top: "inception_5a/3x3_reduce" - name: "inception_5a/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_5a/3x3" + type: "Convolution" bottom: "inception_5a/3x3_reduce" top: "inception_5a/3x3" - name: "inception_5a/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 320 pad: 1 @@ -1864,21 +2080,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" bottom: "inception_5a/3x3" top: "inception_5a/3x3" - name: "inception_5a/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" bottom: "pool4/3x3_s2" top: "inception_5a/5x5_reduce" - name: "inception_5a/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 32 kernel_size: 1 @@ -1892,21 +2112,25 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" bottom: "inception_5a/5x5_reduce" top: "inception_5a/5x5_reduce" - name: "inception_5a/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_5a/5x5" + type: "Convolution" bottom: "inception_5a/5x5_reduce" top: "inception_5a/5x5" - name: "inception_5a/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } 
convolution_param { num_output: 128 pad: 2 @@ -1921,17 +2145,17 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" bottom: "inception_5a/5x5" top: "inception_5a/5x5" - name: "inception_5a/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_5a/pool" + type: "Pooling" bottom: "pool4/3x3_s2" top: "inception_5a/pool" - name: "inception_5a/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -1939,15 +2163,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_5a/pool_proj" + type: "Convolution" bottom: "inception_5a/pool" top: "inception_5a/pool_proj" - name: "inception_5a/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -1961,30 +2189,34 @@ layers { } } } -layers { +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" bottom: "inception_5a/pool_proj" top: "inception_5a/pool_proj" - name: "inception_5a/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_5a/output" + type: "Concat" bottom: "inception_5a/1x1" bottom: "inception_5a/3x3" bottom: "inception_5a/5x5" bottom: "inception_5a/pool_proj" top: "inception_5a/output" - name: "inception_5a/output" - type: CONCAT } -layers { +layer { + name: "inception_5b/1x1" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/1x1" - name: "inception_5b/1x1" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 kernel_size: 1 @@ -1998,21 +2230,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" bottom: "inception_5b/1x1" top: "inception_5b/1x1" - name: "inception_5b/relu_1x1" - type: RELU } -layers { +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/3x3_reduce" - name: "inception_5b/3x3_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 192 kernel_size: 1 @@ -2026,21 +2262,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" bottom: "inception_5b/3x3_reduce" top: "inception_5b/3x3_reduce" - name: "inception_5b/relu_3x3_reduce" - type: RELU } -layers { +layer { + name: "inception_5b/3x3" + type: "Convolution" bottom: "inception_5b/3x3_reduce" top: "inception_5b/3x3" - name: "inception_5b/3x3" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -2055,21 +2295,25 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" bottom: "inception_5b/3x3" top: "inception_5b/3x3" - name: "inception_5b/relu_3x3" - type: RELU } -layers { +layer { + name: "inception_5b/5x5_reduce" + type: "Convolution" bottom: "inception_5a/output" top: "inception_5b/5x5_reduce" - name: "inception_5b/5x5_reduce" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 48 kernel_size: 1 @@ -2083,21 +2327,25 @@ layers { } } } 
-layers { +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" bottom: "inception_5b/5x5_reduce" top: "inception_5b/5x5_reduce" - name: "inception_5b/relu_5x5_reduce" - type: RELU } -layers { +layer { + name: "inception_5b/5x5" + type: "Convolution" bottom: "inception_5b/5x5_reduce" top: "inception_5b/5x5" - name: "inception_5b/5x5" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 pad: 2 @@ -2112,17 +2360,17 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" bottom: "inception_5b/5x5" top: "inception_5b/5x5" - name: "inception_5b/relu_5x5" - type: RELU } -layers { +layer { + name: "inception_5b/pool" + type: "Pooling" bottom: "inception_5a/output" top: "inception_5b/pool" - name: "inception_5b/pool" - type: POOLING pooling_param { pool: MAX kernel_size: 3 @@ -2130,15 +2378,19 @@ layers { pad: 1 } } -layers { +layer { + name: "inception_5b/pool_proj" + type: "Convolution" bottom: "inception_5b/pool" top: "inception_5b/pool_proj" - name: "inception_5b/pool_proj" - type: CONVOLUTION - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 128 kernel_size: 1 @@ -2152,50 +2404,54 @@ layers { } } } -layers { +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" bottom: "inception_5b/pool_proj" top: "inception_5b/pool_proj" - name: "inception_5b/relu_pool_proj" - type: RELU } -layers { +layer { + name: "inception_5b/output" + type: "Concat" bottom: "inception_5b/1x1" bottom: "inception_5b/3x3" bottom: "inception_5b/5x5" bottom: "inception_5b/pool_proj" top: "inception_5b/output" - name: "inception_5b/output" - type: CONCAT } -layers { +layer { + name: "pool5/7x7_s1" + type: "Pooling" bottom: "inception_5b/output" top: "pool5/7x7_s1" - name: "pool5/7x7_s1" - type: POOLING pooling_param { pool: AVE kernel_size: 7 stride: 1 } } -layers { +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" bottom: "pool5/7x7_s1" top: "pool5/7x7_s1" - name: "pool5/drop_7x7_s1" - type: DROPOUT dropout_param { dropout_ratio: 0.4 } } -layers { +layer { + name: "loss3/classifier" + type: "InnerProduct" bottom: "pool5/7x7_s1" top: "loss3/classifier" - name: "loss3/classifier" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -2207,34 +2463,34 @@ layers { } } } -layers { +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" bottom: "loss3/classifier" bottom: "label" top: "loss3/loss3" - name: "loss3/loss3" - type: SOFTMAX_LOSS loss_weight: 1 } -layers { +layer { + name: "loss3/top-1" + type: "Accuracy" bottom: "loss3/classifier" bottom: "label" top: "loss3/top-1" - name: "loss3/top-1" - type: ACCURACY include { phase: TEST } } -layers { +layer { + name: "loss3/top-5" + type: "Accuracy" bottom: "loss3/classifier" bottom: "label" top: "loss3/top-5" - name: "loss3/top-5" - type: ACCURACY - accuracy_param { - top_k: 5 - } include { phase: TEST } + accuracy_param { + top_k: 5 + } } diff --git a/models/bvlc_reference_caffenet/deploy.prototxt b/models/bvlc_reference_caffenet/deploy.prototxt index 4e494f420b5..29ccf1469f7 100644 --- a/models/bvlc_reference_caffenet/deploy.prototxt +++ 
b/models/bvlc_reference_caffenet/deploy.prototxt @@ -4,9 +4,9 @@ input_dim: 10 input_dim: 3 input_dim: 227 input_dim: 227 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -15,15 +15,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -32,9 +32,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -43,9 +43,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -55,15 +55,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -72,9 +72,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -83,9 +83,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -94,15 +94,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -112,15 +112,15 @@ layers { group: 2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -130,15 +130,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -147,66 +147,66 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8" inner_product_param { num_output: 1000 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8" top: "prob" } diff --git a/models/bvlc_reference_caffenet/train_val.prototxt b/models/bvlc_reference_caffenet/train_val.prototxt index 00fcc080261..c79472e09ab 100644 --- a/models/bvlc_reference_caffenet/train_val.prototxt +++ b/models/bvlc_reference_caffenet/train_val.prototxt @@ -1,18 +1,16 @@ name: "CaffeNet" -layers { +layer { name: "data" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: 
"examples/imagenet/ilsvrc12_train_lmdb" - backend: LMDB - batch_size: 256 + include { + phase: TRAIN } transform_param { + mirror: true crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: true } # mean pixel / channel-wise mean instead of mean image # transform_param { @@ -22,22 +20,24 @@ layers { # mean_value: 123 # mirror: true # } - include: { phase: TRAIN } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 256 + backend: LMDB + } } -layers { +layer { name: "data" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/imagenet/ilsvrc12_val_lmdb" - backend: LMDB - batch_size: 50 + include { + phase: TEST } transform_param { + mirror: false crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: false } # mean pixel / channel-wise mean instead of mean image # transform_param { @@ -47,17 +47,25 @@ layers { # mean_value: 123 # mirror: true # } - include: { phase: TEST } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -72,15 +80,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -89,9 +97,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -100,15 +108,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -124,15 +136,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -141,9 +153,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -152,15 +164,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -175,21 +191,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -205,21 +225,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - 
weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -235,15 +259,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -252,15 +276,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -273,30 +301,34 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -309,30 +341,34 @@ layers { } } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -345,17 +381,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc8" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8" bottom: "label" top: "loss" diff --git a/models/bvlc_reference_rcnn_ilsvrc13/deploy.prototxt b/models/bvlc_reference_rcnn_ilsvrc13/deploy.prototxt index ef75a0a5e95..ea9cf98a926 100644 --- a/models/bvlc_reference_rcnn_ilsvrc13/deploy.prototxt +++ b/models/bvlc_reference_rcnn_ilsvrc13/deploy.prototxt @@ -4,9 +4,9 @@ input_dim: 10 input_dim: 3 input_dim: 227 input_dim: 227 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -15,15 +15,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -32,9 +32,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -43,9 +43,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -55,15 +55,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -72,9 +72,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + 
type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -83,9 +83,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -94,15 +94,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -112,15 +112,15 @@ layers { group: 2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -130,15 +130,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -147,48 +147,48 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { @@ -196,9 +196,9 @@ layers { } } # R-CNN classification layer made from R-CNN ILSVRC13 SVMs. 
-layers { +layer { name: "fc-rcnn" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc-rcnn" inner_product_param { diff --git a/models/finetune_flickr_style/deploy.prototxt b/models/finetune_flickr_style/deploy.prototxt index aa2ad961874..4a924f74927 100644 --- a/models/finetune_flickr_style/deploy.prototxt +++ b/models/finetune_flickr_style/deploy.prototxt @@ -4,15 +4,19 @@ input_dim: 10 input_dim: 3 input_dim: 227 input_dim: 227 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -27,15 +31,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -44,9 +48,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -55,15 +59,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -79,15 +87,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -96,9 +104,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -107,15 +115,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -130,21 +142,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -160,21 +176,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -190,15 +210,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -207,15 +227,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + 
param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -228,31 +252,35 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - # Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layer - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + # Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -265,31 +293,35 @@ layers { } } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8_flickr" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8_flickr" - # blobs_lr is set to higher than for other layers, because this layer is starting from random while the others are already trained - blobs_lr: 10 - blobs_lr: 20 - weight_decay: 1 - weight_decay: 0 + # lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } inner_product_param { num_output: 20 weight_filler { @@ -302,9 +334,9 @@ layers { } } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8_flickr" top: "prob" } diff --git a/models/finetune_flickr_style/train_val.prototxt b/models/finetune_flickr_style/train_val.prototxt index 7155c492360..aa9c73e17ce 100644 --- a/models/finetune_flickr_style/train_val.prototxt +++ b/models/finetune_flickr_style/train_val.prototxt @@ -1,49 +1,57 @@ name: "FlickrStyleCaffeNet" -layers { +layer { name: "data" - type: IMAGE_DATA + type: "ImageData" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } image_data_param { source: "data/flickr_style/train.txt" batch_size: 50 new_height: 256 new_width: 256 } - transform_param { - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: true - } - include: { phase: TRAIN } } -layers { +layer { name: "data" - type: IMAGE_DATA + type: "ImageData" top: "data" top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } image_data_param { source: "data/flickr_style/test.txt" batch_size: 50 new_height: 256 new_width: 256 } - transform_param { - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: false - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -58,15 +66,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: 
"pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -75,9 +83,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -86,15 +94,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -110,15 +122,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -127,9 +139,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -138,15 +150,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -161,21 +177,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -191,21 +211,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -221,15 +245,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -238,15 +262,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -259,31 +287,35 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - # Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layer - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + # Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -296,31 +328,35 @@ layers { } } } -layers { +layer { 
name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8_flickr" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8_flickr" - # blobs_lr is set to higher than for other layers, because this layer is starting from random while the others are already trained - blobs_lr: 10 - blobs_lr: 20 - weight_decay: 1 - weight_decay: 0 + # lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } inner_product_param { num_output: 20 weight_filler { @@ -333,17 +369,19 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8_flickr" bottom: "label" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc8_flickr" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } }