Hi everyone,
I am implementing Batch Normalization in my network to classify 3 classes of images.
Theoretically, I expect the accuracy to be equal to or greater than the accuracy without Batch Norm. However, my classifier turns out to be biased toward one class: it cannot classify any image from the other 2 classes correctly.
Could anyone let me know a reason for this weird result?
My train_val.prototxt is configured as below:
name: "GoogleNet-Reduce"
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: false
#crop_size: 80
mean_file: "adc/kerfroi/adc_mean.binaryproto"
}
data_param {
source: "adc/kerfroi/train_lmdb"
batch_size: 16
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
#crop_size: 80
mean_file: "adc/kerfroi/adc_mean.binaryproto"
}
data_param {
source: "adc/kerfroi/val_lmdb"
batch_size: 32
backend: LMDB
}
}
layer {
name: "inception_3a/1x1"
type: "Convolution"
bottom: "data"
top: "inception_3a/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn1/n"
type: "BatchNorm"
bottom: "inception_3a/1x1"
top: "bn1/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn1"
type: "Scale"
bottom: "bn1/n"
top: "bn1"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_1x1"
type: "ReLU"
bottom: "bn1"
top: "inception_3a/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/3x3_reduce"
type: "Convolution"
bottom: "data"
top: "inception_3a/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn2/n"
type: "BatchNorm"
bottom: "inception_3a/3x3_reduce"
top: "bn2/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn2"
type: "Scale"
bottom: "bn2/n"
top: "bn2"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_3x3_reduce"
type: "ReLU"
bottom: "bn2"
top: "inception_3a/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/3x3"
type: "Convolution"
bottom: "inception_3a/relu_3x3_reduce"
top: "inception_3a/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn3/n"
type: "BatchNorm"
bottom: "inception_3a/3x3"
top: "bn3/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn3"
type: "Scale"
bottom: "bn3/n"
top: "bn3"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_3x3"
type: "ReLU"
bottom: "bn3"
top: "inception_3a/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/5x5_reduce"
type: "Convolution"
bottom: "data"
top: "inception_3a/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 16
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn4/n"
type: "BatchNorm"
bottom: "inception_3a/5x5_reduce"
top: "bn4/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn4"
type: "Scale"
bottom: "bn4/n"
top: "bn4"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_5x5_reduce"
type: "ReLU"
bottom: "bn4"
top: "inception_3a/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/5x5"
type: "Convolution"
bottom: "inception_3a/relu_5x5_reduce"
top: "inception_3a/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn5/n"
type: "BatchNorm"
bottom: "inception_3a/5x5"
top: "bn5/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn5"
type: "Scale"
bottom: "bn5/n"
top: "bn5"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_5x5"
type: "ReLU"
bottom: "bn5"
top: "inception_3a/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/pool"
type: "Pooling"
bottom: "data"
top: "inception_3a/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_3a/pool_proj"
type: "Convolution"
bottom: "inception_3a/pool"
top: "inception_3a/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn6/n"
type: "BatchNorm"
bottom: "inception_3a/pool_proj"
top: "bn6/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn6"
type: "Scale"
bottom: "bn6/n"
top: "bn6"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_pool_proj"
type: "ReLU"
bottom: "bn6"
top: "inception_3a/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/output"
type: "Concat"
bottom: "inception_3a/relu_1x1"
bottom: "inception_3a/relu_3x3"
bottom: "inception_3a/relu_5x5"
bottom: "inception_3a/relu_pool_proj"
top: "inception_3a/output"
}
layer {
name: "pool3/2x2_s2"
type: "Pooling"
bottom: "inception_3a/output"
top: "pool3/2x2_s2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "drop1"
type: "Dropout"
bottom: "pool3/2x2_s2"
top: "drop1"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "inception_3b/1x1"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn7/n"
type: "BatchNorm"
bottom: "inception_3b/1x1"
top: "bn7/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn7"
type: "Scale"
bottom: "bn7/n"
top: "bn7"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_1x1"
type: "ReLU"
bottom: "bn7"
top: "inception_3b/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/3x3_reduce"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn8/n"
type: "BatchNorm"
bottom: "inception_3b/3x3_reduce"
top: "bn8/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn8"
type: "Scale"
bottom: "bn8/n"
top: "bn8"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_3x3_reduce"
type: "ReLU"
bottom: "bn8"
top: "inception_3b/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/3x3"
type: "Convolution"
bottom: "inception_3b/relu_3x3_reduce"
top: "inception_3b/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn9/n"
type: "BatchNorm"
bottom: "inception_3b/3x3"
top: "bn9/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn9"
type: "Scale"
bottom: "bn9/n"
top: "bn9"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_3x3"
type: "ReLU"
bottom: "bn9"
top: "inception_3b/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/5x5_reduce"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn10/n"
type: "BatchNorm"
bottom: "inception_3b/5x5_reduce"
top: "bn10/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn10"
type: "Scale"
bottom: "bn10/n"
top: "bn10"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_5x5_reduce"
type: "ReLU"
bottom: "bn10"
top: "inception_3b/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/5x5"
type: "Convolution"
bottom: "inception_3b/relu_5x5_reduce"
top: "inception_3b/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn11/n"
type: "BatchNorm"
bottom: "inception_3b/5x5"
top: "bn11/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn11"
type: "Scale"
bottom: "bn11/n"
top: "bn11"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_5x5"
type: "ReLU"
bottom: "bn11"
top: "inception_3b/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/pool"
type: "Pooling"
bottom: "drop1"
top: "inception_3b/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_3b/pool_proj"
type: "Convolution"
bottom: "inception_3b/pool"
top: "inception_3b/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn12/n"
type: "BatchNorm"
bottom: "inception_3b/pool_proj"
top: "bn12/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn12"
type: "Scale"
bottom: "bn12/n"
top: "bn12"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_pool_proj"
type: "ReLU"
bottom: "bn12"
top: "inception_3b/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/output"
type: "Concat"
bottom: "inception_3b/relu_1x1"
bottom: "inception_3b/relu_3x3"
bottom: "inception_3b/relu_5x5"
bottom: "inception_3b/relu_pool_proj"
top: "inception_3b/output"
}
layer {
name: "pool3/3x3_s2"
type: "Pooling"
bottom: "inception_3b/output"
top: "pool3/3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "drop2"
type: "Dropout"
bottom: "pool3/3x3_s2"
top: "drop2"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "inception_4a/1x1"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn13/n"
type: "BatchNorm"
bottom: "inception_4a/1x1"
top: "bn13/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn13"
type: "Scale"
bottom: "bn13/n"
top: "bn13"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_1x1"
type: "ReLU"
bottom: "bn13"
top: "inception_4a/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/3x3_reduce"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn14/n"
type: "BatchNorm"
bottom: "inception_4a/3x3_reduce"
top: "bn14/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn14"
type: "Scale"
bottom: "bn14/n"
top: "bn14"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_3x3_reduce"
type: "ReLU"
bottom: "bn14"
top: "inception_4a/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/3x3"
type: "Convolution"
bottom: "inception_4a/relu_3x3_reduce"
top: "inception_4a/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn15/n"
type: "BatchNorm"
bottom: "inception_4a/3x3"
top: "bn15/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn15"
type: "Scale"
bottom: "bn15/n"
top: "bn15"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_3x3"
type: "ReLU"
bottom: "bn15"
top: "inception_4a/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/5x5_reduce"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn16/n"
type: "BatchNorm"
bottom: "inception_4a/5x5_reduce"
top: "bn16/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn16"
type: "Scale"
bottom: "bn16/n"
top: "bn16"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_5x5_reduce"
type: "ReLU"
bottom: "bn16"
top: "inception_4a/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/5x5"
type: "Convolution"
bottom: "inception_4a/relu_5x5_reduce"
top: "inception_4a/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn17/n"
type: "BatchNorm"
bottom: "inception_4a/5x5"
top: "bn17/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn17"
type: "Scale"
bottom: "bn17/n"
top: "bn17"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_5x5"
type: "ReLU"
bottom: "bn17"
top: "inception_4a/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/pool"
type: "Pooling"
bottom: "drop2"
top: "inception_4a/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_4a/pool_proj"
type: "Convolution"
bottom: "inception_4a/pool"
top: "inception_4a/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn18/n"
type: "BatchNorm"
bottom: "inception_4a/pool_proj"
top: "bn18/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn18"
type: "Scale"
bottom: "bn18/n"
top: "bn18"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_pool_proj"
type: "ReLU"
bottom: "bn18"
top: "inception_4a/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/output"
type: "Concat"
bottom: "inception_4a/relu_1x1"
bottom: "inception_4a/relu_3x3"
bottom: "inception_4a/relu_5x5"
bottom: "inception_4a/relu_pool_proj"
top: "inception_4a/output"
}
layer {
name: "pool4/3x3_s2"
type: "Pooling"
bottom: "inception_4a/output"
top: "pool4/3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "drop3"
type: "Dropout"
bottom: "pool4/3x3_s2"
top: "drop3"
dropout_param {
dropout_ratio: 0.3
}
}
# Get the first predictor
layer {
name: "loss0/ave_pool"
type: "Pooling"
bottom: "drop2"
top: "loss0/ave_pool"
pooling_param {
pool: AVE
global_pooling: true
}
}
layer {
name: "loss0/fc"
type: "InnerProduct"
bottom: "loss0/ave_pool"
top: "loss0/fc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn19/n"
type: "BatchNorm"
bottom: "loss0/fc"
top: "bn19/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn19"
type: "Scale"
bottom: "bn19/n"
top: "bn19"
scale_param {
bias_term: true
}
}
layer {
name: "loss0/relu_fc"
type: "ReLU"
bottom: "bn19"
top: "loss0/relu_fc"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "loss0/classifier"
type: "InnerProduct"
bottom: "loss0/relu_fc"
top: "loss0/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Get the second predictor
layer {
name: "loss1/ave_pool"
type: "Pooling"
bottom: "drop3"
top: "loss1/ave_pool"
pooling_param {
pool: AVE
global_pooling: true
}
}
layer {
name: "loss1/fc"
type: "InnerProduct"
bottom: "loss1/ave_pool"
top: "loss1/fc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn20/n"
type: "BatchNorm"
bottom: "loss1/fc"
top: "bn20/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn20"
type: "Scale"
bottom: "bn20/n"
top: "bn20"
scale_param {
bias_term: true
}
}
layer {
name: "loss1/relu_fc"
type: "ReLU"
bottom: "bn20"
top: "loss1/relu_fc"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "loss1/classifier"
type: "InnerProduct"
bottom: "loss1/relu_fc"
top: "loss1/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
#Combine 2 predictors
layer {
name: "celoss0"
type: "SoftmaxWithLoss"
bottom: "loss0/classifier"
bottom: "label"
top: "celoss0"
loss_weight: 0.3
}
layer {
name: "celoss1"
type: "SoftmaxWithLoss"
bottom: "loss1/classifier"
bottom: "label"
top: "celoss1"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "loss1/classifier"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss/top-5"
type: "Accuracy"
bottom: "loss1/classifier"
bottom: "label"
top: "loss/top-5"
include {
phase: TEST
}
accuracy_param {
top_k: 3
}
}
And the deploy.prototxt:
name: "GoogleNet-Reduce"
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: false
#crop_size: 80
mean_file: "adc/kerfroi/adc_mean.binaryproto"
}
data_param {
source: "adc/kerfroi/train_lmdb"
batch_size: 16
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
#crop_size: 80
mean_file: "adc/kerfroi/adc_mean.binaryproto"
}
data_param {
source: "adc/kerfroi/val_lmdb"
batch_size: 32
backend: LMDB
}
}
layer {
name: "inception_3a/1x1"
type: "Convolution"
bottom: "data"
top: "inception_3a/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn1/n"
type: "BatchNorm"
bottom: "inception_3a/1x1"
top: "bn1/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn1"
type: "Scale"
bottom: "bn1/n"
top: "bn1"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_1x1"
type: "ReLU"
bottom: "bn1"
top: "inception_3a/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/3x3_reduce"
type: "Convolution"
bottom: "data"
top: "inception_3a/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn2/n"
type: "BatchNorm"
bottom: "inception_3a/3x3_reduce"
top: "bn2/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn2"
type: "Scale"
bottom: "bn2/n"
top: "bn2"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_3x3_reduce"
type: "ReLU"
bottom: "bn2"
top: "inception_3a/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/3x3"
type: "Convolution"
bottom: "inception_3a/relu_3x3_reduce"
top: "inception_3a/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn3/n"
type: "BatchNorm"
bottom: "inception_3a/3x3"
top: "bn3/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn3"
type: "Scale"
bottom: "bn3/n"
top: "bn3"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_3x3"
type: "ReLU"
bottom: "bn3"
top: "inception_3a/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/5x5_reduce"
type: "Convolution"
bottom: "data"
top: "inception_3a/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 16
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn4/n"
type: "BatchNorm"
bottom: "inception_3a/5x5_reduce"
top: "bn4/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn4"
type: "Scale"
bottom: "bn4/n"
top: "bn4"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_5x5_reduce"
type: "ReLU"
bottom: "bn4"
top: "inception_3a/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/5x5"
type: "Convolution"
bottom: "inception_3a/relu_5x5_reduce"
top: "inception_3a/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn5/n"
type: "BatchNorm"
bottom: "inception_3a/5x5"
top: "bn5/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn5"
type: "Scale"
bottom: "bn5/n"
top: "bn5"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_5x5"
type: "ReLU"
bottom: "bn5"
top: "inception_3a/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/pool"
type: "Pooling"
bottom: "data"
top: "inception_3a/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_3a/pool_proj"
type: "Convolution"
bottom: "inception_3a/pool"
top: "inception_3a/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn6/n"
type: "BatchNorm"
bottom: "inception_3a/pool_proj"
top: "bn6/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn6"
type: "Scale"
bottom: "bn6/n"
top: "bn6"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3a/relu_pool_proj"
type: "ReLU"
bottom: "bn6"
top: "inception_3a/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3a/output"
type: "Concat"
bottom: "inception_3a/relu_1x1"
bottom: "inception_3a/relu_3x3"
bottom: "inception_3a/relu_5x5"
bottom: "inception_3a/relu_pool_proj"
top: "inception_3a/output"
}
layer {
name: "pool3/2x2_s2"
type: "Pooling"
bottom: "inception_3a/output"
top: "pool3/2x2_s2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "drop1"
type: "Dropout"
bottom: "pool3/2x2_s2"
top: "drop1"
dropout_param {
dropout_ratio: 0.1
}
}
layer {
name: "inception_3b/1x1"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn7/n"
type: "BatchNorm"
bottom: "inception_3b/1x1"
top: "bn7/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn7"
type: "Scale"
bottom: "bn7/n"
top: "bn7"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_1x1"
type: "ReLU"
bottom: "bn7"
top: "inception_3b/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/3x3_reduce"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn8/n"
type: "BatchNorm"
bottom: "inception_3b/3x3_reduce"
top: "bn8/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn8"
type: "Scale"
bottom: "bn8/n"
top: "bn8"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_3x3_reduce"
type: "ReLU"
bottom: "bn8"
top: "inception_3b/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/3x3"
type: "Convolution"
bottom: "inception_3b/relu_3x3_reduce"
top: "inception_3b/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn9/n"
type: "BatchNorm"
bottom: "inception_3b/3x3"
top: "bn9/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn9"
type: "Scale"
bottom: "bn9/n"
top: "bn9"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_3x3"
type: "ReLU"
bottom: "bn9"
top: "inception_3b/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/5x5_reduce"
type: "Convolution"
bottom: "drop1"
top: "inception_3b/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 32
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn10/n"
type: "BatchNorm"
bottom: "inception_3b/5x5_reduce"
top: "bn10/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn10"
type: "Scale"
bottom: "bn10/n"
top: "bn10"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_5x5_reduce"
type: "ReLU"
bottom: "bn10"
top: "inception_3b/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/5x5"
type: "Convolution"
bottom: "inception_3b/relu_5x5_reduce"
top: "inception_3b/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn11/n"
type: "BatchNorm"
bottom: "inception_3b/5x5"
top: "bn11/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn11"
type: "Scale"
bottom: "bn11/n"
top: "bn11"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_5x5"
type: "ReLU"
bottom: "bn11"
top: "inception_3b/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/pool"
type: "Pooling"
bottom: "drop1"
top: "inception_3b/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_3b/pool_proj"
type: "Convolution"
bottom: "inception_3b/pool"
top: "inception_3b/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn12/n"
type: "BatchNorm"
bottom: "inception_3b/pool_proj"
top: "bn12/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn12"
type: "Scale"
bottom: "bn12/n"
top: "bn12"
scale_param {
bias_term: true
}
}
layer {
name: "inception_3b/relu_pool_proj"
type: "ReLU"
bottom: "bn12"
top: "inception_3b/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_3b/output"
type: "Concat"
bottom: "inception_3b/relu_1x1"
bottom: "inception_3b/relu_3x3"
bottom: "inception_3b/relu_5x5"
bottom: "inception_3b/relu_pool_proj"
top: "inception_3b/output"
}
layer {
name: "pool3/3x3_s2"
type: "Pooling"
bottom: "inception_3b/output"
top: "pool3/3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "drop2"
type: "Dropout"
bottom: "pool3/3x3_s2"
top: "drop2"
dropout_param {
dropout_ratio: 0.2
}
}
layer {
name: "inception_4a/1x1"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/1x1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn13/n"
type: "BatchNorm"
bottom: "inception_4a/1x1"
top: "bn13/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn13"
type: "Scale"
bottom: "bn13/n"
top: "bn13"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_1x1"
type: "ReLU"
bottom: "bn13"
top: "inception_4a/relu_1x1"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/3x3_reduce"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/3x3_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 256
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn14/n"
type: "BatchNorm"
bottom: "inception_4a/3x3_reduce"
top: "bn14/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn14"
type: "Scale"
bottom: "bn14/n"
top: "bn14"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_3x3_reduce"
type: "ReLU"
bottom: "bn14"
top: "inception_4a/relu_3x3_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/3x3"
type: "Convolution"
bottom: "inception_4a/relu_3x3_reduce"
top: "inception_4a/3x3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn15/n"
type: "BatchNorm"
bottom: "inception_4a/3x3"
top: "bn15/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn15"
type: "Scale"
bottom: "bn15/n"
top: "bn15"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_3x3"
type: "ReLU"
bottom: "bn15"
top: "inception_4a/relu_3x3"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/5x5_reduce"
type: "Convolution"
bottom: "drop2"
top: "inception_4a/5x5_reduce"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 64
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn16/n"
type: "BatchNorm"
bottom: "inception_4a/5x5_reduce"
top: "bn16/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn16"
type: "Scale"
bottom: "bn16/n"
top: "bn16"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_5x5_reduce"
type: "ReLU"
bottom: "bn16"
top: "inception_4a/relu_5x5_reduce"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/5x5"
type: "Convolution"
bottom: "inception_4a/relu_5x5_reduce"
top: "inception_4a/5x5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
pad: 2
kernel_size: 5
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn17/n"
type: "BatchNorm"
bottom: "inception_4a/5x5"
top: "bn17/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn17"
type: "Scale"
bottom: "bn17/n"
top: "bn17"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_5x5"
type: "ReLU"
bottom: "bn17"
top: "inception_4a/relu_5x5"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/pool"
type: "Pooling"
bottom: "drop2"
top: "inception_4a/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 1
pad: 1
}
}
layer {
name: "inception_4a/pool_proj"
type: "Convolution"
bottom: "inception_4a/pool"
top: "inception_4a/pool_proj"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
convolution_param {
num_output: 128
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn18/n"
type: "BatchNorm"
bottom: "inception_4a/pool_proj"
top: "bn18/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn18"
type: "Scale"
bottom: "bn18/n"
top: "bn18"
scale_param {
bias_term: true
}
}
layer {
name: "inception_4a/relu_pool_proj"
type: "ReLU"
bottom: "bn18"
top: "inception_4a/relu_pool_proj"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "inception_4a/output"
type: "Concat"
bottom: "inception_4a/relu_1x1"
bottom: "inception_4a/relu_3x3"
bottom: "inception_4a/relu_5x5"
bottom: "inception_4a/relu_pool_proj"
top: "inception_4a/output"
}
layer {
name: "pool4/3x3_s2"
type: "Pooling"
bottom: "inception_4a/output"
top: "pool4/3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "drop3"
type: "Dropout"
bottom: "pool4/3x3_s2"
top: "drop3"
dropout_param {
dropout_ratio: 0.3
}
}
# Get the first predictor
layer {
name: "loss0/ave_pool"
type: "Pooling"
bottom: "drop2"
top: "loss0/ave_pool"
pooling_param {
pool: AVE
global_pooling: true
}
}
layer {
name: "loss0/fc"
type: "InnerProduct"
bottom: "loss0/ave_pool"
top: "loss0/fc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn19/n"
type: "BatchNorm"
bottom: "loss0/fc"
top: "bn19/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn19"
type: "Scale"
bottom: "bn19/n"
top: "bn19"
scale_param {
bias_term: true
}
}
layer {
name: "loss0/relu_fc"
type: "ReLU"
bottom: "bn19"
top: "loss0/relu_fc"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "loss0/classifier"
type: "InnerProduct"
bottom: "loss0/relu_fc"
top: "loss0/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Get the second predictor
layer {
name: "loss1/ave_pool"
type: "Pooling"
bottom: "drop3"
top: "loss1/ave_pool"
pooling_param {
pool: AVE
global_pooling: true
}
}
layer {
name: "loss1/fc"
type: "InnerProduct"
bottom: "loss1/ave_pool"
top: "loss1/fc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "bn20/n"
type: "BatchNorm"
bottom: "loss1/fc"
top: "bn20/n"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
layer {
name: "bn20"
type: "Scale"
bottom: "bn20/n"
top: "bn20"
scale_param {
bias_term: true
}
}
layer {
name: "loss1/relu_fc"
type: "ReLU"
bottom: "bn20"
top: "loss1/relu_fc"
relu_param {
negative_slope: 0.01
}
}
layer {
name: "loss1/classifier"
type: "InnerProduct"
bottom: "loss1/relu_fc"
top: "loss1/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 1
}
inner_product_param {
num_output: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
#Combine 2 predictors
layer {
name: "celoss0"
type: "SoftmaxWithLoss"
bottom: "loss0/classifier"
bottom: "label"
top: "celoss0"
loss_weight: 0.3
}
layer {
name: "celoss1"
type: "SoftmaxWithLoss"
bottom: "loss1/classifier"
bottom: "label"
top: "celoss1"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "loss1/classifier"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss/top-5"
type: "Accuracy"
bottom: "loss1/classifier"
bottom: "label"
top: "loss/top-5"
include {
phase: TEST
}
accuracy_param {
top_k: 3
}
}
Many thanks!!!