Hi, my goal is to set up a network made up of two independent branches of convolutional layers (i.e. layers with different kernel sizes or strides) and concatenate their outputs into a single fully connected layer. Looking around I found that this is possible, but I couldn't find a network example showing how to configure the prototxt file. Can someone help me?
Just as an example, I aim to do something like the following (ReLU and pooling layers have been omitted for readability).
Both conv1 and conv2 take the data layer as input and proceed in parallel. Finally, fc6 should take as input both the conv1 and conv2 outputs.
What do I have to write in the "bottom" field of the fc6 layer? (My own untested guess is sketched after the listing.)
name: "CaffeNet"
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/marco/Desktop/caffeCUDNN/examples/morphCrop/lmdb/Test_fold_is_0/gender_train_lmdb"
backend: LMDB
batch_size: 400
}
transform_param {
mean_file: "/home/marco/Desktop/caffeCUDNN/examples/morphCrop/mean_image/Test_fold_is_0/mean.binaryproto"
mirror: true
}
include: { phase: TRAIN }
}
layers {
name: "data"
type: DATA
top: "data"
top: "label"
data_param {
source: "/home/marco/Desktop/caffeCUDNN/examples/morphCrop/lmdb/Test_fold_is_0/gender_val_lmdb"
backend: LMDB
batch_size: 100
}
transform_param {
mean_file: "/home/marco/Desktop/caffeCUDNN/examples/morphCrop/mean_image/Test_fold_is_0/mean.binaryproto"
mirror: false
}
include: { phase: TEST }
}
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 96
kernel_size: 7
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "conv2"
type: CONVOLUTION
bottom: "data"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "conv1", "conv2" ---------------------->>>>> Critical point
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 512
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc8"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8"
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 50
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "accuracy"
type: ACCURACY
bottom: "fc8"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
}
layers {
name: "loss"
type: SOFTMAX_LOSS
bottom: "fc8"
bottom: "label"
top: "loss"
}