# VGG 16-layer network convolutional finetuning
# Network modified to have smaller receptive field (128 pixels)
# and smaller stride (8 pixels) when run in convolutional mode.
#
# For alignment to work, we set:
# (1) input dimension equal to
# $n = 8 * k + 2$, e.g., 306 (for k = 38)
# (2) dimension after 3rd max-pooling (centered at -3.5)
# $m = k + 2$ (40 if k = 38)
# (3) dimension after 4th max-pooling (centered at -1.5)
# $m = k + 1$ (39 if k = 38)
# (4) Crop 1 pixel at the beginning of the label map and shrink by 8
# to produce the expected $m = k + 1$ (39 if k = 38)
#
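# Worked size trace for k = 38 (a sketch, assuming Caffe's pooling
# arithmetic, out = ceil((in + 2*pad - kernel)/stride) + 1, and the
# layer settings below):
#   input:  306 = 8 * 38 + 2
#   pool1 (kernel 2, stride 2, pad 1): ceil((306 + 2 - 2)/2) + 1 = 154
#   pool2 (kernel 2, stride 2, pad 1): ceil((154 + 2 - 2)/2) + 1 = 78
#   pool3 (kernel 2, stride 2, pad 1): ceil((78 + 2 - 2)/2) + 1  = 40  ($m = k + 2$)
#   pool4 (kernel 2, stride 1):        ceil((40 - 2)/1) + 1      = 39  ($m = k + 1$)
#   pool5, conv5_*, and fc6-fc8 below are configured to keep the size at 39
#   label: crop 1 and shrink by 8: (306 - 1 - 1)/8 + 1 = 39, matching fc8
#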
name: "${NET_ID}"
layer {
name: "data"
type: "ImageSegData"
top: "data"
top: "label"
image_data_param {
root_folder: "${DATA_ROOT}"
source: "${EXP}/list/${TRAIN_SET}.txt"
label_type: PIXEL
batch_size: 10
shuffle: true
}
transform_param {
# Mean values are listed in B, G, R channel order (Caffe loads images as BGR).
# Computed with the MATLAB script calc_bgr_image_set_mean.m
mean_value: 34.7887
mean_value: 27.7252
mean_value: 38.9483
crop_size: 306
mirror: true
}
include: { phase: TRAIN }
}
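# Note: crop_size 306 in transform_param satisfies alignment constraint (1)
# above: 306 = 8 * 38 + 2.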
### NETWORK ###
layer {
bottom: "data"
top: "conv1_1"
name: "conv1_1"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv1_1"
top: "conv1_1"
name: "relu1_1"
type: "ReLU"
}
layer {
bottom: "conv1_1"
top: "conv1_2"
name: "conv1_2"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv1_2"
top: "conv1_2"
name: "relu1_2"
type: "ReLU"
}
layer {
bottom: "conv1_2"
top: "pool1"
name: "pool1"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
pad: 1
}
}
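# With kernel 2, stride 2, pad 1, Caffe gives out = ceil((in + 2 - 2)/2) + 1,
# i.e. in/2 + 1 for even inputs (306 -> 154 here); that +1 from the padding
# is what the alignment recipe in the header relies on. Same for pool2/pool3.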
layer {
bottom: "pool1"
top: "conv2_1"
name: "conv2_1"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv2_1"
top: "conv2_1"
name: "relu2_1"
type: "ReLU"
}
layer {
bottom: "conv2_1"
top: "conv2_2"
name: "conv2_2"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv2_2"
top: "conv2_2"
name: "relu2_2"
type: "ReLU"
}
layer {
bottom: "conv2_2"
top: "pool2"
name: "pool2"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
pad: 1
}
}
layer {
bottom: "pool2"
top: "conv3_1"
name: "conv3_1"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv3_1"
top: "conv3_1"
name: "relu3_1"
type: "ReLU"
}
layer {
bottom: "conv3_1"
top: "conv3_2"
name: "conv3_2"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv3_2"
top: "conv3_2"
name: "relu3_2"
type: "ReLU"
}
layer {
bottom: "conv3_2"
top: "conv3_3"
name: "conv3_3"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv3_3"
top: "conv3_3"
name: "relu3_3"
type: "ReLU"
}
layer {
bottom: "conv3_3"
top: "pool3"
name: "pool3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
pad: 1
}
}
layer {
bottom: "pool3"
top: "conv4_1"
name: "conv4_1"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv4_1"
top: "conv4_1"
name: "relu4_1"
type: "ReLU"
}
layer {
bottom: "conv4_1"
top: "conv4_2"
name: "conv4_2"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv4_2"
top: "conv4_2"
name: "relu4_2"
type: "ReLU"
}
layer {
bottom: "conv4_2"
top: "conv4_3"
name: "conv4_3"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv4_3"
top: "conv4_3"
name: "relu4_3"
type: "ReLU"
}
layer {
bottom: "conv4_3"
top: "pool4"
name: "pool4"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
#pad: 1
#stride: 2
stride: 1
}
}
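# pool4 keeps stride 1 (plain VGG uses stride 2 here), so the features stay
# at 1/8 of the input resolution; the conv5_* layers below use dilation 2 to
# compensate and preserve their original receptive field.
# Size check: ceil((40 - 2)/1) + 1 = 39, i.e. $m = k + 1$ from the header.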
layer {
bottom: "pool4"
top: "conv5_1"
name: "conv5_1"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
#pad: 1
pad: 2
#hole is for V1, use 'dilation' instead for V2
#hole: 2
dilation: 2
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
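# With kernel 3 and dilation 2, the effective kernel is 2*(3-1)+1 = 5, so
# pad 2 leaves the spatial size unchanged: 39 + 2*2 - 5 + 1 = 39.
# (The same holds for conv5_2 and conv5_3 below.)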
layer {
bottom: "conv5_1"
top: "conv5_1"
name: "relu5_1"
type: "ReLU"
}
layer {
bottom: "conv5_1"
top: "conv5_2"
name: "conv5_2"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
#pad: 1
pad: 2
#hole is for V1, use 'dilation' instead for V2
#hole: 2
dilation: 2
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv5_2"
top: "conv5_2"
name: "relu5_2"
type: "ReLU"
}
layer {
bottom: "conv5_2"
top: "conv5_3"
name: "conv5_3"
type: "Convolution"
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
#pad: 1
pad: 2
#hole is for V1, use 'dilation' instead for V2
#hole: 2
dilation: 2
kernel_size: 3
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "conv5_3"
top: "conv5_3"
name: "relu5_3"
type: "ReLU"
}
layer {
bottom: "conv5_3"
top: "pool5"
name: "pool5"
type: "Pooling"
pooling_param {
pool: MAX
#kernel_size: 2
#stride: 2
kernel_size: 3
stride: 1
pad: 1
}
}
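# Replacing VGG's kernel-2/stride-2 pool5 with kernel 3, stride 1, pad 1
# also preserves the size: ceil((39 + 2 - 3)/1) + 1 = 39.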
layer {
bottom: "pool5"
top: "fc6"
name: "fc6"
type: "Convolution"
# This parameter seems deprecated in V2
# strict_dim: false
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 6
#hole is for V1, use 'dilation' instead for V2
#hole: 4
dilation: 4
kernel_size: 4
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
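# fc6 as a dilated convolution: the effective kernel is 4*(4-1)+1 = 13, so
# pad 6 preserves the size: 39 + 2*6 - 13 + 1 = 39. (The kernel_size 4 /
# dilation 4 pair looks like the DeepLab-style subsampled stand-in for the
# original 7x7 fc6 filters; take this note as a sketch of the arithmetic.)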
layer {
bottom: "fc6"
top: "fc6"
name: "relu6"
type: "ReLU"
}
layer {
bottom: "fc6"
top: "fc6"
name: "drop6"
type: "Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom: "fc6"
top: "fc7"
name: "fc7"
type: "Convolution"
# This parameter seems deprecated in V2
# strict_dim: false
# These parameters do not seem to be parsed in V2
#blobs_lr: 1
#blobs_lr: 2
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
kernel_size: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
bottom: "fc7"
top: "fc7"
name: "relu7"
type: "ReLU"
}
layer {
bottom: "fc7"
top: "fc7"
name: "drop7"
type: "Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom: "fc7"
top: "fc8_synth_to_real"
name: "fc8_synth_to_real"
type: "Convolution"
# This parameter seems deprecated in V2
#strict_dim: false
# These parameters do not seem to be parsed in V2
#blobs_lr: 10
#blobs_lr: 20
#weight_decay: 1
#weight_decay: 0
# For V2 use these instead
param {
lr_mult: 10
decay_mult: 1
}
param {
lr_mult: 20
decay_mult: 0
}
convolution_param {
num_output: ${NUM_LABELS}
kernel_size: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
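# fc8_synth_to_real is the only layer initialized from scratch (gaussian
# filler, 10x/20x lr_mult); since Caffe copies pretrained weights by layer
# name, the new name presumably also keeps any old fc8 weights from being
# loaded during finetuning.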
layer {
bottom: "label"
top: "label_shrink"
name: "label_shrink"
type: "Interp"
interp_param {
shrink_factor: 8
pad_beg: -1
pad_end: 0
}
}
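# This implements header point (4): with shrink_factor 8, pad_beg -1 (crop
# one pixel at the start) and pad_end 0, the label map shrinks from 306 to
# (306 - 1 - 1)/8 + 1 = 39, matching the fc8 output size.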
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8_synth_to_real"
bottom: "label_shrink"
top: "loss"
# For V2, use loss_param instead of the V1-style fields
loss_param {
ignore_label: 255
normalize: true
}
#include: { phase: TRAIN }
}
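# In loss_param, ignore_label: 255 excludes pixels labeled 255 (the usual
# void/don't-care label) from the loss, and normalize: true averages the
# loss over the remaining pixels instead of summing it.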
layer {
name: "accuracy"
type: "SegAccuracy"
bottom: "fc8_synth_to_real"
bottom: "label_shrink"
top: "accuracy"
seg_accuracy_param {
ignore_label: 255
}
}
# layer {
# name: "im_data"
# type: IMSHOW
# bottom: "data"
# }
# layer {
# name: "im_scores"
# type: IMSHOW
# bottom: "fc8_pascal"
# }
#layer {
# name: "fc8_mat"
# type: "MatWrite"
# bottom: "fc8_synth_to_real"
# mat_write_param {
# #prefix: "voc12/features/${NET_ID}/${TEST_SET}/fc8/"
# #source: "voc12/list/${TEST_SET}_id.txt"
# prefix: "${EXP}/features/${NET_ID}/${TEST_SET}/fc8/"
# source: "${EXP}/list/${TEST_SET}_id.txt"
# strip: 0
# period: 1
# }
# include: { phase: TEST }
#}