FCN finetuning on AlexNet


Christos Apostolopoulos

Mar 19, 2016, 6:41:24 PM
to Caffe Users
Hi guys,

I know a lot of posts have been made about FCN, but I'm running into a dead end, so I decided to make this post. My goal is to finetune FCN-AlexNet (https://gist.github.com/shelhamer/3f2c75f3c8c71357f24c#file-readme.md) so that it segments people more accurately. I have approximately 5.5k images of segmented people, which I also mirror in Caffe to augment the data. These are the steps I've taken:

1) Downloaded Jon Long's caffe-future .zip file.
2) Built everything (make all, make test, make pycaffe, make distribute).
3) Created LMDBs for train/val, where the labels are single-channel matrices of 0s and 1s (0 = background, 1 = person). The Python script I used is the one at the end of the gist linked above; a sketch of it follows the prototxt below.
4) Ran solve.py to initialize the deconvolution layers with bilinear weights (also sketched after the prototxt). Note: I got an error from the group: 21 param in the upsample layer, presumably because my score layer has 2 channels rather than 21. When I commented that line out everything ran, and the outputs kept the size I wanted. I'm not sure this is the right way to go, though.

5) Started to finetune. My loss, though, starts at 0.693 and essentially stays there:

I0319 15:25:03.457247  8641 solver.cpp:242] Iteration 0, loss = 0.693147
I0319 15:25:03.457295  8641 solver.cpp:258]     Train net output #0: loss = 0.693147 (* 1 = 0.693147 loss)
I0319 15:25:03.457307  8641 solver.cpp:571] Iteration 0, lr = 1e-10
I0319 15:25:19.745345  8641 solver.cpp:242] Iteration 20, loss = 0.693146
I0319 15:25:19.745398  8641 solver.cpp:258]     Train net output #0: loss = 0.693143 (* 1 = 0.693143 loss)
I0319 15:25:19.745410  8641 solver.cpp:571] Iteration 20, lr = 1e-10
I0319 15:25:38.227344  8641 solver.cpp:242] Iteration 40, loss = 0.69314
I0319 15:25:38.227401  8641 solver.cpp:258]     Train net output #0: loss = 0.693134 (* 1 = 0.693134 loss)
I0319 15:25:38.227412  8641 solver.cpp:571] Iteration 40, lr = 1e-10
I0319 15:25:57.472482  8641 solver.cpp:242] Iteration 60, loss = 0.69313
I0319 15:25:57.472544  8641 solver.cpp:258]     Train net output #0: loss = 0.693124 (* 1 = 0.693124 loss)
I0319 15:25:57.472556  8641 solver.cpp:571] Iteration 60, lr = 1e-10
I0319 15:26:16.659310  8641 solver.cpp:242] Iteration 80, loss = 0.693115
I0319 15:26:16.659356  8641 solver.cpp:258]     Train net output #0: loss = 0.693123 (* 1 = 0.693123 loss)



This is the behavior throughout the finetuning. Note that 0.693 ≈ ln 2, i.e. the softmax is stuck at chance level for my two classes. I also changed the names of the corresponding layers in the train/val prototxt:

name: "FCN-AlexNet"
layer {
  name: "data"
  type: "Data"
  top: "data"
  include {
    phase: TRAIN
  }
  transform_param {
    mean_value: 104.00699
    mean_value: 116.66877
    mean_value: 122.67892
    mirror: 1
   # crop_size: 227
  }
  data_param {
    source: "../examples/finetune_FCN_alexnet/TrainVOC_Data_lmdb/"
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  include {
    phase: TEST
  }
  transform_param {
    mean_value: 104.00699
    mean_value: 116.66877
    mean_value: 122.67892
    mirror: 1
   # crop_size: 227
  }
  data_param {
    source: "../examples/finetune_FCN_alexnet/TestVOC_Data_lmdb/"
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "label"
  type: "Data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: 1
    #crop_size: 227
  }
  data_param {
    source: "../examples/finetune_FCN_alexnet/TrainVOC_Label_lmdb/"
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "label"
  type: "Data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    mirror: 1
    #crop_size: 227
  }
  data_param {
    source: "../examples/finetune_FCN_alexnet/TestVOC_Label_lmdb/"
    batch_size: 1
    backend: LMDB
  }
}

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    pad: 100
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "Convolution"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4096
    kernel_size: 6
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "Convolution"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4096
    kernel_size: 1
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}

layer {
  name: "score-fr2"
  type: "Convolution"
  bottom: "fc7"
  top: "score-fr2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    kernel_size: 1
    engine: CAFFE
  }
}
layer {
  name: "upsample2"
  type: "Deconvolution"
  bottom: "score-fr2"
  top: "bigscore"
  param {
    lr_mult: 0
  }
  param {
    lr_mult: 0
  }
  convolution_param {
    num_output: 2
    kernel_size: 63
    #group: 2
    stride: 32
  }
}
layer {
  name: "crop2"
  type: "Crop"
  bottom: "bigscore"
  bottom: "data"
  top: "score"
}
layer {
  name: "prob"
  type: "SoftmaxWithLoss"
  bottom: "score"
  bottom: "label"
  top: "loss"
  loss_param {
    ignore_label: 255
    normalize: true
    # normalize: false
  }
}
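For reference, the label-LMDB script from step 3 boils down to something like this (a minimal sketch: the paths and the PNG label format are placeholders for my own setup):

import caffe
import lmdb
import numpy as np
from PIL import Image

label_paths = ['labels/0001.png']  # placeholder list of label images

db = lmdb.open('TrainVOC_Label_lmdb', map_size=int(1e12))
with db.begin(write=True) as txn:
    for idx, path in enumerate(label_paths):
        # labels are single-channel uint8 images with values 0 (background)
        # and 1 (person); no mean subtraction, no channel swapping
        label = np.array(Image.open(path), dtype=np.uint8)
        label = label[np.newaxis, ...]  # (1, H, W), since Caffe wants C x H x W
        datum = caffe.io.array_to_datum(label)
        txn.put('{:0>10d}'.format(idx), datum.SerializeToString())
db.close()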

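And step 4 (solve.py) essentially does the following before training; this mirrors the bilinear-initialization code in the gist, with the solver and weights filenames as placeholders:

import numpy as np
import caffe

def upsample_filt(size):
    # bilinear interpolation kernel of the given (square) size
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))

def interp_surgery(net, layers):
    # set the listed Deconvolution layers to fixed bilinear upsampling
    for l in layers:
        m, k, h, w = net.params[l][0].data.shape
        if m != k and k != 1:
            raise ValueError('input and output channels must match (or k == 1)')
        if h != w:
            raise ValueError('filters must be square')
        net.params[l][0].data[range(m), range(k), :, :] = upsample_filt(h)

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from('pretrained.caffemodel')  # placeholder weights file
interp_surgery(solver.net, [k for k in solver.net.params if 'up' in k])
solver.solve()

With group: 21 commented out, my upsample2 weight blob is (2, 2, 63, 63), so the m == k case is the one that runs.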

Any help would be appreciated since I'm stuck!!

Christos Apostolopoulos

Mar 19, 2016, 11:40:21 PM
to Caffe Users
UPDATE:

So I hadn't done the recommended net surgery. Now that I'm trying to do it, I get the following error:

ValueError: could not broadcast input array from shape (1000) into shape (2)

It happens at the fc8 layer: I want my convolution output to be 2 (since I only have background and person), so the 1000-way ImageNet weights can't be copied into it. Any ideas?
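What I'm trying now, to get around the broadcast, is to copy only the shape-compatible layers by hand and leave the renamed 2-class head (score-fr2) with its fresh initialization, instead of forcing fc8's 1000-way weights into it. A rough sketch, with hypothetical filenames:

import caffe

new_net = caffe.Net('train_val.prototxt', caffe.TRAIN)
old_net = caffe.Net('deploy.prototxt', 'fcn-alexnet.caffemodel', caffe.TEST)

# copy a layer only if its name exists in both nets AND every param blob
# has the same shape; fc8 (1000-way) vs. score-fr2 (2-way) is skipped
for name in new_net.params:
    if name not in old_net.params:
        continue
    shapes_match = all(o.data.shape == n.data.shape
                       for o, n in zip(old_net.params[name], new_net.params[name]))
    if shapes_match:
        for o, n in zip(old_net.params[name], new_net.params[name]):
            n.data[...] = o.data

new_net.save('fcn-alexnet-2class-init.caffemodel')

Does that look like a sane way to handle the 1000-to-2 mismatch?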