something wrong with train/deploy prototxt files


Ehab Albadawy

Nov 15, 2016, 9:57:09 PM
to Caffe Users
I'm trying to build a network, but something is going wrong with it: no matter how many iterations I train for, I always get the same output at test time.


## train.prototxt

layer {
  name: "data"
  type: "ImageData"
  top: "data"
  top: "label"
  transform_param {
    mirror: true
  }
  image_data_param {
    source: "/path/to/train.txt"
    batch_size: 1500
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "conv1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv2"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool1"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv4"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "drop1"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "drop2"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "score"
  type: "InnerProduct"
  bottom: "fc7"
  top: "score"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 5
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "score"
  bottom: "label"
  top: "loss"
}
layer {
  name: "acc"
  type: "Accuracy"
  bottom: "score"
  bottom: "label"
  top: "acc"
}
layer {
  name: "probs"
  type: "Softmax"
  bottom: "score"
  top: "probs"
}





## deploy.prototxt

layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 1 dim: 3 dim: 33 dim: 33 } }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "conv1"
  top: "conv2"
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv2"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool1"
  top: "conv3"
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv4"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc6"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "drop1"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "drop2"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "score"
  type: "InnerProduct"
  bottom: "fc7"
  top: "score"
  inner_product_param {
    num_output: 5
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "score"
  top: "prob"
}


Jonathan R. Williford

Nov 16, 2016, 3:56:37 AM
to Ehab Albadawy, Caffe Users
Can you show the evolution of accuracy and loss during training?

Jonathan


Ehab Albadawy

Nov 16, 2016, 12:23:43 PM
to Caffe Users, ehalba...@gmail.com
[Attached: plots of the training loss and accuracy history.]

Ehab Albadawy

Nov 16, 2016, 8:43:24 PM
to Caffe Users, ehalba...@gmail.com
Problem solved! I found I was doing something very silly: I was giving it class labels starting at index 1, while Caffe expects them to start at index 0! God, it should print an error message or something instead of letting me waste the whole day trying to figure out what's wrong!
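For anyone hitting the same thing: the labels in the ImageData source list must run from 0 to num_output - 1 (here 0-4, not 1-5). A quick sketch for shifting a 1-based list file to 0-based (file names are hypothetical; each line is assumed to be "path label"):

# One-off fix: rewrite a 1-based label list as 0-based (hypothetical file names).
with open('train.txt') as f_in, open('train_0based.txt', 'w') as f_out:
    for line in f_in:
        if not line.strip():
            continue
        path, label = line.rsplit(None, 1)  # split at the last whitespace
        f_out.write('%s %d\n' % (path, int(label) - 1))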

Matt Marshall

Nov 16, 2016, 9:51:09 PM
to Caffe Users, ehalba...@gmail.com
How did you obtain that history? I've tried tee, but I guess it doesn't work if training is interrupted with Ctrl+C.



Ehab Albadawy

Nov 17, 2016, 11:54:16 AM
to Caffe Users, ehalba...@gmail.com
You can easily get it if you run the training from the terminal (if I understood you right), and it's OK if you interrupt the training; the history is still there in the terminal output.
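A note on capturing it: Caffe's console output goes to stderr (via glog), so redirecting it as in "caffe train ... 2>&1 | tee train.log" keeps a copy on disk even if the run is later interrupted. Below is a rough sketch for pulling the loss history back out of such a log; the file name and the "Iteration N, loss = X" line format are assumptions about the usual Caffe console output.

import re

# Extract (iteration, loss) pairs from a captured training log.
pattern = re.compile(r'Iteration (\d+).*?loss = ([0-9.eE+-]+)')
history = []
with open('train.log') as f:  # hypothetical file name
    for line in f:
        m = pattern.search(line)
        if m:
            history.append((int(m.group(1)), float(m.group(2))))

for iteration, loss in history:
    print(iteration, loss)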