Can't start training : blob.cpp:133] Check failed: data_

29 views
Skip to first unread message

n1ma Sh1

unread,
Sep 28, 2022, 6:43:18 AM9/28/22
to Caffe Users
Ubuntu1804

I'm testing a table edge detection project. The original image size in the dataset is 1280*720, but I can't start the training process.

Error message
 
```
I0928 17:49:53.488544 30253 layer_factory.hpp:77] Creating layer data
I0928 17:49:53.488591 30253 db_lmdb.cpp:35] Opened lmdb /home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_lmdb
I0928 17:49:53.488602 30253 net.cpp:86] Creating Layer data
I0928 17:49:53.488607 30253 net.cpp:382] data -> data
I0928 17:49:53.488624 30253 net.cpp:382] data -> label
I0928 17:49:53.488631 30253 data_transformer.cpp:25] Loading mean file from: /home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_mean.binaryproto
F0928 17:49:53.488655 30253 blob.cpp:133] Check failed: data_
*** Check failure stack trace: ***
    @     0x7fc5b9e070cd  google::LogMessage::Fail()
    @     0x7fc5b9e08f33  google::LogMessage::SendToLog()
    @     0x7fc5b9e06c28  google::LogMessage::Flush()
    @     0x7fc5b9e09999  google::LogMessageFatal::~LogMessageFatal()
    @     0x7fc5ba2c83db  caffe::Blob<>::mutable_cpu_data()
    @     0x7fc5ba2d1db7  caffe::Blob<>::FromProto()
    @     0x7fc5ba2f7632  caffe::DataTransformer<>::DataTransformer()
    @     0x7fc5ba2239a5  caffe::BaseDataLayer<>::LayerSetUp()
    @     0x7fc5ba223abc  caffe::BasePrefetchingDataLayer<>::LayerSetUp()
    @     0x7fc5ba2e6eb3  caffe::Net<>::Init()
    @     0x7fc5ba2e861e  caffe::Net<>::Net()
    @     0x7fc5ba1ae0c4  caffe::Solver<>::InitTrainNet()
    @     0x7fc5ba1ae673  caffe::Solver<>::Init()
    @     0x7fc5ba1ae99f  caffe::Solver<>::Solver()
    @     0x7fc5ba1940f1  caffe::Creator_AdamSolver<>()
    @     0x55b980d7aec7  (unknown)
    @     0x55b980d76f80  (unknown)
    @     0x7fc5b8413c87  __libc_start_main
    @     0x55b980d77a3a  (unknown)
```

Full log:

I0928 17:49:53.486743 30253 caffe.cpp:197] Use CPU.
I0928 17:49:53.486958 30253 solver.cpp:45] Initializing solver from parameters:
test_iter: 75
test_interval: 640
base_lr: 0.0001
display: 200
max_iter: 50000
lr_policy: "fixed"
power: 1
momentum: 0.9
weight_decay: 0.0005
snapshot: 2000
snapshot_prefix: "./collision_avoidance"
solver_mode: CPU
net: "/home/lhy/Downloads/code/Algorithm-collision_anti-drop/algorithm/Resnet18_classification/Resnet18_classification_train.prototxt"
train_state {
  level: 0
  stage: ""
}
type: "Adam"
I0928 17:49:53.487066 30253 solver.cpp:102] Creating training net from net file: /home/lhy/Downloads/code/Algorithm-collision_anti-drop/algorithm/Resnet18_classification/Resnet18_classification_train.prototxt
I0928 17:49:53.487984 30253 net.cpp:296] The NetState phase (0) differed from the phase (1) specified by a rule in layer data
I0928 17:49:53.488020 30253 net.cpp:53] Initializing net from parameters:
name: "ResNet-18"
state {
  phase: TRAIN
  level: 0
  stage: ""
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: true
    mean_file: "/home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_mean.binaryproto"
  }
  data_param {
    source: "/home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_lmdb"
    batch_size: 32
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 3
    kernel_size: 7
    stride: 2
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn_conv1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale_conv1"
  type: "Scale"
  bottom: "conv1"
  top: "conv1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv1_relu"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "res2a_branch1"
  type: "Convolution"
  bottom: "pool1"
  top: "res2a_branch1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn2a_branch1"
  type: "BatchNorm"
  bottom: "res2a_branch1"
  top: "res2a_branch1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale2a_branch1"
  type: "Scale"
  bottom: "res2a_branch1"
  top: "res2a_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_branch2a"
  type: "Convolution"
  bottom: "pool1"
  top: "res2a_branch2a"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn2a_branch2a"
  type: "BatchNorm"
  bottom: "res2a_branch2a"
  top: "res2a_branch2a"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale2a_branch2a"
  type: "Scale"
  bottom: "res2a_branch2a"
  top: "res2a_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_branch2a_relu"
  type: "ReLU"
  bottom: "res2a_branch2a"
  top: "res2a_branch2a"
}
layer {
  name: "res2a_branch2b"
  type: "Convolution"
  bottom: "res2a_branch2a"
  top: "res2a_branch2b"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn2a_branch2b"
  type: "BatchNorm"
  bottom: "res2a_branch2b"
  top: "res2a_branch2b"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale2a_branch2b"
  type: "Scale"
  bottom: "res2a_branch2b"
  top: "res2a_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a"
  type: "Eltwise"
  bottom: "res2a_branch1"
  bottom: "res2a_branch2b"
  top: "res2a"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res2a_relu"
  type: "ReLU"
  bottom: "res2a"
  top: "res2a"
}
layer {
  name: "res2b_branch2a"
  type: "Convolution"
  bottom: "res2a"
  top: "res2b_branch2a"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn2b_branch2a"
  type: "BatchNorm"
  bottom: "res2b_branch2a"
  top: "res2b_branch2a"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale2b_branch2a"
  type: "Scale"
  bottom: "res2b_branch2a"
  top: "res2b_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_branch2a_relu"
  type: "ReLU"
  bottom: "res2b_branch2a"
  top: "res2b_branch2a"
}
layer {
  name: "res2b_branch2b"
  type: "Convolution"
  bottom: "res2b_branch2a"
  top: "res2b_branch2b"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn2b_branch2b"
  type: "BatchNorm"
  bottom: "res2b_branch2b"
  top: "res2b_branch2b"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale2b_branch2b"
  type: "Scale"
  bottom: "res2b_branch2b"
  top: "res2b_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b"
  type: "Eltwise"
  bottom: "res2a"
  bottom: "res2b_branch2b"
  top: "res2b"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res2b_relu"
  type: "ReLU"
  bottom: "res2b"
  top: "res2b"
}
layer {
  name: "res3a_branch1"
  type: "Convolution"
  bottom: "res2b"
  top: "res3a_branch1"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn3a_branch1"
  type: "BatchNorm"
  bottom: "res3a_branch1"
  top: "res3a_branch1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale3a_branch1"
  type: "Scale"
  bottom: "res3a_branch1"
  top: "res3a_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_branch2a"
  type: "Convolution"
  bottom: "res2b"
  top: "res3a_branch2a"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn3a_branch2a"
  type: "BatchNorm"
  bottom: "res3a_branch2a"
  top: "res3a_branch2a"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale3a_branch2a"
  type: "Scale"
  bottom: "res3a_branch2a"
  top: "res3a_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_branch2a_relu"
  type: "ReLU"
  bottom: "res3a_branch2a"
  top: "res3a_branch2a"
}
layer {
  name: "res3a_branch2b"
  type: "Convolution"
  bottom: "res3a_branch2a"
  top: "res3a_branch2b"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn3a_branch2b"
  type: "BatchNorm"
  bottom: "res3a_branch2b"
  top: "res3a_branch2b"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale3a_branch2b"
  type: "Scale"
  bottom: "res3a_branch2b"
  top: "res3a_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a"
  type: "Eltwise"
  bottom: "res3a_branch1"
  bottom: "res3a_branch2b"
  top: "res3a"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res3a_relu"
  type: "ReLU"
  bottom: "res3a"
  top: "res3a"
}
layer {
  name: "res3b_branch2a"
  type: "Convolution"
  bottom: "res3a"
  top: "res3b_branch2a"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn3b_branch2a"
  type: "BatchNorm"
  bottom: "res3b_branch2a"
  top: "res3b_branch2a"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale3b_branch2a"
  type: "Scale"
  bottom: "res3b_branch2a"
  top: "res3b_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_branch2a_relu"
  type: "ReLU"
  bottom: "res3b_branch2a"
  top: "res3b_branch2a"
}
layer {
  name: "res3b_branch2b"
  type: "Convolution"
  bottom: "res3b_branch2a"
  top: "res3b_branch2b"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn3b_branch2b"
  type: "BatchNorm"
  bottom: "res3b_branch2b"
  top: "res3b_branch2b"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale3b_branch2b"
  type: "Scale"
  bottom: "res3b_branch2b"
  top: "res3b_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b"
  type: "Eltwise"
  bottom: "res3a"
  bottom: "res3b_branch2b"
  top: "res3b"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res3b_relu"
  type: "ReLU"
  bottom: "res3b"
  top: "res3b"
}
layer {
  name: "res4a_branch1"
  type: "Convolution"
  bottom: "res3b"
  top: "res4a_branch1"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn4a_branch1"
  type: "BatchNorm"
  bottom: "res4a_branch1"
  top: "res4a_branch1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale4a_branch1"
  type: "Scale"
  bottom: "res4a_branch1"
  top: "res4a_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_branch2a"
  type: "Convolution"
  bottom: "res3b"
  top: "res4a_branch2a"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "bn4a_branch2a"
  type: "BatchNorm"
  bottom: "res4a_branch2a"
  top: "res4a_branch2a"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale4a_branch2a"
  type: "Scale"
  bottom: "res4a_branch2a"
  top: "res4a_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_branch2a_relu"
  type: "ReLU"
  bottom: "res4a_branch2a"
  top: "res4a_branch2a"
}
layer {
  name: "res4a_branch2b-1"
  type: "Convolution"
  bottom: "res4a_branch2a"
  top: "res4a_branch2b-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn4a_branch2b"
  type: "BatchNorm"
  bottom: "res4a_branch2b-1"
  top: "res4a_branch2b-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale4a_branch2b"
  type: "Scale"
  bottom: "res4a_branch2b-1"
  top: "res4a_branch2b-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a"
  type: "Eltwise"
  bottom: "res4a_branch1"
  bottom: "res4a_branch2b-1"
  top: "res4a"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res4a_relu"
  type: "ReLU"
  bottom: "res4a"
  top: "res4a"
}
layer {
  name: "res4b_branch2a-1"
  type: "Convolution"
  bottom: "res4a"
  top: "res4b_branch2a-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn4b_branch2a"
  type: "BatchNorm"
  bottom: "res4b_branch2a-1"
  top: "res4b_branch2a-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale4b_branch2a"
  type: "Scale"
  bottom: "res4b_branch2a-1"
  top: "res4b_branch2a-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b_branch2a_relu"
  type: "ReLU"
  bottom: "res4b_branch2a-1"
  top: "res4b_branch2a-1"
}
layer {
  name: "res4b_branch2b-1"
  type: "Convolution"
  bottom: "res4b_branch2a-1"
  top: "res4b_branch2b-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn4b_branch2b"
  type: "BatchNorm"
  bottom: "res4b_branch2b-1"
  top: "res4b_branch2b-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale4b_branch2b"
  type: "Scale"
  bottom: "res4b_branch2b-1"
  top: "res4b_branch2b-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b"
  type: "Eltwise"
  bottom: "res4a"
  bottom: "res4b_branch2b-1"
  top: "res4b"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res4b_relu"
  type: "ReLU"
  bottom: "res4b"
  top: "res4b"
}
layer {
  name: "res5a_branch1-1"
  type: "Convolution"
  bottom: "res4b"
  top: "res5a_branch1-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 512
    pad: 0
    kernel_size: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5a_branch1"
  type: "BatchNorm"
  bottom: "res5a_branch1-1"
  top: "res5a_branch1-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale5a_branch1"
  type: "Scale"
  bottom: "res5a_branch1-1"
  top: "res5a_branch1-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a_branch2a-1"
  type: "Convolution"
  bottom: "res4b"
  top: "res5a_branch2a-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5a_branch2a"
  type: "BatchNorm"
  bottom: "res5a_branch2a-1"
  top: "res5a_branch2a-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale5a_branch2a"
  type: "Scale"
  bottom: "res5a_branch2a-1"
  top: "res5a_branch2a-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a_branch2a_relu"
  type: "ReLU"
  bottom: "res5a_branch2a-1"
  top: "res5a_branch2a-1"
}
layer {
  name: "res5a_branch2b-1"
  type: "Convolution"
  bottom: "res5a_branch2a-1"
  top: "res5a_branch2b-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5a_branch2b"
  type: "BatchNorm"
  bottom: "res5a_branch2b-1"
  top: "res5a_branch2b-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale5a_branch2b"
  type: "Scale"
  bottom: "res5a_branch2b-1"
  top: "res5a_branch2b-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a"
  type: "Eltwise"
  bottom: "res5a_branch1-1"
  bottom: "res5a_branch2b-1"
  top: "res5a"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res5a_relu"
  type: "ReLU"
  bottom: "res5a"
  top: "res5a"
}
layer {
  name: "res5b_branch2a-1"
  type: "Convolution"
  bottom: "res5a"
  top: "res5b_branch2a-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5b_branch2a"
  type: "BatchNorm"
  bottom: "res5b_branch2a-1"
  top: "res5b_branch2a-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale5b_branch2a"
  type: "Scale"
  bottom: "res5b_branch2a-1"
  top: "res5b_branch2a-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5b_branch2a_relu"
  type: "ReLU"
  bottom: "res5b_branch2a-1"
  top: "res5b_branch2a-1"
}
layer {
  name: "res5b_branch2b-1"
  type: "Convolution"
  bottom: "res5b_branch2a-1"
  top: "res5b_branch2b-1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 1
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5b_branch2b"
  type: "BatchNorm"
  bottom: "res5b_branch2b-1"
  top: "res5b_branch2b-1"
  batch_norm_param {
    moving_average_fraction: 0.9
  }
}
layer {
  name: "scale5b_branch2b"
  type: "Scale"
  bottom: "res5b_branch2b-1"
  top: "res5b_branch2b-1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5b"
  type: "Eltwise"
  bottom: "res5a"
  bottom: "res5b_branch2b-1"
  top: "res5b"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "res5b_relu"
  type: "ReLU"
  bottom: "res5b"
  top: "res5b"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "res5b"
  top: "pool5"
  pooling_param {
    pool: AVE
    kernel_size: 7
    stride: 1
  }
}
layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc2"
  param {
    lr_mult: 50
    decay_mult: 1
  }
  param {
    lr_mult: 100
    decay_mult: 1
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc2"
  bottom: "label"
  top: "loss"
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc2"
  bottom: "label"
  top: "accuracy"
}
I0928 17:49:53.488544 30253 layer_factory.hpp:77] Creating layer data
I0928 17:49:53.488591 30253 db_lmdb.cpp:35] Opened lmdb /home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_lmdb
I0928 17:49:53.488602 30253 net.cpp:86] Creating Layer data
I0928 17:49:53.488607 30253 net.cpp:382] data -> data
I0928 17:49:53.488624 30253 net.cpp:382] data -> label
I0928 17:49:53.488631 30253 data_transformer.cpp:25] Loading mean file from: /home/lhy/Downloads/code/Algorithm-collision_anti-drop/data_preprocess/dataset/whole_dataset_train_mean.binaryproto
F0928 17:49:53.488655 30253 blob.cpp:133] Check failed: data_
*** Check failure stack trace: ***
    @     0x7fc5b9e070cd  google::LogMessage::Fail()
    @     0x7fc5b9e08f33  google::LogMessage::SendToLog()
    @     0x7fc5b9e06c28  google::LogMessage::Flush()
    @     0x7fc5b9e09999  google::LogMessageFatal::~LogMessageFatal()
    @     0x7fc5ba2c83db  caffe::Blob<>::mutable_cpu_data()
    @     0x7fc5ba2d1db7  caffe::Blob<>::FromProto()
    @     0x7fc5ba2f7632  caffe::DataTransformer<>::DataTransformer()
    @     0x7fc5ba2239a5  caffe::BaseDataLayer<>::LayerSetUp()
    @     0x7fc5ba223abc  caffe::BasePrefetchingDataLayer<>::LayerSetUp()
    @     0x7fc5ba2e6eb3  caffe::Net<>::Init()
    @     0x7fc5ba2e861e  caffe::Net<>::Net()
    @     0x7fc5ba1ae0c4  caffe::Solver<>::InitTrainNet()
    @     0x7fc5ba1ae673  caffe::Solver<>::Init()
    @     0x7fc5ba1ae99f  caffe::Solver<>::Solver()
    @     0x7fc5ba1940f1  caffe::Creator_AdamSolver<>()
    @     0x55b980d7aec7  (unknown)
    @     0x55b980d76f80  (unknown)
    @     0x7fc5b8413c87  __libc_start_main
    @     0x55b980d77a3a  (unknown)

Reply all
Reply to author
Forward
0 new messages