Check failed: registry.count(type) == 1 (0 vs. 1) Unknown layer type


Ali MassahKiani

Mar 22, 2017, 8:56:50 AM
to Caffe Users
I run simple C++ code to pass an image to my pretrained caffemodel, but when I try to create the network this error occurs:

Check failed: registry.count(type) == 1 (0 vs. 1) Unknown layer type: Input (known types: Convolution, Eltwise, LRN, Pooling, Power, Python, ReLU, Sigmoid, Softmax, Split, TanH)



My prototxt file:

#############################  DATA Layer  #############################
layer {
  name: "data_1"
  type: "Input"
  top: "data_1"
  input_param { shape: { dim: 640 dim: 480 } }
}
#############################  CONV NET 1 #############################
layer {
  name: "conv1_1"
  type: "Convolution"
  bottom: "data_1"
  top: "conv1_1"
  param {
    name: "conv1_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "conv1_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 20
    kernel_size: 4
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1_1"
  type: "ReLU"
  bottom: "conv1_1"
  top: "conv1_1"
}
layer {
  name: "norm1_1"
  type: "LRN"
  bottom: "conv1_1"
  top: "norm1_1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1_1"
  type: "Pooling"
  bottom: "norm1_1"
  top: "pool1_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2_1"
  type: "Convolution"
  bottom: "pool1_1"
  top: "conv2_1"
  param {
    name: "conv2_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "conv2_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 40
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2_1"
  type: "ReLU"
  bottom: "conv2_1"
  top: "conv2_1"
}
layer {
  name: "norm2_1"
  type: "LRN"
  bottom: "conv2_1"
  top: "norm2_1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2_1"
  type: "Pooling"
  bottom: "norm2_1"
  top: "pool2_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv3_1"
  type: "Convolution"
  bottom: "pool2_1"
  top: "conv3_1"
  param {
    name: "conv3_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "conv3_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 60
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "pool3_1"
  type: "Pooling"
  bottom: "conv3_1"
  top: "pool3_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv4_1"
  type: "Convolution"
  bottom: "pool3_1"
  top: "conv4_1"
  param {
    name: "conv4_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "conv4_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 80
    kernel_size: 2
    stride: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "flatten_pool3_1"
  type: "Flatten"
  bottom: "pool3_1"
  top: "flatten_pool3_1"
}
layer {
  name: "flatten_conv4_1"
  type: "Flatten"
  bottom: "conv4_1"
  top: "flatten_conv4_1"
}
layer {
  name: "contact_conv"
  type: "Concat"
  bottom: "flatten_conv4_1"
  bottom: "flatten_pool3_1"
  top: "contact_conv"
}
layer {
  name: "deepid_1"
  type: "InnerProduct"
  bottom: "contact_conv"
  top: "deepid_1"
  param {
    name: "fc6_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "fc6_b"
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 160
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu6_1"
  type: "ReLU"
  bottom: "deepid_1"
  top: "deepid_1"
}
layer {
  name: "drop6_1"
  type: "Dropout"
  bottom: "deepid_1"
  top: "deepid_1"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8_1"
  type: "InnerProduct"
  bottom: "deepid_1"
  top: "fc8_1"
  param {
    name: "fc8_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "fc8_b"
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 10575
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "loss"
  type: "Softmax"
  bottom: "fc8_1"
  top: "loss"
}
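(Aside: the Input shape above specifies only two dims, but Caffe blobs feeding a Convolution layer are 4-D, num x channels x height x width, so it presumably needs the four-dim form that appears later in this thread: input_param { shape: { dim: 1 dim: 3 dim: 640 dim: 480 } }.)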

And my simple code:

#include <cuda_runtime.h>
#include <cstring>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <stdio.h>
#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/blob.hpp"

using namespace caffe;
using namespace std;

int main(int argc, char** argv) {
  Caffe::set_mode(Caffe::CPU);
  caffe::string netS = "C:/Users/127051/Documents/Visual Studio 2015/Projects/C++/Caffe/CaffeTest/x64/Release/net_struct.prototxt";
  caffe::string netW = "C:/Users/127051/Documents/Visual Studio 2015/Projects/C++/Caffe/CaffeTest/x64/Release/net_weights.caffemodel";

  // get the net
  Net<float> caffe_test_net(netS, TEST);
  // get trained net
  caffe_test_net.CopyTrainedLayersFrom(netW);

  // get datum
  Datum datum;
  if (!ReadImageToDatum("1-08.jpg", 1, 640, 480, &datum)) {
    LOG(ERROR) << "Error during file reading";
  }

  // get the blob
  Blob<float>* blob = new Blob<float>(1, datum.channels(), datum.height(), datum.width());

  // get the blobproto
  BlobProto blob_proto;
  blob_proto.set_num(1);
  blob_proto.set_channels(datum.channels());
  blob_proto.set_height(datum.height());
  blob_proto.set_width(datum.width());
  const int size_in_datum = std::max<int>(datum.data().size(), datum.float_data_size());
  for (int i = 0; i < size_in_datum; ++i) {
    blob_proto.add_data(0.);
  }
  const string& data = datum.data();
  if (data.size() != 0) {
    for (int i = 0; i < size_in_datum; ++i) {
      blob_proto.set_data(i, blob_proto.data(i) + (uint8_t)data[i]);
    }
  }

  // set data into blob
  blob->FromProto(blob_proto);

  // fill the vector
  vector<Blob<float>*> bottom;
  bottom.push_back(blob);
  float loss = 0.0;
  const vector<Blob<float>*>& result = caffe_test_net.Forward(bottom, &loss);

  // Here I could use an ArgMax layer, but for now I do a simple for :)
  float max = 0;
  int max_i = 0;
  for (int i = 0; i < result[0]->count(); ++i) {  // scan every output of the net
    float value = result[0]->cpu_data()[i];
    if (max < value) {
      max = value;
      max_i = i;
    }
  }
  LOG(ERROR) << "max: " << max << " i " << max_i;
  return 0;
}
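Aside: the Forward(bottom, &loss) overload used above was already deprecated in Caffe by 2017. A minimal sketch of the same step going through the net's own input blob instead, assuming the caffe_test_net and blob variables from the code above:

// Copy the image into the net's first input blob, then run a plain Forward().
Blob<float>* input_blob = caffe_test_net.input_blobs()[0];
input_blob->CopyFrom(*blob, false, true);  // copy_diff = false, reshape = true
const vector<Blob<float>*>& result = caffe_test_net.Forward();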

Przemek D

Mar 23, 2017, 7:22:03 AM
to Caffe Users
Your issue looks similar to https://github.com/BVLC/caffe/issues/3846 - have you consulted this answer?
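(The linked issue is about layer registration being dropped when Caffe is linked as a static library: only the layer types referenced somewhere in the final binary survive, which is why the "known types" list above is so short. A commonly cited workaround, sketched here rather than quoted from the issue, is a small extra translation unit in the application that forces the missing layer objects to be linked in:

// force_link.cpp -- hypothetical helper file added to the application build.
// Referencing the explicit instantiations keeps the linker from discarding
// the object file whose static initializer registers the "Input" type.
#include "caffe/common.hpp"
#include "caffe/layers/input_layer.hpp"

namespace caffe {
extern INSTANTIATE_CLASS(InputLayer);
}

Building Caffe as a shared library instead avoids the problem entirely.)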

Ali MassahKiani

Mar 23, 2017, 8:14:33 AM
to Caffe Users
I tried this format:
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 640
  dim: 480
}
but it doesn't work!
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0323 16:43:39.494524  6784 common.cpp:36] System entropy source not available, using fallback algorithm to generate seed instead.
I0323 16:43:43.032114  6784 upgrade_proto.cpp:67] Attempting to upgrade input file specified using deprecated input fields: C:/Users/127051/Documents/Visual Studio 2015/Projects/C++/Caffe/CaffeTest/x64/Release/net_struct.prototxt
I0323 16:43:43.033090  6784 upgrade_proto.cpp:70] Successfully upgraded file specified using deprecated input fields.
W0323 16:43:43.033602  6784 upgrade_proto.cpp:72] Note that future Caffe releases will only support input layers and not input fields.
I0323 16:43:43.034595  6784 net.cpp:53] Initializing net from parameters:
state {
  phase: TEST
  level: 0
}
layer {
  name: "input"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 640
      dim: 480
    }
  }
}
...

I0323 16:43:44.057319  6784 layer_factory.cpp:58] Creating layer input
F0323 16:43:44.068845  6784 layer_factory.cpp:62] Check failed: registry.count(type) == 1 (0 vs. 1) Unknown layer type: Input (known types: Convolution, Eltwise, LRN, Pooling, Power, Python, ReLU, Sigmoid, Softmax, Split, TanH)
*** Check failure stack trace: ***

Przemek D

Mar 24, 2017, 9:00:07 AM
to Caffe Users
Try to run the same prototxt from a command line interface, like so. Does the network load properly?
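(For a quick load check, assuming the caffe binary is built and the file name from earlier in the thread, something like this should work:

caffe time -model net_struct.prototxt -iterations 1

caffe time builds the net and runs a timed forward/backward pass, so it aborts immediately if a layer type is unknown.)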

Ali MassahKiani

Mar 24, 2017, 10:50:12 AM
to Caffe Users
I ran caffe test but the output is:

commands:
  train           train or finetune a model
  test            score a model
  device_query    show GPU diagnostic information
  time            benchmark model execution time

No modules matched: use -help

I think it's because the data layer is Input and has no data!

Przemek D

Mar 27, 2017, 3:36:32 AM
to Caffe Users
No, you got this output because you called the command incorrectly. The correct usage of the caffe command line is as follows:
caffe: command line brew
usage: caffe <command> <args>

commands:
  train           train or finetune a model
  test            score a model
  device_query    show GPU diagnostic information
  time            benchmark model execution time

  -gpu (Optional; run in GPU mode on given device IDs separated by ','. Use
    '-gpu all' to run on all available GPUs. The effective training batch
    size is multiplied by the number of devices.) type: string default: ""
  -iterations (The number of iterations to run.) type: int32 default: 50
  -level (Optional; network level.) type: int32 default: 0
  -model (The model definition protocol buffer text file.) type: string
    default: ""
  -phase (Optional; network phase (TRAIN or TEST). Only used for 'time'.)
    type: string default: ""
  -sighup_effect (Optional; action to take when a SIGHUP signal is received:
    snapshot, stop or none.) type: string default: "snapshot"
  -sigint_effect (Optional; action to take when a SIGINT signal is received:
    snapshot, stop or none.) type: string default: "stop"
  -snapshot (Optional; the snapshot solver state to resume training.)
    type: string default: ""
  -solver (The solver definition protocol buffer text file.) type: string
    default: ""
  -stage (Optional; network stages (not to be confused with phase), separated
    by ','.) type: string default: ""
  -weights (Optional; the pretrained weights to initialize finetuning,
    separated by ','. Cannot be set simultaneously with snapshot.)
    type: string default: ""
Read the tutorial I linked in my previous answer in case of further doubt.

Guillaume Dumont

Mar 27, 2017, 8:33:47 AM
to Caffe Users

Ali MassahKiani

Mar 28, 2017, 12:53:38 AM
to Caffe Users
I solved this problem using this link: