using a pre-trained caffe model for classification of memory images

2,217 views
Skip to first unread message

hf2548

unread,
Apr 14, 2015, 6:00:16 AM4/14/15
to caffe...@googlegroups.com
Hi

I want to use a pre-trained caffe model for classification of memory images.
I search the internet and find some good codes and change them as in the end.
The cpp code can be compiled, but after running the code, I receive the following Error:
----------------------------------------------------------------------------------------------------------------------------------------------------------
ERORR :
----------------------------------------------------------------------------------------------------------------------------------------------------------
libprotobuf ERROR google/protobuf/text_format.cc:172] Error parsing text-format caffe.NetParameter: 18:8: Message type "caffe.LayerParameter" has no field named "layers".
WARNING: Logging before InitGoogleLogging() is written to STDERR
F0414 14:25:23.161783 10013 upgrade_proto.cpp:623] Check failed: ReadProtoFromTextFile(param_file, param) Failed to parse NetParameter file: /home/image-server/Desktop/caffe-code/caffe/models/bvlc_reference_caffenet/test1.prototxt
*** Check failure stack trace: ***
Aborted (core dumped)

Here is my cpp code and prototxt:
----------------------------------------------------------------------------------------------------------------------------------------------------------
cpp code :
----------------------------------------------------------------------------------------------------------------------------------------------------------
#include <cuda_runtime.h>
 
#include <cstring>
#include <cstdlib>
#include <vector>
 
#include <string>
#include <iostream>
#include <stdio.h>
#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/blob.hpp"

using namespace caffe;
using namespace std;
 
int main(int argc, char** argv) {
int device_id = 0;
Caffe::SetDevice(device_id);
Caffe::set_mode(Caffe::GPU);
Caffe::set_phase(Caffe::TEST);
//get the net
Net<float> caffe_test_net("./models/bvlc_reference_caffenet/test1.prototxt");
//get trained net
caffe_test_net.CopyTrainedLayersFrom("./models/bvlc_reference_caffenet/caffenet_train_iter_5000.caffemodel");
//get datum
Datum datum;
if (!ReadImageToDatum("./examples/images/cat.jpg", 1, 256, 256, &datum)) {
LOG(ERROR) << "Error during file reading";
}
 
//get the blob
Blob<float>* blob = new Blob<float>(1, datum.channels(), datum.height(), datum.width());
//get the blobproto
BlobProto blob_proto;
blob_proto.set_num(1);
blob_proto.set_channels(datum.channels());
blob_proto.set_height(datum.height());
blob_proto.set_width(datum.width());
const int data_size = datum.channels() * datum.height() * datum.width();
int size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
for (int i = 0; i < size_in_datum; ++i) {
blob_proto.add_data(0.);
}
const string& data = datum.data();
if (data.size() != 0) {
for (int i = 0; i < size_in_datum; ++i) {
blob_proto.set_data(i, blob_proto.data(i) + (uint8_t)data[i]);
}
}
//set data into blob
blob->FromProto(blob_proto);
 
//fill the vector
vector<Blob<float>*> bottom;
bottom.push_back(blob);
float type = 0.0;
 
const vector<Blob<float>*>& result = caffe_test_net.Forward(bottom, &type);
 
//Here I can use the argmax layer, but for now I do a simple for :)
float max = 0;
float max_i = 0;
for (int i = 0; i < 3; ++i) {
float value = result[0]->cpu_data()[i];
if (max < value){
max = value;
max_i = i;
}
}
LOG(ERROR) << "max: " << max << " i " << max_i;
 
return 0;
}
----------------------------------------------------------------------------------------------------------------------------------------------------------
prototxt file:
----------------------------------------------------------------------------------------------------------------------------------------------------------
name: "CaffeNet"
layers {
  name: "data"
  type: MEMORY_DATA
  top: "data"
  top: "label"
  memory_data_param {
   batch_size: 1
   channels: 3
   height: 227
   width: 227
  }
 transform_param {
    crop_size: 227
    mirror: true
    mean_file: "./data/ilsvrc12/mean.binaryproto"
  }
layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
  }
}
layers {
  name: "relu1"
  type: RELU
  bottom: "conv1"
  top: "conv1"
}
layers {
  name: "pool1"
  type: POOLING
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "norm1"
  type: LRN
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "conv2"
  type: CONVOLUTION
  bottom: "norm1"
  top: "conv2"
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
  }
}
layers {
  name: "relu2"
  type: RELU
  bottom: "conv2"
  top: "conv2"
}
layers {
  name: "pool2"
  type: POOLING
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "norm2"
  type: LRN
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "conv3"
  type: CONVOLUTION
  bottom: "norm2"
  top: "conv3"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
  }
}
layers {
  name: "relu3"
  type: RELU
  bottom: "conv3"
  top: "conv3"
}
layers {
  name: "conv4"
  type: CONVOLUTION
  bottom: "conv3"
  top: "conv4"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
  }
}
layers {
  name: "relu4"
  type: RELU
  bottom: "conv4"
  top: "conv4"
}
layers {
  name: "conv5"
  type: CONVOLUTION
  bottom: "conv4"
  top: "conv5"
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
  }
}
layers {
  name: "relu5"
  type: RELU
  bottom: "conv5"
  top: "conv5"
}
layers {
  name: "pool5"
  type: POOLING
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "fc6"
  type: INNER_PRODUCT
  bottom: "pool5"
  top: "fc6"
  inner_product_param {
    num_output: 32
  }
}
layers {
  name: "relu6"
  type: RELU
  bottom: "fc6"
  top: "fc6"
}
layers {
  name: "drop6"
  type: DROPOUT
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layers {
  name: "fc7"
  type: INNER_PRODUCT
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 32
  }
}
layers {
  name: "relu7"
  type: RELU
  bottom: "fc7"
  top: "fc7"
}
layers {
  name: "drop7"
  type: DROPOUT
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layers {
  name: "fc8"
  type: INNER_PRODUCT
  bottom: "fc7"
  top: "fc8"
  inner_product_param {
    num_output: 3
  }
}
layers {
  name: "prob"
  type: SOFTMAX
  bottom: "fc8"
  top: "prob"
}
----------------------------------------------------------------------------------------------------------------------------------------------------------

Thanks alot

Spandana Gella

unread,
Apr 17, 2015, 10:36:13 AM4/17/15
to caffe...@googlegroups.com
If you are using latest version of caffe. The string later type syntax has been changed to 'layer{}' instead of 'layers{}'. Try changing it in prototxt file and see if it resolves.

spandana

Jeremy Rutman

unread,
Apr 17, 2016, 12:14:09 PM4/17/16
to Caffe Users
It would appear the layer has been renamed back to layers{} as I just hit this
root@05eae3ced4f9:~/caffe# caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -gpu 0 -iterations 100
libdc1394 error
: Failed to initialize libdc1394
I0417
16:08:03.216303   284 caffe.cpp:134] Use GPU with device ID 0
[libprotobuf ERROR google/protobuf/text_format.cc:245] Error parsing text-format caffe.NetParameter: 2:7: Message type "caffe.NetParameter" has no field named "layer".
After replacing 'layer' with 'layers' in the prototxt I hit 
[libprotobuf ERROR google/protobuf/text_format.cc:245] Error parsing text-format caffe.NetParameter: 4:9: Expected integer or identifier.

the error is at the part of the protoxt defining the layer type (line 4 is :  type: "Data")  
This is with the latest caffe, just downloaded 17 april 2016

Evan Shelhamer

unread,
Apr 17, 2016, 4:21:49 PM4/17/16
to Jeremy Rutman, Caffe Users
layer has been renamed back to layers{}

​No, the arrow of time flies forward from `layers` -> `layer`.​
 
​You are likely using a (quite) old version of Caffe and I recommend upgrading.​

Evan Shelhamer





--
You received this message because you are subscribed to the Google Groups "Caffe Users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to caffe-users...@googlegroups.com.
To post to this group, send email to caffe...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/caffe-users/2f1a97de-dee0-432d-a7e3-9b8133d6e504%40googlegroups.com.

For more options, visit https://groups.google.com/d/optout.

Jeremy Rutman

unread,
Apr 17, 2016, 6:18:41 PM4/17/16
to Caffe Users, jeremy...@gmail.com
yes - too many forking branches in a container made me lose my way

Jan

unread,
Apr 18, 2016, 9:48:10 AM4/18/16
to Caffe Users, jeremy...@gmail.com
It is "layer". Ref: https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto#L92.

There is a tool to upgrade an old prototxt, see https://github.com/BVLC/caffe/blob/master/tools/upgrade_net_proto_text.cpp. It should be compiled along with all the other caffe binaries.

Jan
Reply all
Reply to author
Forward
0 new messages