Hi
I want to use a pre-trained Caffe model for classification of in-memory images.
I searched the internet, found some useful code, and modified it as shown at the end.
The C++ code compiles, but when I run it I get the following error:
----------------------------------------------------------------------------------------------------------------------------------------------------------
ERROR:
----------------------------------------------------------------------------------------------------------------------------------------------------------
libprotobuf ERROR google/protobuf/text_format.cc:172] Error parsing text-format caffe.NetParameter: 18:8: Message type "caffe.LayerParameter" has no field named "layers".
WARNING: Logging before InitGoogleLogging() is written to STDERR
F0414 14:25:23.161783 10013 upgrade_proto.cpp:623] Check failed: ReadProtoFromTextFile(param_file, param) Failed to parse NetParameter file: /home/image-server/Desktop/caffe-code/caffe/models/bvlc_reference_caffenet/test1.prototxt
*** Check failure stack trace: ***
Aborted (core dumped)
Here are my C++ code and prototxt file:
----------------------------------------------------------------------------------------------------------------------------------------------------------
cpp code :
----------------------------------------------------------------------------------------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstring>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <stdio.h>
#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/blob.hpp"
using namespace caffe;
using namespace std;
int main(int argc, char** argv) {
int device_id = 0;
Caffe::SetDevice(device_id);
Caffe::set_mode(Caffe::GPU);
Caffe::set_phase(Caffe::TEST);
//get the net
Net<float> caffe_test_net("./models/bvlc_reference_caffenet/test1.prototxt");
//get trained net
caffe_test_net.CopyTrainedLayersFrom("./models/bvlc_reference_caffenet/caffenet_train_iter_5000.caffemodel");
//get datum
Datum datum;
if (!ReadImageToDatum("./examples/images/cat.jpg", 1, 256, 256, &datum)) {
LOG(ERROR) << "Error during file reading";
}
//get the blob
Blob<float>* blob = new Blob<float>(1, datum.channels(), datum.height(), datum.width());
//get the blobproto
BlobProto blob_proto;
blob_proto.set_num(1);
blob_proto.set_channels(datum.channels());
blob_proto.set_height(datum.height());
blob_proto.set_width(datum.width());
const int data_size = datum.channels() * datum.height() * datum.width();
int size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
for (int i = 0; i < size_in_datum; ++i) {
blob_proto.add_data(0.);
}
const string& data = datum.data();
if (data.size() != 0) {
for (int i = 0; i < size_in_datum; ++i) {
blob_proto.set_data(i, blob_proto.data(i) + (uint8_t)data[i]);
}
}
//set data into blob
blob->FromProto(blob_proto);
//fill the vector
vector<Blob<float>*> bottom;
bottom.push_back(blob);
float type = 0.0;
const vector<Blob<float>*>& result = caffe_test_net.Forward(bottom, &type);
//Here I can use the argmax layer, but for now I do a simple for :)
float max = 0;
float max_i = 0;
for (int i = 0; i < 3; ++i) {
float value = result[0]->cpu_data()[i];
if (max < value){
max = value;
max_i = i;
}
}
LOG(ERROR) << "max: " << max << " i " << max_i;
return 0;
}
----------------------------------------------------------------------------------------------------------------------------------------------------------
prototxt file:
----------------------------------------------------------------------------------------------------------------------------------------------------------
name: "CaffeNet"
# NOTE(review): this file uses the deprecated V1 syntax ("layers" with enum
# layer types). If a brace-fixed file still fails to parse, upgrade it with
# Caffe's tools/upgrade_net_proto_text utility.
layers {
  name: "data"
  type: MEMORY_DATA
  top: "data"
  top: "label"
  memory_data_param {
    batch_size: 1
    channels: 3
    height: 227
    width: 227
  }
  transform_param {
    crop_size: 227
    mirror: true
    mean_file: "./data/ilsvrc12/mean.binaryproto"
  }
}
# ^ This closing brace was missing in the original file: the data layer was
# never closed, so the text-format parser tried to read the next "layers"
# token as a field *inside* the layer message — exactly the reported error
# at line 18, column 8: 'has no field named "layers"'.
layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
  }
}
layers {
  name: "relu1"
  type: RELU
  bottom: "conv1"
  top: "conv1"
}
layers {
  name: "pool1"
  type: POOLING
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "norm1"
  type: LRN
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "conv2"
  type: CONVOLUTION
  bottom: "norm1"
  top: "conv2"
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
  }
}
layers {
  name: "relu2"
  type: RELU
  bottom: "conv2"
  top: "conv2"
}
layers {
  name: "pool2"
  type: POOLING
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "norm2"
  type: LRN
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "conv3"
  type: CONVOLUTION
  bottom: "norm2"
  top: "conv3"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
  }
}
layers {
  name: "relu3"
  type: RELU
  bottom: "conv3"
  top: "conv3"
}
layers {
  name: "conv4"
  type: CONVOLUTION
  bottom: "conv3"
  top: "conv4"
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
  }
}
layers {
  name: "relu4"
  type: RELU
  bottom: "conv4"
  top: "conv4"
}
layers {
  name: "conv5"
  type: CONVOLUTION
  bottom: "conv4"
  top: "conv5"
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
  }
}
layers {
  name: "relu5"
  type: RELU
  bottom: "conv5"
  top: "conv5"
}
layers {
  name: "pool5"
  type: POOLING
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "fc6"
  type: INNER_PRODUCT
  bottom: "pool5"
  top: "fc6"
  inner_product_param {
    num_output: 32
  }
}
layers {
  name: "relu6"
  type: RELU
  bottom: "fc6"
  top: "fc6"
}
layers {
  name: "drop6"
  type: DROPOUT
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layers {
  name: "fc7"
  type: INNER_PRODUCT
  bottom: "fc6"
  top: "fc7"
  inner_product_param {
    num_output: 32
  }
}
layers {
  name: "relu7"
  type: RELU
  bottom: "fc7"
  top: "fc7"
}
layers {
  name: "drop7"
  type: DROPOUT
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layers {
  name: "fc8"
  type: INNER_PRODUCT
  bottom: "fc7"
  top: "fc8"
  inner_product_param {
    num_output: 3
  }
}
layers {
  name: "prob"
  type: SOFTMAX
  bottom: "fc8"
  top: "prob"
}
----------------------------------------------------------------------------------------------------------------------------------------------------------
Thanks a lot.