Nonsense prediction results with deploy in C++

31 views
Skip to first unread message

Atena Nguyen

unread,
Jul 25, 2017, 1:36:06 PM7/25/17
to Caffe Users
Hi Caffe users, 

I have trained the network and tested deploy.prototxt with both the Python and Matlab interfaces. Both produce reasonable results, but the C++ interface does not: I get nonsensical results, with every output after the softmax layer equal to 0.5.

My network is a modified fully convolutional network (FCN) for a segmentation-like problem.
My data are not images: they are single-channel arrays of size 1x1x32x32, read from txt files. Each label is a binary matrix of size 1x1x32x32,
and the output after the softmax layer has size 1x2x32x32.

Did anyone get this problem before? 

Thank you for your time, 


My cpp code is 
/*
* Load trained Caffe model and run single image test with OpenCV
*/

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/blob.hpp"
#include "caffe_reg.h"

#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif  // USE_OPENCV

#include "boost/smart_ptr/shared_ptr.hpp"

// Caffe's required library
#pragma comment(lib, "caffe.lib")

// enable namespace
using namespace std;
using namespace cv;
using namespace boost;
using namespace caffe;

// set caffe root path manually
const string CAFFE_ROOT = "";

int main(int argc, char** argv)
{
int rows = 32, cols = 32; 
double m;

// Geting input TU as img
Mat img = Mat::zeros(rows, cols, CV_16UC1);//Matrix to store values
string filename = "tmp_tu.txt";

ifstream fileStream(filename);
int cnt = 0;//index starts from 0
while (fileStream >> m)
{
int temprow = cnt / cols;
int tempcol = cnt % cols;
img.at<float>(temprow, tempcol) = m;
cnt++;
//cout << m << '\t';
}
// Set up Caffe
Caffe::set_mode(Caffe::CPU);
// Load net
boost::shared_ptr<Net<float> > net_; 
string model_file = "deploy_wres.prototxt"; 
net_.reset(new Net<float>(model_file , TEST) );

string trained_file = CAFFE_ROOT + "WRES_iter_10000.caffemodel";
net_->CopyTrainedLayersFrom(trained_file);

// set the patch for testing
vector<Mat> patches;
patches.push_back(img);

// push vector<Mat> to data layer
float loss = 0.0;
boost::shared_ptr<MemoryDataLayer<float> > memory_data_layer;
memory_data_layer = boost::static_pointer_cast<MemoryDataLayer<float>>(net_->layer_by_name("data"));

vector<int> labels(patches.size());
memory_data_layer->AddMatVector(patches, labels);

// Net forward
const vector<Blob<float>*> & results = net_->Forward(&loss);
float *output = results[0]->mutable_cpu_data();

// Display the output
for (int i = 0; i < 32 * 32; i++) {
printf(" %d is %.3f\n", i, output[i]);
}
waitKey(0);
}

my deploy file
# Input layer: one 1-channel 32x32 sample (N=1, C=1, H=32, W=32).
# NOTE: this is an "Input" layer, not a MemoryDataLayer — a C++ caller must
# fill net->input_blobs()[0] directly; layer_by_name("data") cannot be cast
# to MemoryDataLayer.
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 1
      dim: 32
      dim: 32
    }
  }
}
# First convolution: 1 -> 16 channels, 3x3 kernel, pad 1, stride 1
# (spatial size preserved at 32x32).
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 16
    pad: 1
    kernel_size: 3
    stride: 1
    # fillers only matter at training initialization; ignored once a
    # .caffemodel is loaded
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# In-place ReLU on conv0.
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "conv0"
  top: "conv0"
}
# Second convolution: 16 -> 32 channels, 3x3, pad 1, stride 1.
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "conv0"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# BatchNorm after conv1.
# FIX: at deploy/TEST time BatchNorm must use the moving-average statistics
# accumulated during training, not per-batch statistics. With a batch of a
# single sample, use_global_stats: false normalizes each activation map
# against itself, flattening the signal — a classic cause of uniform 0.5
# softmax outputs. Set it to true (or omit the field: Caffe defaults to
# global stats in the TEST phase).
layer {
  name: "batn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "batn1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
# Learned affine transform (gamma/beta) paired with batn1; applied in place.
layer {
  name: "scale1"
  type: "Scale"
  bottom: "batn1"
  top: "batn1"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
# In-place ReLU on the normalized/scaled activations.
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "batn1"
  top: "batn1"
}
# Third convolution: 32 -> 64 channels, 3x3, pad 1, stride 1.
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "batn1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# BatchNorm after conv2.
# FIX: use_global_stats must be true in a deploy/TEST prototxt so the stored
# training-time moving averages are used; with batch size 1, per-batch stats
# (use_global_stats: false) destroy the activations and lead to degenerate
# (uniform 0.5) softmax outputs.
layer {
  name: "batn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "batn2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
# Learned affine transform (gamma/beta) paired with batn2; applied in place.
layer {
  name: "scale2"
  type: "Scale"
  bottom: "batn2"
  top: "batn2"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
# In-place ReLU.
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "batn2"
  top: "batn2"
}
# Fourth convolution: 64 -> 64 channels, 3x3, pad 1, stride 1.
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "batn2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# BatchNorm after conv3.
# FIX: same as batn1/batn2 — deploy/TEST nets must run BatchNorm with the
# global (training-accumulated) statistics, so use_global_stats is true.
layer {
  name: "batn3"
  type: "BatchNorm"
  bottom: "conv3"
  top: "batn3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: true
  }
}
# Learned affine transform (gamma/beta) paired with batn3; applied in place.
layer {
  name: "scale3"
  type: "Scale"
  bottom: "batn3"
  top: "batn3"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
# In-place ReLU.
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "batn3"
  top: "batn3"
}
# 1x1 convolution producing the 2-class score map (1x2x32x32).
layer {
  name: "score"
  type: "Convolution"
  bottom: "batn3"
  top: "score"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 2
    pad: 0
    kernel_size: 1
  }
}
# Binary mask from the raw input: Threshold outputs 1 where data > 0, else 0.
layer {
  name: "threshold"
  type: "Threshold"
  bottom: "data"
  top: "threshold"
  threshold_param {
    threshold: 0
  }
}
# Duplicate the 1-channel mask along the channel axis to match the
# 2-channel score map.
layer {
  name: "concat"
  type: "Concat"
  bottom: "threshold"
  bottom: "threshold"
  top: "concat"
  concat_param {
    axis: 1
  }
}
# Elementwise product: zero out both class scores wherever the input <= 0.
# NOTE(review): wherever the mask is 0, both logits become 0 and softmax
# yields exactly 0.5/0.5. If the input values are not strictly positive,
# this masking alone produces uniform 0.5 outputs at those positions —
# verify the mask matches the intended region of interest.
layer {
  name: "eltwise"
  type: "Eltwise"
  bottom: "score"
  bottom: "concat"
  top: "eltwise"
  eltwise_param {
    operation: PROD
  }
}
# Per-pixel softmax over the 2 class channels (output 1x2x32x32).
layer {
  name: "softmax"
  type: "Softmax"
  bottom: "eltwise"
  top: "softmax"
}



Atena Nguyen

unread,
Jul 27, 2017, 1:49:16 AM7/27/17
to Caffe Users
Any suggestion ^^? 
Reply all
Reply to author
Forward
0 new messages