#ifndef CAFFE_NORMALIZE_LAYER_HPP_
#define CAFFE_NORMALIZE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Normalizes input.
 *
 * Each of the num() samples in the bottom blob is scaled so that its
 * flattened channels*height*width vector has unit L2 norm (see the
 * Forward implementations: out_i = in_i / sqrt(sum(in_i^2))).
 */
template <typename Dtype>
class NormalizeLayer : public Layer<Dtype> {
 public:
  explicit NormalizeLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  // NOTE(review): Reshape/Forward_*/Backward_* below take `top`/`bottom`
  // vectors by non-const reference while LayerSetUp takes `top` by const
  // reference; modern Caffe bases declare all of these as
  // `const vector<Blob<Dtype>*>&`. Confirm these actually override the
  // base-class virtuals in the Caffe version this fork builds against.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>& top);
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Legacy (pre layer-registry) type identification, kept for reference:
  // virtual inline LayerParameter_LayerType type() const {
  //   return LayerParameter_LayerType_NORMALIZE;
  // }
  virtual inline const char* type() const { return "Normalize"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  /// @brief in-place-safe L2 normalization of each sample (CPU path).
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>& top);
  /// @brief gradient of y = x/||x||: (top_diff - (y . top_diff) y) / ||x||.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>& bottom);

  // squared_ holds element-wise squares of the bottom data during Forward.
  // NOTE(review): sum_multiplier_ and norm_ are never touched in the
  // visible .cpp/.cu code — possibly leftovers; confirm before removing.
  Blob<Dtype> sum_multiplier_, norm_, squared_;
  // Set from NormalizeParameter::norm() in LayerSetUp but not read in the
  // visible code; also deviates from the trailing-underscore member naming
  // convention used above.
  int norm;
};

}  // namespace caffe

#endif  // CAFFE_NORMALIZE_LAYER_HPP_
#include <algorithm>#include <vector>#include <cmath>
#include "caffe/layer.hpp"#include "caffe/layers/normalize_layer.hpp"#include "caffe/util/math_functions.hpp"#include "caffe/vision_layers.hpp"
namespace caffe {

// Reads and validates the layer's NormalizeParameter.
template <typename Dtype>
void NormalizeLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NormalizeParameter normalize_param = this->layer_param_.normalize_param();
  CHECK_GT(normalize_param.norm(), 0) << "norm must be > 0";
  // NOTE(review): `norm` is stored but never read by Forward/Backward,
  // which always compute an L2 norm — confirm the parameter is meant to
  // select the norm order.
  norm = normalize_param.norm();
}

// Shapes the top blob and the scratch blob of element-wise squares to match
// the bottom blob exactly.
template <typename Dtype>
void NormalizeLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>& top) {
  // ReshapeLike copies the whole shape vector, so this works for blobs with
  // any number of axes — not only the legacy 4-D
  // (num, channels, height, width) layout the previous code hard-coded.
  top[0]->ReshapeLike(*bottom[0]);
  squared_.ReshapeLike(*bottom[0]);
}
// Scales each of the n samples (d = count/n elements each) to unit L2 norm:
// top_i = bottom_i / sqrt(sum_j bottom_ij^2).
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype* squared_data = squared_.mutable_cpu_data();
  const int n = bottom[0]->num();
  const int d = bottom[0]->count() / n;
  // Square every element once up front; the per-sample norm is then a
  // plain absolute-value sum over the (non-negative) squares.
  caffe_sqr<Dtype>(n * d, bottom_data, squared_data);
  for (int i = 0; i < n; ++i) {
    const Dtype normsqr = caffe_cpu_asum<Dtype>(d, squared_data + i * d);
    // 1/sqrt(x) instead of pow(x, -0.5): pow is a general transcendental
    // and much slower for a fixed exponent, with the same result.
    // NOTE(review): an all-zero sample gives normsqr == 0 and inf/NaN
    // output, exactly as the original did — confirm whether an epsilon
    // guard is desired.
    caffe_cpu_scale<Dtype>(d, Dtype(1) / std::sqrt(normsqr),
        bottom_data + i * d, top_data + i * d);
  }
}
// Gradient of y = x / ||x||:
//   dx = (dy - (y . dy) * y) / ||x||
// computed per sample, using top_data (= y) and bottom_data (= x).
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>& bottom) {
  // Skip the whole computation when the solver did not request a bottom
  // gradient (standard Caffe convention; the original always computed it).
  if (!propagate_down[0]) { return; }
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  const int n = top[0]->num();
  const int d = top[0]->count() / n;
  for (int i = 0; i < n; ++i) {
    // a = y . dy  (projection of the incoming gradient onto the output)
    Dtype a = caffe_cpu_dot(d, top_data + i * d, top_diff + i * d);
    // bottom_diff = dy - a * y, built in two steps using bottom_diff as
    // scratch (safe: nothing below reads top_diff after the subtraction).
    caffe_cpu_scale(d, a, top_data + i * d, bottom_diff + i * d);
    caffe_sub(d, top_diff + i * d, bottom_diff + i * d, bottom_diff + i * d);
    // a = ||x||^2; divide by ||x|| (1/sqrt is faster than pow(a, -0.5)
    // and gives the same result).
    a = caffe_cpu_dot(d, bottom_data + i * d, bottom_data + i * d);
    caffe_cpu_scale(d, Dtype(1) / std::sqrt(a),
        bottom_diff + i * d, bottom_diff + i * d);
  }
}
#ifdef CPU_ONLY
// CPU-only builds: stub out the GPU Forward/Backward declared in the header.
STUB_GPU(NormalizeLayer);
#endif

// Instantiate the float/double templates and register the layer under the
// "Normalize" type string used by type() in the header.
INSTANTIATE_CLASS(NormalizeLayer);
REGISTER_LAYER_CLASS(Normalize);

}  // namespace caffe
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layer.hpp"#include "caffe/util/math_functions.hpp"#include "caffe/vision_layers.hpp"#include "caffe/layers/normalize_layer.hpp"
namespace caffe {
// GPU path of Forward: scales each of the n samples (d elements each) to
// unit L2 norm. The per-sample squared norm is reduced on the device and
// copied back to the host scalar `normsqr`.
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>* >& bottom,
    vector<Blob<Dtype>* >& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* squared_data = squared_.mutable_gpu_data();
  Dtype normsqr;
  const int n = bottom[0]->num();
  const int d = bottom[0]->count() / n;
  caffe_gpu_powx(n * d, bottom_data, Dtype(2), squared_data);
  // NOTE(review): caffe_gpu_asum returns its result to the host, so each
  // iteration implies a device sync — fine for small n, worth batching if
  // this layer ever becomes hot.
  for (int i = 0; i < n; ++i) {
    caffe_gpu_asum<Dtype>(d, squared_data + i * d, &normsqr);
    // Host-side scalar: 1/sqrt(x) is cheaper than pow(x, -0.5), same value
    // (including inf for an all-zero sample, as before).
    caffe_gpu_scale<Dtype>(d, Dtype(1) / std::sqrt(normsqr),
        bottom_data + i * d, top_data + i * d);
  }
}
// GPU path of Backward. Gradient of y = x / ||x||:
//   dx = (dy - (y . dy) * y) / ||x||
// computed per sample; dot products are reduced on the device into the
// host scalar `a`.
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>* >& bottom) {
  // Skip the whole computation when no bottom gradient was requested
  // (standard Caffe convention; the original always computed it).
  if (!propagate_down[0]) { return; }
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int n = top[0]->num();
  const int d = top[0]->count() / n;
  Dtype a;
  for (int i = 0; i < n; ++i) {
    // a = y . dy; then bottom_diff = dy - a * y (bottom_diff as scratch).
    caffe_gpu_dot(d, top_data + i * d, top_diff + i * d, &a);
    caffe_gpu_scale(d, a, top_data + i * d, bottom_diff + i * d);
    caffe_gpu_sub(d, top_diff + i * d, bottom_diff + i * d,
        bottom_diff + i * d);
    // a = ||x||^2; divide by ||x||. Host-side 1/sqrt replaces the slower
    // pow(a, -0.5) with an identical result.
    caffe_gpu_dot(d, bottom_data + i * d, bottom_data + i * d, &a);
    caffe_gpu_scale(d, Dtype(1) / std::sqrt(a),
        bottom_diff + i * d, bottom_diff + i * d);
  }
}
// Instantiate the float/double GPU Forward/Backward definitions above.
INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer);

}  // namespace caffe