I cannot understand why I am getting this error in "data_augmentation_layer" when I build Caffe with make all (CPU-only build). Does anyone have an idea?
firas@firas-ThinkPad-L540:~/Documents/MA_Depth_Estimation_Using_Stereo/Disparity_Estimation_Using_Feature_Extraction/CAFFE_ROOT$ make all
examples/siamese examples/cifar10 examples/mnist examples/cpp_classification matlab/+caffe/private python/caffe src/caffe src/caffe/layers src/caffe/solvers src/caffe/test src/caffe/proto src/caffe/util src/gtest tools
CXX src/caffe/layers/data_augmentation_layer.cpp
In file included from ./include/caffe/common.hpp:19:0,
from ./include/caffe/blob.hpp:8,
from ./include/caffe/layer.hpp:8,
from src/caffe/layers/data_augmentation_layer.cpp:9:
src/caffe/layers/data_augmentation_layer.cpp:215:10: error: redefinition of ‘void caffe::DataAugmentationLayer<Dtype>::Backward_gpu(const std::vector<caffe::Blob<Dtype>*>&, const std::vector<bool>&, const std::vector<caffe::Blob<Dtype>*>&)’
STUB_GPU(DataAugmentationLayer);
^
./include/caffe/util/device_alternate.hpp:17:6: note: in definition of macro ‘STUB_GPU’
void classname<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, \
^
In file included from src/caffe/layers/data_augmentation_layer.cpp:10:0:
./include/caffe/layers/data_augmentation_layer.hpp:39:18: error: ‘virtual void caffe::DataAugmentationLayer<Dtype>::Backward_gpu(const std::vector<caffe::Blob<Dtype>*>&, const std::vector<bool>&, const std::vector<caffe::Blob<Dtype>*>&)’ previously declared here
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
^
src/caffe/layers/data_augmentation_layer.cpp: In instantiation of ‘void caffe::DataAugmentationLayer<Dtype>::adjust_blobs(std::vector<caffe::Blob<Dtype>*>) [with Dtype = float]’:
src/caffe/layers/data_augmentation_layer.cpp:218:1: required from here
src/caffe/layers/data_augmentation_layer.cpp:178:130: error: ‘caffe_gpu_memcpy’ was not declared in this scope
caffe_gpu_memcpy(this->blobs_[2]->count()*sizeof(float), tmp.mutable_cpu_data(), this->blobs_[2]->mutable_cpu_data());
^
src/caffe/layers/data_augmentation_layer.cpp: In instantiation of ‘void caffe::DataAugmentationLayer<Dtype>::adjust_blobs(std::vector<caffe::Blob<Dtype>*>) [with Dtype = double]’:
src/caffe/layers/data_augmentation_layer.cpp:218:1: required from here
src/caffe/layers/data_augmentation_layer.cpp:178:130: error: ‘caffe_gpu_memcpy’ was not declared in this scope
make: *** [.build_release/src/caffe/layers/data_augmentation_layer.o] Error 1
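For context, the compiler note points inside the STUB_GPU macro. If I read upstream Caffe correctly, that macro in include/caffe/util/device_alternate.hpp expands roughly as below in a CPU-only build (simplified sketch; the exact code may differ between Caffe versions), i.e. it already emits a full definition of Backward_gpu for the named class:

#ifdef CPU_ONLY  // CPU-only Caffe: stub out all GPU entry points.

#define NO_GPU LOG(FATAL) << "Cannot use GPU in CPU-only Caffe: check mode."

// Emits *definitions* (not just declarations) of Forward_gpu/Backward_gpu
// that abort at runtime, so a layer can link without any .cu file.
#define STUB_GPU(classname) \
template <typename Dtype> \
void classname<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, \
    const vector<Blob<Dtype>*>& top) { NO_GPU; } \
template <typename Dtype> \
void classname<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, \
    const vector<bool>& propagate_down, \
    const vector<Blob<Dtype>*>& bottom) { NO_GPU; }

#endif  // CPU_ONLY

So it looks like, with CPU_ONLY := 1, the STUB_GPU(DataAugmentationLayer) call at data_augmentation_layer.cpp:215 generates a Backward_gpu body that clashes with the one already provided for this layer, and (as far as I can tell) caffe_gpu_memcpy is likewise only declared for GPU builds. I still don't see what I should change on my side.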
Here is my Makefile.config (note that CPU_ONLY := 1 is set):

# Contributions simplifying and improving our build system are welcome!
# cuDNN acceleration switch (uncomment to build with cuDNN).
# USE_CUDNN := 1
# CPU-only switch (uncomment to build without GPU support).
CPU_ONLY := 1
# uncomment to disable IO dependencies and corresponding data layers
# USE_OPENCV := 0
# USE_LEVELDB := 0
# USE_LMDB := 0
# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
# possibility of simultaneous read and write
# ALLOW_LMDB_NOLOCK := 1
# Uncomment if you're using OpenCV 3
# OPENCV_VERSION := 3
# To customize your choice of compiler, uncomment and set the following.
# N.B. the default for Linux is g++ and the default for OSX is clang++
# CUSTOM_CXX := g++
# CUDA directory contains bin/ and lib/ directories that we need.
CUDA_DIR := /usr/local/cuda
# On Ubuntu 14.04, if cuda tools are installed via
# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:
# CUDA_DIR := /usr
# CUDA architecture setting: going with all of them.
# For CUDA < 6.0, comment the *_50 lines for compatibility.
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \
-gencode arch=compute_20,code=sm_21 \
-gencode arch=compute_30,code=sm_30 \
-gencode arch=compute_35,code=sm_35 \
-gencode arch=compute_50,code=sm_50 \
-gencode arch=compute_50,code=compute_50
# BLAS choice:
# atlas for ATLAS (default)
# mkl for MKL
# open for OpenBlas
BLAS := atlas
# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.
# Leave commented to accept the defaults for your choice of BLAS
# (which should work)!
# BLAS_INCLUDE := /path/to/your/blas
# BLAS_LIB := /path/to/your/blas
# Homebrew puts openblas in a directory that is not on the standard search path
# BLAS_INCLUDE := $(shell brew --prefix openblas)/include
# BLAS_LIB := $(shell brew --prefix openblas)/lib
# This is required only if you will compile the matlab interface.
# MATLAB directory should contain the mex binary in /bin.
# MATLAB_DIR := /usr/local
# MATLAB_DIR := /Applications/MATLAB_R2012b.app
# NOTE: this is required only if you will compile the python interface.
# We need to be able to find Python.h and numpy/arrayobject.h.
PYTHON_INCLUDE := /usr/include/python2.7 \
/usr/local/lib/python2.7/dist-packages/numpy/core/include
# /usr/lib/python2.7/dist-packages/numpy/core/include
# Anaconda Python distribution is quite popular. Include path:
# Verify anaconda location, sometimes it's in root.
# ANACONDA_HOME := $(HOME)/anaconda
# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
# $(ANACONDA_HOME)/include/python2.7 \
# $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \
# Uncomment to use Python 3 (default is Python 2)
# PYTHON_LIBRARIES := boost_python3 python3.5m
# PYTHON_INCLUDE := /usr/include/python3.5m \
# /usr/lib/python3.5/dist-packages/numpy/core/include
PYTHON_LIB := /usr/lib
# PYTHON_LIB := $(ANACONDA_HOME)/lib
# Homebrew installs numpy in a non standard path (keg only)
# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include
# PYTHON_LIB += $(shell brew --prefix numpy)/lib
# Uncomment to support layers written in Python (will link against Python libs)
WITH_PYTHON_LAYER := 1
# Whatever else you find you need goes here.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies
# INCLUDE_DIRS += $(shell brew --prefix)/include
# LIBRARY_DIRS += $(shell brew --prefix)/lib
# Uncomment to use `pkg-config` to specify OpenCV library paths.
# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)
# USE_PKG_CONFIG := 1
# N.B. both build and distribute dirs are cleared on `make clean`
BUILD_DIR := build
DISTRIBUTE_DIR := distribute
# DEBUG := 1
# The ID of the GPU that 'make runtest' will use to run unit tests.
TEST_GPUID := 0
# enable pretty build (comment to see full commands)
Q ?= @
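For comparison, the stock CPU-compatible layers in upstream Caffe end their .cpp roughly like this (sketch based on src/caffe/layers/absval_layer.cpp; DataAugmentationLayer and its adjust_blobs come from the fork I am building, not from upstream), and they copy blob data with caffe_copy rather than caffe_gpu_memcpy:

// Sketch of the tail of a stock upstream layer source (absval_layer.cpp),
// shown only for comparison with my custom layer.
#include <vector>

#include "caffe/layers/absval_layer.hpp"
#include "caffe/util/math_functions.hpp"  // caffe_copy / caffe_gpu_memcpy declarations

namespace caffe {

// ... LayerSetUp / Forward_cpu / Backward_cpu definitions ...
// Code that must also compile with CPU_ONLY := 1 copies blob data with
//   caffe_copy(count, src_ptr, dst_ptr);
// which works in both CPU and GPU builds, while caffe_gpu_memcpy is GPU-only.

#ifdef CPU_ONLY
STUB_GPU(AbsValLayer);   // GPU stubs are only generated in a CPU-only build
#endif

INSTANTIATE_CLASS(AbsValLayer);
REGISTER_LAYER_CLASS(AbsVal);

}  // namespace caffe

Given that, is the right fix to guard the GPU-only parts of data_augmentation_layer.cpp the same way, or am I missing something in Makefile.config?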