I0707 21:09:27.700299 5238 solver.cpp:289] Solving
I0707 21:09:27.700309 5238 solver.cpp:290] Learning Rate Policy: fixed
I0707 21:09:27.701439 5238 solver.cpp:347] Iteration 0, Testing net (#0)
I0707 21:09:27.701453 5238 net.cpp:676] Ignoring source layer data
F0707 21:09:27.876583 5238 blob.hpp:140] Check failed: num_axes() <= 4 (5 vs. 4) Cannot use legacy accessors on Blobs with > 4 axes.
*** Check failure stack trace: ***
Aborted (core dumped)
# --- Input layers ---
# Fix: the phase sources were swapped -- TRAIN pointed at test_cmr.txt and
# TEST pointed at train_cmr.txt, so the net would have trained on the test
# list and validated on the training list. Each *.txt is an HDF5 list file
# whose files must provide the "data" and "label" datasets.
# NOTE(review): the crash log above (blob.hpp: 5 vs 4 axes) shows these HDF5
# blobs carry 5 axes -- presumably N x C x D x H x W; verify against the
# HDF5 files, since several stock Caffe layers still use the 4-axis API.
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "/home/sghos003/Desktop/caffe-master/build/install/python/training/train_cmr.txt"
batch_size: 64
}
}
layer {
name: "data2"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TEST
}
hdf5_data_param {
source: "/home/sghos003/Desktop/caffe-master/build/install/python/training/test_cmr.txt"
batch_size: 10
}
}
# Learned upsampling front-end. Repeated pad/kernel/stride values give one
# value per spatial axis, so this is a 3-spatial-axis (ND) deconvolution:
# kernel 19x1x1, stride 5x1x1, pad 7x0x0. Along the first spatial axis
# out = 5*(in-1) - 2*7 + 19 = 5*in, i.e. exact x5 upsampling; the other two
# axes pass through unchanged.
# NOTE(review): three spatial axes imply 5-axis blobs flowing through the
# net; the pasted crash (blob.hpp:140, "num_axes() <= 4") means some layer
# downstream still calls the legacy 4-axis accessors -- either reshape the
# input to 4 axes or locate that layer before this net can run.
layer {
name: "deconv1"
type: "Deconvolution"
bottom: "data"
top: "deconv1"
param {
# Weights learn at 0.1x the base rate.
lr_mult: 0.10000000149011612
decay_mult: 0.0
}
param {
# Bias frozen (lr_mult 0): the layer stays a pure learned interpolator.
lr_mult: 0.0
decay_mult: 0.0
}
convolution_param {
num_output: 1
pad: 7
pad: 0
pad: 0
kernel_size: 19
kernel_size: 1
kernel_size: 1
stride: 5
stride: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
# Stage 1: shape-preserving convolution (pad 1, kernel 3, stride 1)
# -> batch norm -> in-place ReLU. 64 output channels.
layer {
name: "conv1"
type: "Convolution"
bottom: "deconv1"
top: "conv1"
param {
# Weight learning-rate multiplier.
lr_mult: 1.0
}
param {
# Bias learns 10x slower than the weights.
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 64
# A single pad/kernel/stride value is repeated over every spatial axis,
# so with the 5-axis input this acts as a 3x3x3 convolution.
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
# BatchNorm's three blobs (mean, variance, moving-average factor) are not
# gradient-trained; lr_mult 0 spells that out explicitly.
# NOTE(review): Caffe BatchNorm only normalizes -- no Scale layer follows,
# so no learned scale/shift is applied. Confirm that is intentional.
layer {
name: "bn1"
type: "BatchNorm"
bottom: "conv1"
top: "bn1"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "relu1"
type: "ReLU"
# In-place activation: bottom and top share the "bn1" blob.
bottom: "bn1"
top: "bn1"
}
# Stage 2: same conv -> batch norm -> in-place ReLU pattern as stage 1;
# 64 channels in, 64 out, spatial shape preserved.
layer {
name: "conv2"
type: "Convolution"
bottom: "bn1"
top: "conv2"
param {
lr_mult: 1.0
}
param {
# Bias at 0.1x the weight rate, matching the other conv stages.
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
# Frozen BatchNorm statistics, as in stage 1 (no trailing Scale layer).
layer {
name: "bn2"
type: "BatchNorm"
bottom: "conv2"
top: "bn2"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "bn2"
top: "bn2"
}
# Stage 3: channel reduction 64 -> 32, otherwise identical to stage 2.
layer {
name: "conv3"
type: "Convolution"
bottom: "bn2"
top: "conv3"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "bn3"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "bn3"
top: "bn3"
}
# Stage 4: channel reduction 32 -> 16, same conv/bn/relu pattern.
layer {
name: "conv4"
type: "Convolution"
bottom: "bn3"
top: "conv4"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "bn4"
type: "BatchNorm"
bottom: "conv4"
top: "bn4"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "bn4"
top: "bn4"
}
# Stage 5: 16 channels in, 16 out, same conv/bn/relu pattern.
layer {
name: "conv5"
type: "Convolution"
bottom: "bn4"
top: "conv5"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "bn5"
type: "BatchNorm"
bottom: "conv5"
top: "bn5"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "bn5"
top: "bn5"
}
# Output projection: 16 channels down to 1, matching deconv1's single
# channel so the two can be summed by the "recon" layer below.
# NOTE(review): weight lr_mult here is 0.1, unlike the 1.0 used by
# conv1-conv5 -- confirm the slower-learning output layer is intentional.
layer {
name: "conv6"
type: "Convolution"
bottom: "bn5"
top: "conv6"
param {
lr_mult: 0.10000000149011612
}
param {
lr_mult: 0.10000000149011612
}
convolution_param {
num_output: 1
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000474974513
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
# Residual reconstruction: element-wise sum of the upsampled input
# (deconv1) and the learned correction (conv6), so the conv stack only has
# to model the residual between the upsampled input and the target.
layer {
name: "recon"
type: "Eltwise"
bottom: "deconv1"
bottom: "conv6"
top: "recon"
eltwise_param {
operation: SUM
}
}
# Training objective: Euclidean (sum-of-squares) loss between the
# reconstruction and the HDF5-supplied "label" blob, whose shape must
# therefore match "recon" exactly.
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "recon"
bottom: "label"
top: "loss"
}