Hi,
I tried the ImageNet example and got an error message saying that opening the LMDB fails with "Permission denied". I put the ImageNet LMDB data in /home/common/imagenet, changed the prototxt accordingly, and also generated the image mean file. Has anyone encountered this error before? Any suggestions will be greatly appreciated!
The relevant part of the output is shown first (the full output follows below):
I0513 10:22:08.295487 1273 layer_factory.hpp:74] Creating layer data
I0513 10:22:08.295840 1273 net.cpp:84] Creating Layer data
I0513 10:22:08.295883 1273 net.cpp:338] data -> data
I0513 10:22:08.295980 1273 net.cpp:338] data -> label
I0513 10:22:08.296025 1273 net.cpp:113] Setting up data
F0513 10:22:08.296128 1273 db.hpp:109] Check failed: mdb_status == 0 (13 vs. 0) Permission denied
*** Check failure stack trace: ***
@ 0x7f456cb72b7d google::LogMessage::Fail()
@ 0x7f456cb74c7f google::LogMessage::SendToLog()
@ 0x7f456cb7276c google::LogMessage::Flush()
@ 0x7f456cb7551d google::LogMessageFatal::~LogMessageFatal()
@ 0x7f456cf820ce caffe::db::LMDB::Open()
@ 0x7f456cf316e4 caffe::DataLayer<>::DataLayerSetUp()
@ 0x7f456cf167e6 caffe::BaseDataLayer<>::LayerSetUp()
@ 0x7f456cf168e9 caffe::BasePrefetchingDataLayer<>::LayerSetUp()
@ 0x7f456cf5bb72 caffe::Net<>::Init()
@ 0x7f456cf5d632 caffe::Net<>::Net()
@ 0x7f456ceb95b0 caffe::Solver<>::InitTrainNet()
@ 0x7f456ceba6c3 caffe::Solver<>::Init()
@ 0x7f456ceba896 caffe::Solver<>::Solver()
@ 0x40c4a0 caffe::GetSolver<>()
@ 0x406471 train()
@ 0x404a11 main
@ 0x7f456c2a9ec5 (unknown)
@ 0x404fbd (unknown)
Aborted (core dumped)
====================================All output==========================================
I0513 10:22:07.979399 1273 caffe.cpp:113] Use GPU with device ID 0
I0513 10:22:08.286501 1273 caffe.cpp:121] Starting Optimization
I0513 10:22:08.286767 1273 solver.cpp:32] Initializing solver from parameters:
test_iter: 1000
test_interval: 1000
base_lr: 0.01
display: 20
max_iter: 450000
lr_policy: "step"
gamma: 0.1
momentum: 0.9
weight_decay: 0.0005
stepsize: 100000
snapshot: 10000
snapshot_prefix: "models/bvlc_reference_caffenet/caffenet_train"
solver_mode: GPU
net: "models/bvlc_reference_caffenet/train_val.prototxt"
I0513 10:22:08.287188 1273 solver.cpp:70] Creating training net from net file: models/bvlc_reference_caffenet/train_val.prototxt
I0513 10:22:08.288521 1273 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data
I0513 10:22:08.288591 1273 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0513 10:22:08.288839 1273 net.cpp:42] Initializing net from parameters:
name: "CaffeNet"
state {
phase: TRAIN
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
data_param {
source: "/home/common/imagenet/ilsvrc12_train_lmdb"
batch_size: 256
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "norm1"
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "norm1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "norm2"
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "norm2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
I0513 10:22:08.295487 1273 layer_factory.hpp:74] Creating layer data
I0513 10:22:08.295840 1273 net.cpp:84] Creating Layer data
I0513 10:22:08.295883 1273 net.cpp:338] data -> data
I0513 10:22:08.295980 1273 net.cpp:338] data -> label
I0513 10:22:08.296025 1273 net.cpp:113] Setting up data
F0513 10:22:08.296128 1273 db.hpp:109] Check failed: mdb_status == 0 (13 vs. 0) Permission denied
*** Check failure stack trace: ***
@ 0x7f456cb72b7d google::LogMessage::Fail()
@ 0x7f456cb74c7f google::LogMessage::SendToLog()
@ 0x7f456cb7276c google::LogMessage::Flush()
@ 0x7f456cb7551d google::LogMessageFatal::~LogMessageFatal()
@ 0x7f456cf820ce caffe::db::LMDB::Open()
@ 0x7f456cf316e4 caffe::DataLayer<>::DataLayerSetUp()
@ 0x7f456cf167e6 caffe::BaseDataLayer<>::LayerSetUp()
@ 0x7f456cf168e9 caffe::BasePrefetchingDataLayer<>::LayerSetUp()
@ 0x7f456cf5bb72 caffe::Net<>::Init()
@ 0x7f456cf5d632 caffe::Net<>::Net()
@ 0x7f456ceb95b0 caffe::Solver<>::InitTrainNet()
@ 0x7f456ceba6c3 caffe::Solver<>::Init()
@ 0x7f456ceba896 caffe::Solver<>::Solver()
@ 0x40c4a0 caffe::GetSolver<>()
@ 0x406471 train()
@ 0x404a11 main
@ 0x7f456c2a9ec5 (unknown)
@ 0x404fbd (unknown)
Aborted (core dumped)