A note about the error: the code I provide below is for a single output. If you want more outputs, you have to change the dimensions of the numpy array; the comment in the code marks where. Also remember to set the properties of the memory data layer accordingly, such as batch_size and width (width is the number of inputs in this case).
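To make that mapping concrete, here is a minimal sketch (using the values from the prototxt below; batch_size and num_inputs are just local names for illustration) of how the numpy array shapes relate to the memory data layer:

import numpy as np

batch_size = 1100   # must equal batch_size in memory_data_param
num_inputs = 3593   # must equal width in memory_data_param

# input blob: (batch_size, channels, height, width)
data4D = np.zeros([batch_size, 1, 1, num_inputs], dtype=np.float32)
# label/output blob: enlarge the last dimension (and num_output in "fc3") if you want more than one output
data4DL = np.zeros([batch_size, 1, 1, 1], dtype=np.float32)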
name: "LogisticRegressionNet"
layers {
name: "data"
type: MEMORY_DATA
top: "data"
top: "label"
memory_data_param {
batch_size: 1100 # batch size: how many predictions you want to do at once; 1 is simplest, but a larger batch gives better throughput
channels: 1
height: 1
width: 3593 # number of inputs; must match the last dimension of the numpy input array (see the shape check after this prototxt)
}
}
layers {
name: "fc1"
type: INNER_PRODUCT
bottom: "data"
top: "fc1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 50 # number of outputs of the first layer (hidden units)
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "encode1neuron"
bottom: "fc1"
top: "encode1neuron"
type: SIGMOID
}
layers {
name: "fc3"
type: INNER_PRODUCT
bottom: "encode1neuron"
top: "fc3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 1 # number of outputs of the regression
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
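Before feeding data, it can help to verify that the definition above matches the array shapes you plan to pass in. A small sketch, using the same old pycaffe API and file paths as the prediction script further down:

import sys
caffe_root = '/home/melgor/CODE/caffe/'  # adjust to your Caffe installation
sys.path.insert(0, caffe_root + 'python')
import caffe

net = caffe.Net('p/train_val.prototxt', 'p/_iter_10000.caffemodel')
net.set_phase_test()
net.set_mode_cpu()

# every blob is (batch, channels, height, width); "data" should print as (1100, 1, 1, 3593)
print([(name, blob.data.shape) for name, blob in net.blobs.items()])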
name: "LogisticRegressionNet"
layers {
name: "data"
type: HDF5_DATA
top: "data"
top: "label"
hdf5_data_param {
source: "/home/melgor/CODE/caffe/examples/Ind/soc/hdf5_classification/data/soc_train.txt" #input data in HDF5
batch_size: 800
}
include: { phase: TRAIN }
}
layers {
name: "data"
type: HDF5_DATA
top: "data"
top: "label"
hdf5_data_param {
source: "/home/melgor/CODE/caffe/examples/Ind/soc/hdf5_classification/data/soc_test.txt"
batch_size: 400
}
include: { phase: TEST }
}
layers {
name: "fc1"
type: INNER_PRODUCT
bottom: "data"
top: "fc1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 50 # number of outputs of the first layer (hidden units)
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "encode1neuron"
bottom: "fc1"
top: "encode1neuron"
type: SIGMOID
}
layers {
name: "fc3"
type: INNER_PRODUCT
bottom: "encode1neuron"
top: "fc3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 1 # number of outputs of the regression task
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "loss"
type: EUCLIDEAN_LOSS # loss function
bottom: "fc3"
bottom: "label"
top: "loss"
}
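For reference, soc_train.txt and soc_test.txt are plain text files listing paths of HDF5 files that contain the 'data' and 'label' datasets (Caffe's HDF5 data layer looks the datasets up by the top blob names). A minimal sketch of how such a file could be created, with assumed array shapes and the assumed file name soc_train.h5:

import h5py
import numpy as np

# xtrain: (num_samples, 3593) inputs, ytrain: (num_samples,) targets -- assumed shapes
xtrain = np.random.rand(800, 3593).astype(np.float32)  # placeholder data
ytrain = np.random.rand(800).astype(np.float32)        # placeholder targets

with h5py.File('soc_train.h5', 'w') as f:
    f.create_dataset('data', data=xtrain)
    f.create_dataset('label', data=ytrain.reshape(-1, 1))

# the list file given to hdf5_data_param contains one HDF5 path per line
with open('soc_train.txt', 'w') as f:
    f.write('soc_train.h5\n')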
##########################
# Predict using Caffe model
###########################
# Make sure that caffe is on the python path:
caffe_root = '/home/melgor/CODE/caffe/' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
import numpy as np
# Set the right path to your model definition file and the pretrained model weights.
MODEL_FILE = 'p/train_val.prototxt'
PRETRAINED = 'p/_iter_10000.caffemodel'
# from sklearn.feature_selection import RFE
max_value = 1100  # batch size from the .prototxt (memory_data_param)
net = caffe.Net(MODEL_FILE, PRETRAINED)
net.set_phase_test()
net.set_mode_cpu()
data4D = np.zeros([max_value, 1, 1, 3593])  # create the 4-D input array: first dimension is the batch size, last is the number of inputs
data4DL = np.zeros([max_value, 1, 1, 1])    # the label/output array must also be 4-D: first dimension is the batch size, last is the number of outputs
data4D[0:max_value, 0, 0, :] = xtrain[0:max_value, :]  # fill in the inputs; xtrain holds the samples you want to predict
print([(k, v[0].data.shape) for k, v in net.params.items()])  # inspect the learned parameter shapes
net.set_input_arrays(data4D.astype(np.float32), data4DL.astype(np.float32))
pred = net.forward()
pred_normal = np.zeros([max_value, 5])
for i in range(0, max_value):
    pred_normal[i, 0] = pred['fc3'][i][0]  # single regression output per sample
#plot result
import matplotlib.pyplot as plt
time = [ i for i in range(0,max_value)]
plt.plot(time, ytrain[0:max_value], 'b', label='Real')
plt.plot(time, pred_normal[:, 0], 'r', label='Predict')
plt.grid(True)
plt.legend()
plt.show()
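xtrain and ytrain in the prediction code above are assumed to be already loaded in memory; if they are stored in one of the HDF5 files from the training list, they could be read back like this (the soc_train.h5 name is an assumption):

import h5py
with h5py.File('soc_train.h5', 'r') as f:  # assumed name of a file listed in soc_train.txt
    xtrain = f['data'][()]
    ytrain = f['label'][()]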
A related snippet shows how to predict in batches when the dataset is larger than the batch size (here for a classification net whose output blob is named "ip2"):

import h5py
import numpy

# DATA_NAME, DATA_SIZE, BATCH_SIZE and the loaded `net` are assumed to be defined earlier.
with h5py.File(DATA_NAME, 'r') as f:
    h5Data = f['data'][()]
    h5Img = f['img'][()]

data4D = numpy.zeros([BATCH_SIZE, 1, 40, 60])
resultsArray = []
for i in xrange(0, DATA_SIZE, BATCH_SIZE):
    stop = i + BATCH_SIZE
    #print('Loading data from {0} to {1}'.format(i, stop))
    data4D = h5Data[i:stop]
    if data4D.shape[0] < BATCH_SIZE:
        # pad the last, incomplete batch with zeros so its shape still matches BATCH_SIZE
        rows = data4D.shape[0]
        extraRows = numpy.zeros([BATCH_SIZE, 1, 40, 60])
        extraRows[:rows] = data4D
        data4D = extraRows
    data4DLabels = numpy.zeros([BATCH_SIZE, 1, 1, 1])
    net.set_input_arrays(data4D.astype(numpy.float32), data4DLabels.astype(numpy.float32))
    prediction = net.forward()
    if i == 0:
        resultsArray = prediction['ip2'].copy()  # copy, otherwise the next forward() overwrites this view
    else:
        resultsArray = numpy.concatenate((resultsArray, prediction['ip2']), axis=0)

# Get the results: predicted class index per sample, then the positions predicted as class 1
predictionArray = numpy.zeros(DATA_SIZE)
for i in xrange(0, DATA_SIZE):
    predictionArray[i] = resultsArray[i].argmax()
itemIndex = numpy.where(predictionArray == 1)
itemIndex = numpy.asarray(itemIndex)
print("Indexes: {0} - {1}".format(itemIndex.shape, itemIndex))
Regarding these two lines:

data4D = np.zeros([max_value,1,1,3593])
data4DL = np.zeros([max_value,1,1,1])

we have created 4-dimensional arrays. The first parameter (the batch size) is understood, but what is the meaning of each of the other three parameters?