Let me explain my case.
I would like to get a regression (a classification is also fine) starting from a dataset of size (something, 140, 50, 50).
In other words n examples, 140 channels, 50x50 images.
For training step I have more or less 16000 cases, 6000 for validation and 1000 for testing.
The labels range from 0.6 to 0.8.
Of course I can rescale them in order to get integers.
Either regression or classification (e.g. among 0.60, 0.65, 0.70, 0.75, 0.80) is fine for me.
The problem is that accuracy is always 0.0%.
The code is posted below.
Thank you very much.
Best.
def build_cnn(single_entry_shape, input_var=None, num_classes=5):
    """Build a 3-conv-layer CNN classifier.

    Parameters
    ----------
    single_entry_shape : tuple
        Shape of one example as (channels, height, width), e.g. (140, 50, 50).
    input_var : Theano symbolic variable, optional
        tensor4 fed to the input layer.
    num_classes : int
        Number of output classes. Default 5, matching the stated label set
        {0.60, 0.65, 0.70, 0.75, 0.80}.

    Returns
    -------
    lasagne.layers.Layer
        Output layer producing a softmax probability distribution, suitable
        for ``lasagne.objectives.categorical_crossentropy``.
    """
    # Batch dimension left flexible (None).
    network = lasagne.layers.InputLayer(
        shape=(None, single_entry_shape[0], single_entry_shape[1],
               single_entry_shape[2]),
        input_var=input_var)
    network = lasagne.layers.Conv2DLayer(
        network, num_filters=200, filter_size=(11, 11),
        nonlinearity=lasagne.nonlinearities.leaky_rectify,
        W=lasagne.init.HeNormal())
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = lasagne.layers.Conv2DLayer(
        network, num_filters=100, filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.leaky_rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    network = lasagne.layers.Conv2DLayer(
        network, num_filters=50, filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.leaky_rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # BUG FIX: this hidden layer used a linear nonlinearity, which collapses
    # into the next linear layer and adds no representational power.
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=20,
        nonlinearity=lasagne.nonlinearities.leaky_rectify)
    # BUG FIX: the output layer was linear with 4 units. With a
    # categorical_crossentropy loss the output must be a softmax over
    # num_classes units (5 classes were described) — the linear/4-unit
    # combination is a likely cause of the reported 0.0% accuracy.
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=num_classes,
        nonlinearity=lasagne.nonlinearities.softmax)
    return network
# --- hyperparameters and symbolic graph construction -------------------------
num_epochs = 100
batchsize = 300

# Shape of a single example: (channels, height, width).
single_entry_shape = X_train.shape[1:]

input_var = T.tensor4('inputs')
# Targets are an int32 vector of class indices in [0, num_classes).
# NOTE(review): the raw labels are floats in {0.60, 0.65, 0.70, 0.75, 0.80};
# they must be remapped to integer indices before training, e.g.
#   y = numpy.round((y - 0.60) / 0.05).astype('int32')
# Feeding the raw floats into an ivector would truncate them all to 0.
target_var = T.ivector('targets')

network = build_cnn(single_entry_shape, input_var)

# Training loss: categorical cross-entropy. NOTE(review): this requires the
# network's final layer to output softmax probabilities — confirm build_cnn
# ends in a softmax DenseLayer with one unit per class.
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()

params = lasagne.layers.get_all_params(network, trainable=True)
# BUG FIX: learning_rate was 1e-8, so small that the weights effectively
# never moved and accuracy stayed frozen. 0.01 is a sane starting point;
# tune downward only if the loss diverges.
updates = lasagne.updates.nesterov_momentum(
    loss, params, learning_rate=0.01, momentum=0.9)

# Deterministic graph (dropout disabled) for validation / testing.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                        target_var)
test_loss = test_loss.mean()
# Accuracy = fraction of batches' argmax predictions matching the targets.
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)

train_fn = theano.function([input_var, target_var], loss, updates=updates,
                           allow_input_downcast=True)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc],
                         allow_input_downcast=True)
print("Starting training...")
for epoch in range(num_epochs):
    # One full pass over the training set (dropout active via train_fn).
    epoch_start = time.time()
    train_loss_sum = 0
    n_train_batches = 0
    for inputs, targets in iterate_minibatches(X_train, y_train, batchsize,
                                               shuffle=True):
        train_loss_sum += train_fn(inputs, targets)
        n_train_batches += 1

    # Evaluate on the validation set with the deterministic graph.
    val_loss_sum = 0
    val_acc_sum = 0
    n_val_batches = 0
    for inputs, targets in iterate_minibatches(X_val, y_val, batchsize,
                                               shuffle=False):
        batch_loss, batch_acc = val_fn(inputs, targets)
        val_loss_sum += batch_loss
        val_acc_sum += batch_acc
        n_val_batches += 1

    # Per-epoch progress report (batch-averaged metrics).
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - epoch_start))
    print(" training loss:\t\t{:.6f}".format(train_loss_sum / n_train_batches))
    print(" validation loss:\t\t{:.6f}".format(val_loss_sum / n_val_batches))
    print(" validation accuracy:\t\t{:.2f} %".format(
        val_acc_sum / n_val_batches * 100))

# Final evaluation on the held-out test set.
test_loss_sum = 0
test_acc_sum = 0
n_test_batches = 0
for inputs, targets in iterate_minibatches(X_test, y_test, batchsize,
                                           shuffle=False):
    batch_loss, batch_acc = val_fn(inputs, targets)
    test_loss_sum += batch_loss
    test_acc_sum += batch_acc
    n_test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_loss_sum / n_test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
    test_acc_sum / n_test_batches * 100))