import numpy as np
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.advanced_activations import ELU
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
from keras import backend as K

# Fix the numpy seed so weight initialisation is reproducible.
seed = 7
np.random.seed(seed)
# 10-fold cross-validation; shuffle is off, so random_state stays None.
kfold = KFold(n_splits=10, shuffle=False, random_state=None)
print("Splits")
cvscores_acc = []
cvscores_loss = []
hist = []
i = 0
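# Each split yields index arrays (train, test) into the stacked data;
# a fresh model is built, trained and evaluated per fold.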
for train, test in kfold.split(train_set_data_vstacked_normalized):
print "Model definition!"
model = Sequential()
#act = PReLU(init='normal', weights=None)
model.add(Dense(output_dim=400,input_dim=400, init="normal",activation=K.tanh))
#act1 = PReLU(init='normal', weights=None)
model.add(Dense(output_dim=400,input_dim=400, init="normal",activation=K.tanh))
#act2 = PReLU(init='normal', weights=None)
#model.add(Dense(output_dim=400, input_dim=400, init="normal",activation=K.tanh))
act4=ELU(100)
model.add(Dense(output_dim=13, input_dim=400, init="normal",activation=act4))
print "Compiling"
model.compile(loss='mean_squared_error', optimizer='RMSprop')
print "Compile done! "
    print('\n')
    print("Train start")
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1,
                                  mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
    csv_logger = CSVLogger('training_' + str(i) + '.csv')
    #early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=1, mode='auto')
    # Fit on the training folds of this split; 10% of them are held out as a validation set.
    hist_current = model.fit(train_set_data_vstacked_normalized[train],
                             train_set_output_vstacked[train],
                             shuffle=False, validation_split=0.1,
                             nb_epoch=1000, verbose=1,
                             callbacks=[reduce_lr, csv_logger])
    hist.append(hist_current)
    # Evaluate on the held-out test fold of this split.
    loss, accuracy = model.evaluate(x=train_set_data_vstacked_normalized[test],
                                    y=train_set_output_vstacked[test],
                                    verbose=1)
    print()
    print('loss: ', loss)
    print('accuracy: ', accuracy)
    print()
    model.summary()
    print("New Model:")
    cvscores_acc.append(accuracy)
    cvscores_loss.append(loss)
    model.save("Model" + str(i))
    print("Model stored")
    i = i + 1
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores_acc), numpy.std(cvscores_acc)))
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores_loss), numpy.std(cvscores_loss)))