#Imports assumed for this snippet (Keras 1.x API, matching the border_mode/init arguments used below)
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, LocallyConnected2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.optimizers import SGD

model = Sequential()
#Deepnet Architecture
#C1: convolution layer
model.add(Convolution2D(nb_filter[0], nb_row[0], nb_col[0], border_mode=border_mode, input_shape=(no_of_channels,img_row,img_col),init=weight_init))
model.add(Activation(activation))
#M2: max-pooling layer
model.add(MaxPooling2D(pool_size = pool_size, strides = stride_size))
#C3: convolution layer
model.add(Convolution2D(nb_filter[1], nb_row[1], nb_col[1], border_mode=border_mode, init=weight_init))
model.add(Activation(activation))
#L4: locally connected layer
model.add(LocallyConnected2D(nb_filter[2], nb_row[2], nb_col[2],border_mode=border_mode, init=weight_init))
model.add(Activation(activation))
#L5: locally connected layer
model.add(LocallyConnected2D(nb_filter[3], nb_row[3], nb_col[3],border_mode=border_mode, init=weight_init))
model.add(Activation(activation))
#L6: locally connected layer (kept commented out here; its Activation is commented out as well so the activation after L5 is not applied twice)
# model.add(LocallyConnected2D(nb_filter[4], nb_row[4], nb_col[4], border_mode=border_mode, init=weight_init))
# model.add(Activation(activation))
#Dropout
model.add(Dropout(dropout_percent[0]))
model.add(Flatten())
#F7: fully connected layer
model.add(Dense(output_fc[0],init=weight_init))
model.add(Activation(activation))
#normalization l2
#F8: softmax output layer over nb_classes
# model.add(Dense(output_fc[1],init=weight_init))
model.add(Dense(nb_classes, activation='softmax',init=weight_init))
#learning process
optimizer_sgd = SGD(lr=lr_rate, decay=1e-6, momentum=0.9, nesterov=True)
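A minimal sketch of how the optimizer might be attached and the model trained, using the same Keras 1.x API as above. The loss choice assumes one-hot encoded labels matching the softmax output; X_train, Y_train, batch_size and nb_epoch are placeholders that do not appear in the snippet above.

# Hedged sketch (not part of the original snippet): compile and fit the model.
# 'categorical_crossentropy' assumes one-hot labels; X_train, Y_train, batch_size, nb_epoch are placeholders.
model.compile(loss='categorical_crossentropy', optimizer=optimizer_sgd, metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True, verbose=1)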
The problem I am facing is that I am not able to bring the loss down, or even to overfit the model. Has anyone implemented this paper successfully in Keras?