model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# So what happens after this to get 5 independent fully connected layers that recognise the multiple digits? A Sequential model can't branch, so the functional API is used below:
x = Input((img_channels, img_rows, img_cols))
y = Convolution2D(32, 3, 3, activation="relu", border_mode="same")(x)
y = Convolution2D(32, 3, 3, activation="relu")(y)
y = MaxPooling2D((2, 2))(y)
y = Dropout(0.25)(y)
y = Convolution2D(64, 3, 3, border_mode="same", activation="relu")(y)
y = Convolution2D(64, 3, 3, activation="relu")(y)
y = MaxPooling2D((2, 2))(y)
y = Dropout(0.25)(y)
y = Flatten()(y)
y = Dense(1024, activation="relu")(y)
length = Dense(4, activation="softmax")(y)
digit1 = Dense(10, activation="softmax")(y)
digit2 = Dense(10, activation="softmax")(y)
digit3 = Dense(10, activation="softmax")(y)
model = Model(input=x, output=[length, digit1, digit2, digit3])
Exception: The model expects 5 input arrays, but only received one array. Found: array with shape (188602, 5, 11)
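This exception means Keras wants a list with one target array per output head, not a single stacked tensor. Assuming the (188602, 5, 11) array stacks five 11-class one-hot label sets along axis 1 (y_stacked is a hypothetical name for it), a minimal split would be:

# Split a stacked (188602, 5, 11) label tensor into five (188602, 11) arrays,
# one per output head, and pass them to fit() as a list
y_list = [y_stacked[:, i, :] for i in range(5)]
model.fit(X_train, y_list)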
batch_size = 128
nb_classes = 10
nb_epoch = 2
_, img_rows, img_cols, img_channels = X_train.shape
model_input = Input(shape=(img_rows, img_cols, img_channels))
x = Convolution2D(32, 3, 3, border_mode='same')(model_input)
x = Activation('relu')(x)
x = Convolution2D(32, 3, 3)(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x1 = Dense(nb_classes, activation='softmax')(x)
x2 = Dense(nb_classes, activation='softmax')(x)
x3 = Dense(nb_classes, activation='softmax')(x)
x4 = Dense(nb_classes, activation='softmax')(x)
x5 = Dense(nb_classes, activation='softmax')(x)
lst = [x1, x2, x3, x4, x5]
model = Model(input=model_input, output=lst)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(X_train, y_train_dummy, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_val, y_val_dummy))
model.fit(x, [y1, y2, y3, y4, y5], validation_data=(vx, [vy1, vy2, vy3, vy4, vy5]))
Train on 188602 samples, validate on 47151 samples
Epoch 1/10 188602/188602 [==============================] - 229s - loss: 4.2374 - dense_14_loss: 1.4432 - dense_15_loss: 1.6796 - dense_16_loss: 0.9792 - dense_17_loss: 0.1342 - dense_18_loss: 0.0013 - dense_14_acc: 0.5091 - dense_15_acc: 0.3580 - dense_16_acc: 0.3715 - dense_17_acc: 0.4727 - dense_18_acc: 0.0039 - val_loss: 2.3544 - val_dense_14_loss: 0.7367 - val_dense_15_loss: 0.9298 - val_dense_16_loss: 0.5922 - val_dense_17_loss: 0.0947 - val_dense_18_loss: 0.0010 - val_dense_14_acc: 0.7791 - val_dense_15_acc: 0.6632 - val_dense_16_acc: 0.4265 - val_dense_17_acc: 0.1512 - val_dense_18_acc: 0.0154
Subsequently I ran model.evaluate and it reports that many accuracies (5):
score = model.evaluate(X_val, y_val_lst, verbose=1)
print('Validation error:', 100 - score[1]*100)

There are multiple accuracy scores on top (5 actually, one for each of the 5 branches/outputs). Does that mean Keras automatically calculates the accuracy for, say, "712" from an image showing "712" after training, through model.evaluate?
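A note on that: Keras only scores each output head separately; it does not combine them into a whole-sequence accuracy. For a 5-output model compiled with metrics=['accuracy'], model.evaluate returns a flat list of scalars in this layout (check model.metrics_names to be sure):

score = model.evaluate(X_val, y_val_lst, verbose=1)
# score[0]    -> total loss (sum of the five per-head losses)
# score[1:6]  -> one loss per output head
# score[6:11] -> one accuracy per output head
print(model.metrics_names)  # names each scalar in score

So score[1] above is the first head's loss, not an accuracy.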
How do we calculate the accuracy on the entire number "712" instead of on the individual digits?
y_pred_list = model.predict(x_val)
correct_preds = 0
for i in xrange(x_val.shape[0]):  # iterate over the sample dimension
    pred_list_i = [y_pred[i] for y_pred in y_pred_list]
    val_list_i = [y_val[i] for y_val in y_val_list]
    matching_preds = [pred.argmax(-1) == val.argmax(-1)
                      for pred, val in zip(pred_list_i, val_list_i)]
    correct_preds += int(np.all(matching_preds))  # count a sample only if every digit matches
total_acc = correct_preds / float(x_val.shape[0])
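The same exact-match accuracy can also be computed without the Python loop. A vectorised sketch with plain numpy, assuming y_pred_list and y_val_list are lists of (num_samples, num_classes) arrays as above:

import numpy as np

# (num_heads, num_samples) arrays of predicted / true class indices
pred_idx = np.stack([p.argmax(-1) for p in y_pred_list])
true_idx = np.stack([v.argmax(-1) for v in y_val_list])

# A sample counts as correct only when every head matches
total_acc = np.all(pred_idx == true_idx, axis=0).mean()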
model_input = Input(shape=(img_rows, img_cols, img_channels))
x = Convolution2D(32, 3, 3, border_mode='same')(model_input)
x = Activation('relu')(x)
x = Convolution2D(32, 3, 3, border_mode='same')(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = Dropout(0.5)(x)
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
# length = Dense(4, activation='softmax')(x)
digit_1 = Dense(nb_classes, activation='softmax')(x)
digit_2 = Dense(nb_classes, activation='softmax')(x)
digit_3 = Dense(nb_classes, activation='softmax')(x)
digit_4 = Dense(nb_classes, activation='softmax')(x)
digit_5 = Dense(nb_classes, activation='softmax')(x)
branches = [digit_1, digit_2, digit_3, digit_4, digit_5]
model = Model(input=model_input, output=branches)
# let's train the model using SGD + momentum
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])
history = model.fit(X_train, y_train_lst, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_val, y_val_lst))
y_pred_list = model.predict(X_val)
correct_preds = 0
# Iterate over sample dimension
for i in range(X_val.shape[0]):
    pred_list_i = [y_pred[i] for y_pred in y_pred_list]
    val_list_i = [y_val[i] for y_val in y_val_lst]
    matching_preds = [pred.argmax(-1) == val.argmax(-1) for pred, val in zip(pred_list_i, val_list_i)]
    correct_preds += int(np.all(matching_preds))  # '+=', not '=': otherwise only the last sample is counted
total_acc = (correct_preds / float(X_val.shape[0])) * 100
print(total_acc)
Epoch 8/10
188602/188602 [==============================] - 248s - loss: 0.9182 - dense_8_loss: 0.2776 - dense_9_loss: 0.3641 - dense_10_loss: 0.2361 - dense_11_loss: 0.0395 - dense_12_loss: 8.5433e-04 - dense_8_categorical_accuracy: 0.9130 - dense_9_categorical_accuracy: 0.8242 - dense_10_categorical_accuracy: 0.5283 - dense_11_categorical_accuracy: 0.1643 - dense_12_categorical_accuracy: 0.1027 - val_loss: 0.9377 - val_dense_8_loss: 0.2888 - val_dense_9_loss: 0.3668 - val_dense_10_loss: 0.2378 - val_dense_11_loss: 0.0434 - val_dense_12_loss: 8.4473e-04 - val_dense_8_categorical_accuracy: 0.9160 - val_dense_9_categorical_accuracy: 0.8329 - val_dense_10_categorical_accuracy: 0.5340 - val_dense_11_categorical_accuracy: 0.1562 - val_dense_12_categorical_accuracy: 0.0739
Epoch 9/10
188602/188602 [==============================] - 248s - loss: 0.8456 - dense_8_loss: 0.2573 - dense_9_loss: 0.3367 - dense_10_loss: 0.2145 - dense_11_loss: 0.0362 - dense_12_loss: 7.9260e-04 - dense_8_categorical_accuracy: 0.9189 - dense_9_categorical_accuracy: 0.8320 - dense_10_categorical_accuracy: 0.5320 - dense_11_categorical_accuracy: 0.1652 - dense_12_categorical_accuracy: 0.1109 - val_loss: 0.9151 - val_dense_8_loss: 0.2821 - val_dense_9_loss: 0.3587 - val_dense_10_loss: 0.2317 - val_dense_11_loss: 0.0417 - val_dense_12_loss: 8.2378e-04 - val_dense_8_categorical_accuracy: 0.9185 - val_dense_9_categorical_accuracy: 0.8358 - val_dense_10_categorical_accuracy: 0.5308 - val_dense_11_categorical_accuracy: 0.1610 - val_dense_12_categorical_accuracy: 0.0831
Epoch 10/10
188602/188602 [==============================] - 247s - loss: 0.7835 - dense_8_loss: 0.2402 - dense_9_loss: 0.3115 - dense_10_loss: 0.1978 - dense_11_loss: 0.0332 - dense_12_loss: 7.1053e-04 - dense_8_categorical_accuracy: 0.9236 - dense_9_categorical_accuracy: 0.8406 - dense_10_categorical_accuracy: 0.5379 - dense_11_categorical_accuracy: 0.1640 - dense_12_categorical_accuracy: 0.1093 - val_loss: 0.9023 - val_dense_8_loss: 0.2776 - val_dense_9_loss: 0.3548 - val_dense_10_loss: 0.2266 - val_dense_11_loss: 0.0425 - val_dense_12_loss: 8.0618e-04 - val_dense_8_categorical_accuracy: 0.9197 - val_dense_9_categorical_accuracy: 0.8368 - val_dense_10_categorical_accuracy: 0.5357 - val_dense_11_categorical_accuracy: 0.1713 - val_dense_12_categorical_accuracy: 0.1190
So is my code fine? I'm confused as to whether the problem is my code or that I'm not training long enough, because I'm trying to calculate the final validation accuracy and test accuracy.
Even with 10 epochs, does it make sense that it's 0?
model.fit(x, [y1, y2, y3, y4, y5], validation_data=(vx, [vy1, vy2, vy3, vy4, vy5]))
Exception: Error when checking model target: expected length to have shape (None, 4) but got array with shape (111897, 1)
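This exception says the length head is a 4-way softmax, so it expects one-hot targets of shape (None, 4), while the supplied labels are raw integers of shape (111897, 1). A minimal sketch of the conversion with Keras 1.x's np_utils (y_length is a hypothetical name for the integer length labels):

from keras.utils import np_utils

# One-hot encode the integer length labels: (111897, 1) -> (111897, 4)
y_length_onehot = np_utils.to_categorical(y_length.ravel(), 4)

Alternatively, keep the integer labels and compile that output with sparse_categorical_crossentropy, as in the last compile line of this thread.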
# Layer 0: Input
x = Input((img_rows, img_cols, img_channels))

# Layer 1: 48-unit maxout convolution
y = Convolution2D(nb_filter=48, nb_row=5, nb_col=5, border_mode="same", name="1conv")(x)
y = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="same", name="1maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="1drop")(y)
# y = MaxoutDense(output_dim=48, nb_feature=3)(y)
y = Activation('relu', name="1activ")(y)

# Layer 2: 64-unit relu convolution
y = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, border_mode="same", name="2conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), border_mode="same", name="2maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="2drop")(y)
y = Activation('relu', name="2activ")(y)

# Layer 3: 128-unit relu convolution
y = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, border_mode="same", name="3conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="same", name="3maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="3drop")(y)
y = Activation('relu', name="3activ")(y)

# Layer 4: 160-unit relu convolution
y = Convolution2D(nb_filter=160, nb_row=5, nb_col=5, border_mode="same", name="4conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), border_mode="same", name="4maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="4drop")(y)
y = Activation('relu', name="4activ")(y)

# Layer 5: 192-unit relu convolution
y = Convolution2D(nb_filter=192, nb_row=5, nb_col=5, border_mode="same", name="5conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="same", name="5maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="5drop")(y)
y = Activation('relu', name="5activ")(y)

# Layer 6: 192-unit relu convolution
y = Convolution2D(nb_filter=192, nb_row=5, nb_col=5, border_mode="same", name="6conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), border_mode="same", name="6maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="6drop")(y)
y = Activation('relu', name="6activ")(y)

# Layer 7: 192-unit relu convolution
y = Convolution2D(nb_filter=192, nb_row=5, nb_col=5, border_mode="same", name="7conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="same", name="7maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="7drop")(y)
y = Activation('relu', name="7activ")(y)

# Layer 8: 192-unit relu convolution
y = Convolution2D(nb_filter=192, nb_row=5, nb_col=5, border_mode="same", name="8conv")(y)
y = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), border_mode="same", name="8maxpool")(y)
# y = SubtractiveNormalization((3,3))(y)
y = Dropout(0.25, name="8drop")(y)
y = Activation('relu', name="8activ")(y)

# Layer 9: Flatten
y = Flatten()(y)

# Layer 10: Fully-connected layer
y = Dense(3072, activation=None, name="fc1")(y)

# Layer 11: Fully-connected layer
y = Dense(3072, activation=None, name="fc2")(y)

length = Dense(4, activation="softmax", name="length")(y)
digit1 = Dense(10, activation="softmax", name="digit1")(y)
digit2 = Dense(10, activation="softmax", name="digit2")(y)
digit3 = Dense(10, activation="softmax", name="digit3")(y)
digit4 = Dense(10, activation="softmax", name="digit4")(y)
digit5 = Dense(10, activation="softmax", name="digit5")(y)
model = Model(input=x, output=[length, digit1, digit2, digit3, digit4, digit5])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, [y0_train, y1_train, y2_train, y3_train, y4_train, y5_train],
          validation_data=(X_test, [y0_test, y1_test, y2_test, y3_test, y4_test, y5_test]),
          nb_epoch=10, batch_size=200, verbose=2)
model.evaluate(X_test, [y0_test, y1_test, y2_test, y3_test, y4_test, y5_test], verbose=0)
# With one-hot targets:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# With integer targets (no one-hot conversion needed):
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
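For reference, a small sketch of the target formats the two losses expect, for a single 10-way softmax head (the names are illustrative):

import numpy as np

# categorical_crossentropy wants one-hot rows, shape (num_samples, 10):
y_onehot = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]])  # the digit 7

# sparse_categorical_crossentropy wants integer class indices
# (in Keras 1.x typically shaped (num_samples, 1)):
y_sparse = np.array([[7]])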