import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers


class MyLayer(layers.Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        shape = tf.TensorShape((input_shape[1], self.output_dim))
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=shape,
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
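As a sanity check (a minimal sketch, assuming TF 2.x-style eager execution and a made-up dummy input), the layer's own op does appear to have a defined gradient:

# Dummy forward/backward pass to confirm tf.matmul is differentiable here.
layer = MyLayer(20)
x = tf.random.normal((1, 784))  # stand-in for one flattened 28x28 image
with tf.GradientTape() as tape:
    y = layer(x)                # build() runs on the first call
    loss = tf.reduce_sum(y)
grads = tape.gradient(loss, layer.trainable_weights)
print(grads[0] is not None)     # prints True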
model = tf.keras.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    MyLayer(20),
    layers.Activation('relu'),
    MyLayer(10),
    layers.Activation('relu'),
    layers.Dense(10, activation='softmax'),
])
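(`data`, `labels`, `val_data`, and `val_labels` are not shown above; the following are hypothetical random stand-ins in the shapes the model expects, just to make the snippet self-contained.)

import numpy as np

# Placeholder arrays in MNIST-like shapes (an assumption, not the real dataset).
data = np.random.random((100, 28, 28)).astype('float32')
labels = np.random.randint(10, size=(100,))
val_data = np.random.random((20, 28, 28)).astype('float32')
val_labels = np.random.randint(10, size=(20,))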
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(data, labels, epochs=1, batch_size=1,
          validation_data=(val_data, val_labels))
Traceback (most recent call last):
  File "custom_layer.py", line 156, in <module>
ValueError: An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.