--------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-24-10401e276f94> in <module>() 63 #decoder_3 = Model(input = encoded_input_3, output = decoder_layer_3(encoded_input_3)) 64 ---> 65 decoder_1 = Model(input=encoded_input_1, output=decoder_layer_3(decoder_layer_2(decoder_layer_1(encoded_input_1)))) 66 67 ''' /usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in __call__(self, x, mask) 567 if inbound_layers: 568 # This will call layer.build() if necessary. --> 569 self.add_inbound_node(inbound_layers, node_indices, tensor_indices) 570 # Outputs were already computed when calling self.add_inbound_node. 571 outputs = self.inbound_nodes[-1].output_tensors /usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in add_inbound_node(self, inbound_layers, node_indices, tensor_indices) 630 # creating the node automatically updates self.inbound_nodes 631 # as well as outbound_nodes on inbound layers. --> 632 Node.create_node(self, inbound_layers, node_indices, tensor_indices) 633 634 def get_output_shape_for(self, input_shape): /usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in create_node(cls, outbound_layer, inbound_layers, node_indices, tensor_indices) 166 # TODO: try to auto-infer shape 167 # if exception is raised by get_output_shape_for. --> 168 output_shapes = to_list(outbound_layer.get_output_shape_for(input_shapes[0])) 169 else: 170 output_tensors = to_list(outbound_layer.call(input_tensors, mask=input_masks)) /usr/local/lib/python2.7/dist-packages/keras/layers/core.pyc in get_output_shape_for(self, input_shape) 773 def get_output_shape_for(self, input_shape): 774 assert input_shape and len(input_shape) >= 2 --> 775 assert input_shape[-1] and input_shape[-1] == self.input_dim 776 output_shape = list(input_shape) 777 output_shape[-1] = self.output_dim AssertionError:
#-------------------
def build_model():
    """Assemble a dense MNIST autoencoder plus standalone encoder/decoder views.

    Based on https://blog.keras.io/building-autoencoders-in-keras.html

    Returns:
        (autoencoderM, encoderM, decoderM): three Keras models sharing the
        same weights -- full image->reconstruction, image->code, and
        code->reconstruction.

    NOTE(review): reads the module-level `encoding_dim` -- confirm it is
    defined before this function is called.
    """
    # 784 = flattened 28x28 MNIST image.
    input_img = Input(shape=(784,), name='inp_img')

    # Encoder: 784 -> 4*code -> code (the bottleneck).
    hidden_enc = Dense(encoding_dim*4, activation='relu', name='img2zip')(input_img)
    encoded = Dense(encoding_dim, activation='relu', name='zip_data')(hidden_enc)

    # Decoder half: code -> 4*code -> 784; sigmoid keeps pixels in [0, 1].
    hidden_dec = Dense(encoding_dim*4, activation='relu', name='zip2img')(encoded)
    decoded = Dense(784, activation='sigmoid', name='out_img')(hidden_dec)

    # 1) full autoencoder: image -> reconstruction
    autoencoderM = Model(input_img, decoded)
    # 2) encoder view: image -> bottleneck code
    encoderM = Model(input_img, encoded)

    # 3) decoder view: route a stand-alone code placeholder through the
    # autoencoder's last two layers, so the decoder reuses trained weights.
    encoded_input = Input(shape=(encoding_dim,), name='zip_input')
    rebuilt = autoencoderM.layers[-1](autoencoderM.layers[-2](encoded_input))
    decoderM = Model(encoded_input, rebuilt)

    autoencoderM.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

    # Report parameter counts and layer tables for each of the three views.
    print('\nFull auto-encoder:', autoencoderM.count_params())
    autoencoderM.summary()  # will print
    print('\nEncoder:', encoderM.count_params())
    encoderM.summary()  # will print
    print('\nDecoder:', decoderM.count_params())
    decoderM.summary()  # will print
    return autoencoderM, encoderM, decoderM
- - -
#use case:
# Build the three views of the autoencoder (full model, encoder, decoder).
autoM,encoM,decoM=build_model()
# Train the full model to reconstruct its own input: targets == inputs (X, X).
# NOTE(review): assumes `X`, `X_test` and a parsed `args` namespace
# (verb, batch_size, epochs) are defined earlier in the file -- confirm.
# `nb_epoch` is the Keras 1.x keyword (renamed `epochs` in Keras 2).
hir=autoM.fit(X,X,shuffle=True, verbose=args.verb,
validation_data=(X_test, X_test),
batch_size=args.batch_size, nb_epoch=args.epochs )
# Compress the test images to their bottleneck codes...
encoded_imgs = encoM.predict(X_test)
# ...then reconstruct images from the codes with the stand-alone decoder.
decoded_imgs = decoM.predict(encoded_imgs)