How to create a multilayer autoencoder?


dr.rom...@gmail.com

Jan 24, 2017, 9:10:49 AM
to Keras-users
Hi,

I would like to create a multi-layer autoencoder for vector analysis. I'm following the tutorial at https://blog.keras.io/building-autoencoders-in-keras.html. I have a long binary vector (0 or 1 in each dimension) representing an object, with length e.g. 200. The main idea is to train a NN to reduce the vector size to 10; then I would like to compare these reduced vectors. The autoencoder with a single fully-connected layer from the tutorial compiles and seems to work. But when I try to use multiple hidden layers as in the tutorial, I can't even build the model. My code is below. As you can see, I tried all possible combinations but can't define the decoder. I get this error:

---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
<ipython-input-24-10401e276f94> in <module>()
     63 #decoder_3 = Model(input = encoded_input_3, output = decoder_layer_3(encoded_input_3))
     64 
---> 65 decoder_1 = Model(input=encoded_input_1, output=decoder_layer_3(decoder_layer_2(decoder_layer_1(encoded_input_1))))
     66 
     67 '''

/usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in __call__(self, x, mask)
    567         if inbound_layers:
    568             # This will call layer.build() if necessary.
--> 569             self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
    570             # Outputs were already computed when calling self.add_inbound_node.
    571             outputs = self.inbound_nodes[-1].output_tensors

/usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in add_inbound_node(self, inbound_layers, node_indices, tensor_indices)
    630         # creating the node automatically updates self.inbound_nodes
    631         # as well as outbound_nodes on inbound layers.
--> 632         Node.create_node(self, inbound_layers, node_indices, tensor_indices)
    633 
    634     def get_output_shape_for(self, input_shape):

/usr/local/lib/python2.7/dist-packages/keras/engine/topology.pyc in create_node(cls, outbound_layer, inbound_layers, node_indices, tensor_indices)
    166             # TODO: try to auto-infer shape
    167             # if exception is raised by get_output_shape_for.
--> 168             output_shapes = to_list(outbound_layer.get_output_shape_for(input_shapes[0]))
    169         else:
    170             output_tensors = to_list(outbound_layer.call(input_tensors, mask=input_masks))

/usr/local/lib/python2.7/dist-packages/keras/layers/core.pyc in get_output_shape_for(self, input_shape)
    773     def get_output_shape_for(self, input_shape):
    774         assert input_shape and len(input_shape) >= 2
--> 775         assert input_shape[-1] and input_shape[-1] == self.input_dim
    776         output_shape = list(input_shape)
    777         output_shape[-1] = self.output_dim

AssertionError: 


How should I correctly define the decoder? And how can I access the trained 10-number vectors and map them back to the original objects?


I found some similar questions (http://stackoverflow.com/questions/37758496/python-keras-theano-wrong-dimensions-for-deep-autoencoder and http://stackoverflow.com/questions/40118965/keras-deep-autoencoder-prediction-is-inaccurate), but they don't help.

Thank you in advance, I would appreciate any help.

Regards,

Roman

##############

from keras.layers import Input, Dense
from keras.models import Model

# this is the size of the input vectors
VECTOR_SIZE = 200

input_doc = Input(shape=(VECTOR_SIZE,))
encoded = Dense(VECTOR_SIZE, activation='relu')(input_doc)

#'''
#encoded = Dense(200, activation='relu')(input_doc)
encoded = Dense(50, activation='relu')(encoded)
encoded = Dense(10, activation='relu')(encoded)

###decoded = Dense(10, activation='relu')(encoded)
decoded = Dense(50, activation='relu')(encoded)
#decoded = Dense(200, activation='relu')(decoded)
decoded = Dense(VECTOR_SIZE, activation='sigmoid')(decoded)
#'''

#decoded = Dense(VECTOR_SIZE, activation='sigmoid')(encoded)



# this model maps an input to its reconstruction
autoencoder = Model(input=input_doc, output=decoded)


# this model maps an input to its encoded representation
encoder = Model(input=input_doc, output=encoded)
#encoder_layer = autoencoder.layers[0]
#encoder_layer2 = autoencoder.layers[1]
#encoder_layer3 = autoencoder.layers[2]

#encoder10 = Model(input=input_doc, output=encoder_layer3(encoder_layer2(encoder_layer(input_doc))))




# create placeholders for the intermediate representations (10, 50 and 200 dimensions)
encoded_input_1 = Input(shape=(10,))
encoded_input_2 = Input(shape=(50,))
encoded_input_3 = Input(shape=(VECTOR_SIZE,))

# retrieve the last three layers of the autoencoder model
decoder_layer_1 = autoencoder.layers[-3]
decoder_layer_2 = autoencoder.layers[-2]
decoder_layer_3 = autoencoder.layers[-1]

# create the decoder model
#decoder_1 = Model(input = encoded_input_1, output = decoder_layer_1(encoded_input_1))
#decoder_2 = Model(input = encoded_input_2, output = decoder_layer_2(encoded_input_2))
#decoder_3 = Model(input = encoded_input_3, output = decoder_layer_3(encoded_input_3))

decoder_1 = Model(input=encoded_input_1, output=decoder_layer_3(decoder_layer_2(decoder_layer_1(encoded_input_1))))

'''

# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(VECTOR_SIZE,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]

decoder_layer3 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
#decoder_layer3 = autoencoder.layers[-1]

# create the decoder model
##decoder10 = Model(input=encoded_input, output=decoder_layer(decoder_layer2(decoder_layer3(encoded_input))))
#decoder10 = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(decoder_layer(encoded_input))))
'''

# create the decoder model
#decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))

autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

# train autoencoder
autoencoder.fit(train_vectors, train_vectors,
                nb_epoch=50,
                batch_size=256,
                shuffle=True,
                validation_data=(test_vectors, test_vectors))


# after the autoencoder reaches a stable train/test loss value
autoencoder.summary()
encoder.summary()
decoder.summary()

print 'decoders:'
#decoder10.summary()
decoder_1.summary()
decoder_2.summary()
decoder_3.summary()
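
A note on the traceback above: autoencoder.layers[-3] is the Dense(10) bottleneck layer, which was built on a 50-dimensional tensor, so calling it on the 10-dimensional encoded_input_1 fails the input_shape[-1] == self.input_dim assertion. A minimal sketch of a decoder that reuses only the two layers after the bottleneck (the names decoder_layer_50, decoder_layer_200 and codes are just illustrative; autoencoder, encoder and train_vectors are the ones defined in the code above) would be:

# minimal sketch, continuing from the code above (same Keras 1 style API)
encoded_input = Input(shape=(10,))
decoder_layer_50 = autoencoder.layers[-2]    # Dense(50), built with a 10-dim input
decoder_layer_200 = autoencoder.layers[-1]   # Dense(200), built with a 50-dim input
decoder = Model(input=encoded_input,
                output=decoder_layer_200(decoder_layer_50(encoded_input)))

# because encoder shares its layers with autoencoder, predicting after training
# returns the trained 10-number codes, with row i matching object i of train_vectors
codes = encoder.predict(train_vectors)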

Jim Sharpe

Jan 30, 2017, 12:02:07 PM
to Keras-users, dr.rom...@gmail.com
I'm also interested in seeing an example of how to use Keras to create a multi-layer generative autoencoder.

bale...@lbl.gov

Sep 3, 2017, 10:58:01 PM
to Keras-users, dr.rom...@gmail.com
This is a working example of a multi-layer encoder/decoder implemented with the Keras API.
Thanks,
Jan

#-------------------

from keras.layers import Input, Dense
from keras.models import Model

# size of the compressed representation; assumed to be set globally in the
# original script (e.g. 32, as in the Keras blog post)
encoding_dim = 32


def build_model():
    ''' based on https://blog.keras.io/building-autoencoders-in-keras.html '''

    # this is the MNIST input placeholder
    input_img = Input(shape=(784,), name='inp_img')

    net = Dense(encoding_dim*4, activation='relu', name='img2zip')(input_img)
    encoded = Dense(encoding_dim, activation='relu', name='zip_data')(net)

    # "decoded" is the lossy reconstruction of the input
    net = Dense(encoding_dim*4, activation='relu', name='zip2img')(encoded)
    decoded = Dense(784, activation='sigmoid', name='out_img')(net)

    # 1) this model maps an input to its reconstruction
    autoencoderM = Model(input_img, decoded)

    # 2) this model maps an input to its encoded representation
    encoderM = Model(input_img, encoded)

    # create a placeholder for an encoded (zip-dimensional) input
    encoded_input = Input(shape=(encoding_dim,), name='zip_input')

    # retrieve the last 2 layers of the autoencoder model and chain them
    net = autoencoderM.layers[-2](encoded_input)
    net = autoencoderM.layers[-1](net)

    # 3) this model maps an encoded representation back to an input image
    decoderM = Model(encoded_input, net)

    autoencoderM.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

    print('\nFull auto-encoder:', autoencoderM.count_params())
    autoencoderM.summary()  # will print

    print('\nEncoder:', encoderM.count_params())
    encoderM.summary()  # will print

    print('\nDecoder:', decoderM.count_params())
    decoderM.summary()  # will print

    return autoencoderM, encoderM, decoderM
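
One design note on this pattern: decoderM is assembled from autoencoderM.layers[-2] and autoencoderM.layers[-1] rather than from fresh Dense layers, so it shares the weights learned when autoencoderM is trained; encoderM likewise reuses the encoding layers through the shared input_img tensor. Only the autoencoder itself needs to be compiled and fit.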


- - - 

#use case (X, X_test and the args settings come from the surrounding training script):

autoM, encoM, decoM = build_model()

hir = autoM.fit(X, X, shuffle=True, verbose=args.verb,
                validation_data=(X_test, X_test),
                batch_size=args.batch_size, nb_epoch=args.epochs)

encoded_imgs = encoM.predict(X_test)
decoded_imgs = decoM.predict(encoded_imgs)
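
Regarding the original question about comparing the reduced vectors: the rows of encoded_imgs line up with the rows of X_test, so encoded_imgs[i] is the compressed code of X_test[i]. A small hypothetical follow-up (the names codes, a, b and cos_sim are only illustrative) could compare two objects directly on their codes, for example with cosine similarity:

import numpy as np

codes = encoM.predict(X_test)        # shape: (num_samples, encoding_dim)
a, b = codes[0], codes[1]            # codes of the first two objects in X_test
cos_sim = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)
print('cosine similarity between object 0 and object 1:', cos_sim)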

