TypeError: Not JSON Serializable: <module 'tensorflow' from '/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/tensorflow/__init__.py'>

640 views
Skip to first unread message

Pejvak Moghimi

unread,
Aug 14, 2019, 11:24:09 AM8/14/19
to Keras-users
Hi All,

The following code gives me an error when saving the model. I have similar issues, but not quite the same, on the keras github page, but, installing the tf-nightly seem to have resolved those issues. I uninstalled tensorflow and installed the tf-nightly version to no avail.

This problem has been going on for a long time and is obviously crucial to my work, so, any help to solve it would be greatly appreciated.

Cheers,
Pejvak.


# %time
from keras.utils import plot_model
# import keras
# import pydot as pyd
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers import Bidirectional
import h5py
# keras.utils.vis_utils.pydot = pyd
import logging
import time

ts = time.time()
time = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
log_dir ='/d/as2/u/mp002/chain_pairing/logs/{}/server_0'.format(date)

if not os.path.isdir(log_dir):
    os.makedirs(log_dir)

#Visualize Model
def visualize_model(model):
    SVG(model_to_dot(model).create(prog='dot', format='svg'))
    return plot_model(model, show_shapes=True, show_layer_names=True, 
                      to_file=os.path.join(log_dir, "network_architecture_{}.svg".format(time)))

def keras_1D_CNN():
    
    model = models.Sequential()
    model.add(Convolution1D(input_shape=(253,21), 
                            kernel_initializer="he_uniform", 
                            bias_initializer=initializers.Constant(0.1), 
                            nb_filter=256,
                            filter_length=2,
                            padding="same",
                            activation="relu", 
                            dilation_rate=2, 
                            name='conv-1D_1'))
    model.add(BatchNormalization(name='BatchNorm_1'))
    model.add(MaxPooling1D(pool_size=2, strides=1, name='MaxPool1D_1'))
    
    model.add(layers.Conv1D(128, 3, activation='relu', padding="same", 
                            kernel_initializer="he_uniform", 
                            bias_initializer=initializers.Constant(0.1), 
                            dilation_rate=3, 
                            name='conv-1D_2'))
    model.add(BatchNormalization(name='BatchNorm_2'))
    model.add(MaxPooling1D(pool_size=3, strides=1, name='MaxPool1D_2'))
    
    model.add(layers.Conv1D(64, 3, activation='relu', padding="same", 
                            kernel_initializer="he_uniform", 
                            bias_initializer=initializers.Constant(0.1), 
                            dilation_rate=4, 
                            name='conv-1D_3'))
    model.add(BatchNormalization(name='BatchNorm_3'))
    model.add(MaxPooling1D(pool_size=4, strides=1, name='MaxPool1D_3'))

    model.add(Bidirectional(LSTM(1024, return_sequences=True, dropout=0.5,
                             recurrent_dropout=0.2), input_shape=(241, 1)))
    model.add(TimeDistributed(Dense(128, activation='relu'), name='Dense_1'))
    model.add(TimeDistributed(Dropout(0.4, name='TimeDistributed-Dropout_1 = 0.4')))
    model.add(TimeDistributed(Dense(64, activation='relu'), name='Dense_2'))
    model.add(Flatten(name='Flatten'))
    model.add(Dropout(0.5, name='Dropout_1'))
    model.add(Dense(64, activation='relu', 
                    kernel_initializer="he_uniform", 
                    bias_initializer=initializers.Constant(0.1), 
                    name='Dense_3'))
    model.add(Dropout(0.5, name='Dropout_2'))
    
    model.add(Dense(1, activation='sigmoid', name='Sigmoid-layer'))
    
    visualize_model(model)
    model = multi_gpu_model(model, gpus=8)
#     opt = optimizers.SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
#     opt = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    opt = keras.optimizers.Adadelta()
    tensorboard = TensorBoard(log_dir=os.path.join(log_dir, "TensorBoard_{}".format(time)), 
                              write_graph=True, write_grads=True, batch_size=2048)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    
#     logger = logging.getLogger(__name__)
#     model.summary(print_fn=logger.info)
    with open(os.path.join(log_dir, 'model_summary{}.txt'.format(time)),'w') as fh:
        # Pass the file handle in as a lambda function to make it callable
        model.summary(print_fn=lambda x: fh.write(x + '\n'))
#     model.summary()
    checkpoint_filepath = os.path.join(log_dir, "best-model-{}".format(time) + "_{epoch:02d}-{val_loss:.2f}.hdf5")
    checkpoint = ModelCheckpoint(checkpoint_filepath, monitor='val_loss', 
                                   verbose=1, save_best_only=True, mode="min")
    earlystopper = EarlyStopping(monitor='val_loss', patience=100, verbose=1)
    CNN_history = model.fit(partial_seq_train, 
              partial_label_train, 
              epochs=1000, 
              batch_size=2048, 
              validation_split=0.2, shuffle=True, callbacks=[tensorboard, earlystopper, checkpoint])
#     CNN_history = model.fit(partial_seq_train, 
#               partial_label_train, 
#               epochs=200, 
#               batch_size=1667)
#     model.save(saved_model_path)
#     results = model.evaluate(seq_test, label_test)
#     ynew = model.predict_classes(seq_test)
#     for i in range(len(seq_test)):
#         print("X" + str(i) + "_prediction=%s" % (ynew[i]))
#     print(results)
    return (model, CNN_history, results)


TypeError                                 Traceback (most recent call last)
<ipython-input-22-4d0435c2448d> in <module>()
      1 get_ipython().run_line_magic('time', '')
----> 2 CNN_1D_model = keras_1D_CNN()
      3 CNN_history_dict = CNN_1D_model[1].history
      4 CNN_history_dict.keys()
      5 print("\n%s: %.2f%%" % (CNN_1D_model[0].metrics_names[1], CNN_1D_model[2][1]*100))

<ipython-input-21-2bf565bb8ec7> in keras_1D_CNN()
    100               epochs=1000,
    101               batch_size=2048,
--> 102               validation_split=0.2, shuffle=True, callbacks=[tensorboard, earlystopper, checkpoint], verbose=0)
    103 #     CNN_history = model.fit(partial_seq_train,
    104 #               partial_label_train,

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1037                                         initial_epoch=initial_epoch,
   1038                                         steps_per_epoch=steps_per_epoch,
-> 1039                                         validation_steps=validation_steps)
   1040 
   1041     def evaluate(self, x=None, y=None,

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
    215                         for l, o in zip(out_labels, val_outs):
    216                             epoch_logs['val_' + l] = o
--> 217         callbacks.on_epoch_end(epoch, epoch_logs)
    218         if callback_model.stop_training:
    219             break

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/callbacks.py in on_epoch_end(self, epoch, logs)
     77         logs = logs or {}
     78         for callback in self.callbacks:
---> 79             callback.on_epoch_end(epoch, logs)
     80 
     81     def on_batch_begin(self, batch, logs=None):

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/callbacks.py in on_epoch_end(self, epoch, logs)
    444                             self.model.save_weights(filepath, overwrite=True)
    445                         else:
--> 446                             self.model.save(filepath, overwrite=True)
    447                     else:
    448                         if self.verbose > 0:

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/network.py in save(self, filepath, overwrite, include_optimizer)
   1088             raise NotImplementedError
   1089         from ..models import save_model
-> 1090         save_model(self, filepath, overwrite, include_optimizer)
   1091 
   1092     def save_weights(self, filepath, overwrite=True):

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/saving.py in save_model(model, filepath, overwrite, include_optimizer)
    380 
    381     try:
--> 382         _serialize_model(model, f, include_optimizer)
    383     finally:
    384         if opened_new_file:

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/saving.py in _serialize_model(model, f, include_optimizer)
     82     model_config['class_name'] = model.__class__.__name__
     83     model_config['config'] = model.get_config()
---> 84     model_config = json.dumps(model_config, default=get_json_type)
     85     model_config = model_config.encode('utf-8')
     86     f['model_config'] = model_config

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/json/__init__.py in dumps(obj, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
    236         check_circular=check_circular, allow_nan=allow_nan, indent=indent,
    237         separators=separators, default=default, sort_keys=sort_keys,
--> 238         **kw).encode(obj)
    239 
    240 

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/json/encoder.py in encode(self, o)
    197         # exceptions aren't as detailed.  The list call should be roughly
    198         # equivalent to the PySequence_Fast that ''.join() would do.
--> 199         chunks = self.iterencode(o, _one_shot=True)
    200         if not isinstance(chunks, (list, tuple)):
    201             chunks = list(chunks)

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/json/encoder.py in iterencode(self, o, _one_shot)
    255                 self.key_separator, self.item_separator, self.sort_keys,
    256                 self.skipkeys, _one_shot)
--> 257         return _iterencode(o, 0)
    258 
    259 def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,

/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/keras/engine/saving.py in get_json_type(obj)
     72             return obj.__name__
     73 
---> 74         raise TypeError('Not JSON Serializable: %s' % (obj,))
     75 
     76     from .. import __version__ as keras_version

TypeError: Not JSON Serializable: <module 'tensorflow' from '/d/harpy1/s/python/v3-5.1.0/lib/python3.6/site-packages/tensorflow/__init__.py'>



Lance Norskog

unread,
Aug 14, 2019, 2:40:41 PM8/14/19
to Pejvak Moghimi, Keras-users
Maybe try 'import tensorflow' in the main program?
Try removing the Tensorflow object or other non-necessary members of the model.

Or, change the model save callbackto only save weights.

Good luck!


--
You received this message because you are subscribed to the Google Groups "Keras-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to keras-users...@googlegroups.com.
To view this discussion on the web, visit https://groups.google.com/d/msgid/keras-users/3e038912-1eca-4597-952b-b17c43982c1d%40googlegroups.com.


--
Lance Norskog
lance....@gmail.com
Redwood City, CA

Pejvak Moghimi

unread,
Aug 14, 2019, 5:23:29 PM8/14/19
to Keras-users
Hi Lance,

Thank you very much for the fast and helpful response.

Your second suggestion worked. Though, I wonder what information I am losing by only saving the weights. One would be the architecture itself, but, is there anything of importance that cannot be preserved by simply retaining the code for the model?

Best wishes,
Pejvak.

On Wednesday, August 14, 2019 at 7:40:41 PM UTC+1, Lance Norskog wrote:
Maybe try 'import tensorflow' in the main program?
Try removing the Tensorflow object or other non-necessary members of the model.

Or, change the model save callbackto only save weights.

Good luck!


To unsubscribe from this group and stop receiving emails from it, send an email to keras...@googlegroups.com.

Lance Norskog

unread,
Aug 14, 2019, 10:32:30 PM8/14/19
to Pejvak Moghimi, Keras-users
When you save the full model during training, you also save the optimizer state. So, you can train for N epochs, reload and train for another N epochs, and the optimizer follows its path. I have used this on Colab (servers reboot often). Otherwise, there's not much.

Lance

To unsubscribe from this group and stop receiving emails from it, send an email to keras-users...@googlegroups.com.
To view this discussion on the web, visit https://groups.google.com/d/msgid/keras-users/0d676e01-95b1-4447-9c33-eb36437a4500%40googlegroups.com.

Pejvak Moghimi

unread,
Aug 15, 2019, 6:20:11 AM8/15/19
to Keras-users
Ok, thank you, that's very helpful.

Pejvak


On Thursday, August 15, 2019 at 3:32:30 AM UTC+1, Lance Norskog wrote:
When you save the full model during training, you also save the optimizer state. So, you can train for N epochs, reload and train for another N epochs, and the optimizer follows its path. I have used this on Colab (servers reboot often). Otherwise, there's not much.

Lance

Reply all
Reply to author
Forward
0 new messages