I am having problem in fit function of keras, the error is saying, "in user code"

11,626 views
Skip to first unread message

Jibran Mir

unread,
Dec 4, 2021, 6:59:28 AM12/4/21
to Keras-users
these are the errors that I don't understand. Please guide me

Epoch 1/200
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-39-0c8f8254c82a> in <module> ----> 1 history = model.fit(X_tr, np.array(y_tr), batch_size=22, epochs=200, validation_split=0.1, verbose=1) ~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs) 106 def _method_wrapper(self, *args, **kwargs): 107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access --> 108 return method(self, *args, **kwargs) 109 110 # Running inside `run_distribute_coordinator` already. ~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing) 1096 batch_size=batch_size): 1097 callbacks.on_train_batch_begin(step) -> 1098 tmp_logs = train_function(iterator) 1099 if data_handler.should_sync: 1100 context.async_wait() ~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds) 778 else: 779 compiler = "nonXla" --> 780 result = self._call(*args, **kwds) 781 782 new_tracing_count = self._get_tracing_count() ~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds) 812 # In this case we have not created variables on the first call. So we can 813 # run the first trace but we should fail if variables are created. 
--> 814 results = self._stateful_fn(*args, **kwds) 815 if self._created_variables: 816 raise ValueError("Creating variables on a non-first call to a function" ~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs) 2826 """Calls a graph function specialized to the inputs.""" 2827 with self._lock: -> 2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs) 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access 2830 ~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs) 3208 and self.input_signature is None 3209 and call_context_key in self._function_cache.missed): -> 3210 return self._define_function_with_shape_relaxation(args, kwargs) 3211 3212 self._function_cache.missed.add(call_context_key) ~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _define_function_with_shape_relaxation(self, args, kwargs) 3139 expand_composites=True) 3140 -> 3141 graph_function = self._create_graph_function( 3142 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes) 3143 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function ~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes) 3063 arg_names = base_arg_names + missing_arg_names 3064 graph_function = ConcreteFunction( -> 3065 func_graph_module.func_graph_from_py_func( 3066 self._name, 3067 self._python_function, ~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes) 984 _, original_func = tf_decorator.unwrap(python_func) 985 --> 986 func_outputs = python_func(*func_args, **func_kwargs) 987 988 # invariant: `func_outputs` 
contains only Tensors, CompositeTensors, ~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds) 598 # __wrapped__ allows AutoGraph to swap in a converted function. We give 599 # the function a weak reference to itself to avoid a reference cycle. --> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds) 601 weak_wrapped_fn = weakref.ref(wrapped_fn) 602 ~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs) 971 except Exception as e: # pylint:disable=broad-except 972 if hasattr(e, "ag_error_metadata"): --> 973 raise e.ag_error_metadata.to_exception(e) 974 else: 975 raise ValueError: in user code: C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function * return step_function(self, iterator) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function ** outputs = model.distribute_strategy.run(run_step, args=(data,)) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica return self._call_for_each_replica(fn, args, kwargs) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica return fn(*args, **kwargs) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step ** outputs = model.train_step(data) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:756 train_step _minimize(self.distribute_strategy, tape, self.optimizer, loss, C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:2736 _minimize gradients = optimizer._aggregate_gradients(zip(gradients, # 
pylint: disable=protected-access C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:562 _aggregate_gradients filtered_grads_and_vars = _filter_grads(grads_and_vars) C:\Users\BlackPearl\anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:1270 _filter_grads raise ValueError("No gradients provided for any variable: %s." % ValueError: No gradients provided for any variable: ['embedding/embeddings:0', 'bidirectional/forward_lstm/lstm_cell_1/kernel:0', 'bidirectional/forward_lstm/lstm_cell_1/recurrent_kernel:0', 'bidirectional/forward_lstm/lstm_cell_1/bias:0', 'bidirectional/backward_lstm/lstm_cell_2/kernel:0', 'bidirectional/backward_lstm/lstm_cell_2/recurrent_kernel:0', 'bidirectional/backward_lstm/lstm_cell_2/bias:0', 'dense/kernel:0', 'dense/bias:0'].

Lance Norskog

unread,
Dec 4, 2021, 4:47:55 PM12/4/21
to Jibran Mir, Keras-users
A few tips:
1) Make the model as simple as possible.
2) Print out the model using model.summary() and post it here.
3) Anaconda does not have the latest versions of Tensorflow or Keras. Try installing Tensorflow 2.6 or 2.7. Use "tensorflow.keras" instead of "keras" to import Keras classes.


--
You received this message because you are subscribed to the Google Groups "Keras-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to keras-users...@googlegroups.com.
To view this discussion on the web, visit https://groups.google.com/d/msgid/keras-users/26e42f8c-71dd-4ee3-8ca0-4188a8270cfdn%40googlegroups.com.


--
Lance Norskog
lance....@gmail.com
Redwood City, CA

Jibran Mir

unread,
Dec 5, 2021, 2:59:00 AM12/5/21
to Keras-users
Dear sir, 

Thank you for replying. I am attaching screenshots of my model summary and TensorFlow version. Do I need to upgrade TensorFlow? In addition, I want to use CRF as an output layer — please guide me on all of these things.

regards, 
Jibran Mir

summary_of_keras.JPG
tensor_version_imagepng.png

Lance Norskog

unread,
Dec 6, 2021, 12:11:16 PM12/6/21
to Jibran Mir, Keras-users
Tensorflow is now up to release 2.7 (I believe). You can try installing a more recent version. I do all of my work on Colab (Google's free notebook service). More recent releases can give different error messages.

Your code does not describe 'X_tr'. It is possible that X_tr and y_tr do not match each other the way that they should.

I do not know anything about CRF. It is a recent addition to Tensorflow. I generally use what is available in the Keras documentation Examples section. I would suggest that you get the base model working well before you add CRF.

Cheers,

Lance Norskog

Lance Norskog

unread,
Dec 6, 2021, 8:11:37 PM12/6/21
to Jibran Mir, Keras-users
It will be most effective to look through these examples and find one that does the same kind of task as you are trying to implement:




On Mon, Dec 6, 2021 at 12:23 PM Jibran Mir <jib...@gmail.com> wrote:
Dear Lance,

Yes, you are right — the problem is not with the TensorFlow version or the installation. The problem lies in the data transformation: I am using a text dataset that needs to be converted into numbers, and I am sure the issue is there. Please guide me on how to correctly transform text data for use in Keras. I am going to attach my code file.

Virus-free. www.avast.com

Virus-free. www.avast.com
Message has been deleted

Jano Jano

unread,
Mar 25, 2023, 2:59:39 PM3/25/23
to Keras-users
I also have a similar problem.
shapeimage.png
!pip install tensorflow
!pip install keras
!pip install numpy
!pip install rouge
!pip install matplotlib

# Import necessary libraries
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, Concatenate, TimeDistributed, Bidirectional, Attention, Masking
from tensorflow.keras.callbacks import EarlyStopping


# Define hyperparameters
vocab_size = 5000        # keep only the 5000 most frequent tokens
max_len_text = 200       # padded length of the source articles
max_len_summary = 20     # padded length of the target summaries
embedding_dim = 200
hidden_units = 256

# Load and preprocess data.
# NOTE: the original paste was garbled — "open('train.txt''r', encoding='utf-8'as f:"
# is a SyntaxError (missing commas and a broken `as` clause); restored here.
with open('train.txt', 'r', encoding='utf-8') as f:
    text_lines = f.read().split('\n')
with open('summary.txt', 'r', encoding='utf-8') as f:
    summary_lines = f.read().split('\n')

# Tokenize input (article) text: fit vocabulary, convert to integer ids,
# then right-pad every sequence to a fixed length.
input_tokenizer = Tokenizer(num_words=vocab_size)
input_tokenizer.fit_on_texts(text_lines)
input_seq = input_tokenizer.texts_to_sequences(text_lines)
input_seq = pad_sequences(input_seq, maxlen=max_len_text, padding='post')

# Tokenize output (summary) text the same way, with its own vocabulary.
output_tokenizer = Tokenizer(num_words=vocab_size)
output_tokenizer.fit_on_texts(summary_lines)
output_seq = output_tokenizer.texts_to_sequences(summary_lines)
output_seq = pad_sequences(output_seq, maxlen=max_len_summary, padding='post')
# Define the encoder-decoder model with attention.
# Encoder: bidirectional LSTM over the embedded source sequence.
encoder_inputs = Input(shape=(max_len_text,))
# mask_zero=True makes the Embedding emit a padding mask for token id 0.
# (The original used a Masking() layer *after* the embedding, which is
# ineffective: embedded vectors of token 0 are generally not all-zero.)
enc_emb = Embedding(vocab_size, embedding_dim, mask_zero=True)(encoder_inputs)
encoder_lstm = Bidirectional(LSTM(hidden_units, return_sequences=True, return_state=True))
encoder_outputs, forward_h, forward_c, backward_h, backward_c = encoder_lstm(enc_emb)
# Concatenate forward/backward states so they match the 2*hidden_units decoder.
state_h = Concatenate()([forward_h, backward_h])
state_c = Concatenate()([forward_c, backward_c])

# Decoder: unidirectional LSTM seeded with the encoder's final states.
decoder_inputs = Input(shape=(None,))
dec_emb = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_inputs)
decoder_lstm = LSTM(hidden_units * 2, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state=[state_h, state_c])

# Attention: Keras Attention takes [query, value]. Query must be the DECODER
# states so the output has one context vector per decoder timestep.
# (The original passed [encoder_outputs, decoder_outputs], which yields
# encoder-length output and — combined with the ad-hoc slicing below —
# produced the "Dimension 1 in both shapes must be equal, but are 18 and 19"
# error at the Concatenate layer.)
attn_out = Attention()([decoder_outputs, encoder_outputs])

# No slicing is needed: the teacher-forcing input (output_seq[:, :-1]) and
# the target (output_seq[:, 1:]) are already aligned at max_len_summary-1
# timesteps, and attn_out now has the same timestep count as decoder_outputs.
decoder_concat_input = Concatenate(axis=-1)([decoder_outputs, attn_out])

# Per-timestep softmax over the target vocabulary.
decoder_dense = TimeDistributed(Dense(vocab_size, activation='softmax'))
decoder_outputs = decoder_dense(decoder_concat_input)

# Build and compile the model ONCE. (The original built and compiled it
# twice — adam then rmsprop; only the last compile took effect, so rmsprop
# is kept here.)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')

# Stop training when validation loss stops improving for 2 epochs.
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=2)

# Teacher forcing: decoder sees summary[:-1] and predicts summary[1:].
model.fit([input_seq, output_seq[:, :-1]], output_seq[:, 1:],
          epochs=50, batch_size=64, validation_split=0.2,
          callbacks=[early_stop])

# Save the trained model
model.save('tigrinya_summarization_model.h5')

# Generate summaries for new text data
def generate_summary(input_text):
    """Greedy-decode a summary for *input_text* with the trained model.

    Repeatedly feeds the growing partial summary back through the full
    training model and takes the argmax of the LAST timestep's output
    distribution. This re-encodes the source every step (slow), but it
    needs no separate inference sub-models.

    The original version was broken in two ways: it called
    ``model.layers[3].predict(...)`` expecting hidden states the combined
    model does not return, and it overwrote ``decoder_output`` each
    iteration so at best only the final word would have been returned.

    Returns the generated summary as a space-joined string (without the
    start/end markers).
    """
    # Preprocess the source text exactly as during training.
    input_seq = input_tokenizer.texts_to_sequences([input_text])
    input_seq = pad_sequences(input_seq, maxlen=max_len_text, padding='post')

    # NOTE(review): assumes the training summaries were wrapped with
    # 'start' ... 'end' tokens during preprocessing — confirm, otherwise
    # the word_index lookup below raises KeyError.
    decoder_tokens = [output_tokenizer.word_index['start']]
    words = []
    while len(words) < max_len_summary:
        # Predict distributions for every decoder position; keep the last.
        preds = model.predict([input_seq, tf.constant([decoder_tokens])], verbose=0)
        next_id = int(tf.argmax(preds[0, -1, :]))
        word = output_tokenizer.index_word.get(next_id)
        if word is None or word == 'end':
            break
        words.append(word)
        decoder_tokens.append(next_id)
    return ' '.join(words)


Jano Jano

unread,
Mar 25, 2023, 3:00:52 PM3/25/23
to Keras-users
This is the error:

Epoch 1/50
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-23-82125d1916db> in <module> 9 10 # Train the model ---> 11 model.fit([input_seq, output_seq[:,:-1]], output_seq[:,1:], epochs=50, batch_size=64, validation_split=0.2, callbacks=[early_stop]) 12 13 # Save the model

1 frames
/usr/local/lib/python3.9/dist-packages/keras/engine/training.py in tf__train_function(iterator) 13 try: 14 do_return = True ---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope) 16 except: 17 do_return = False ValueError: in user code: File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1249, in train_function * return step_function(self, iterator) File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1233, in step_function ** outputs = model.distribute_strategy.run(run_step, args=(data,)) File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1222, in run_step ** outputs = model.train_step(data) File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1023, in train_step y_pred = self(x, training=True) File "/usr/local/lib/python3.9/dist-packages/keras/utils/traceback_utils.py", line 70, in error_handler raise e.with_traceback(filtered_tb) from None File "/usr/local/lib/python3.9/dist-packages/keras/backend.py", line 3572, in concatenate return tf.concat([to_dense(x) for x in tensors], axis) ValueError: Exception encountered when calling layer 'concatenate_16' (type Concatenate). Dimension 1 in both shapes must be equal, but are 18 and 19. Shapes are [?,18] and [?,19]. for '{{node model_6/concatenate_16/concat}} = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32](model_6/tf.__operators__.getitem_1/strided_slice, model_6/tf.slice_1/Slice, model_6/concatenate_16/concat/axis)' with input shapes: [?,18,512], [?,19,512], [] and with computed input tensors: input[2] = <2>. Call arguments received by layer 'concatenate_16' (type Concatenate): • inputs=['tf.Tensor(shape=(None, 18, 512), dtype=float32)', 'tf.Tensor(shape=(None, 19, 512), dtype=float32)']

Reply all
Reply to author
Forward
0 new messages