NanGuardMode error

12 views
Skip to first unread message

Fei Tao

unread,
Oct 21, 2017, 6:13:48 PM10/21/17
to theano-dev
I ran Theano with Keras on a very simple toy example using CTC loss. The code is listed below:


batch_size = 32
frame_len = 3600
output_len = 600
nb_feat = 64
nb_class = 63
nb_output = nb_class + 1 # add output for blank
inner_dim = 16

def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
y_pred = y_pred[:, 2:, :]
scale = 1
return K.ctc_batch_cost(labels, y_pred, input_length, label_length,scale)
input_data = Input(name='the_input', shape=(frame_len,nb_feat))
lstm1 = GRU(inner_dim,return_sequences = True, name='lstm1')(input_data)

y_pred = TimeDistributed(Dense(nb_output,activation='softmax',name='dense1'))(lstm1)
Model(inputs=input_data, outputs=y_pred).summary()

labels = Input(name='the_labels', shape=[output_len])
input_length = Input(name='input_length', shape=[1], dtype='int32')
label_length = Input(name='label_length', shape=[1], dtype='int32')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])

model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat)).astype('float32')
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, output_len)).astype('float32')
length = np.ones((batch_size,1),dtype='float32')*frame_len
output_length = np.ones((batch_size,1),dtype='float32')*output_len

input_dict = {'the_input':data,'the_labels':label,'input_length':length,'label_length':output_length}
output_dict = {'ctc':np.zeros([batch_size])}
opt = SGD(lr=0.001, decay=1e-6, momentum=0.9, clipnorm=1.,clipvalue=0.5)
# opt = Adam(clipnorm=1.,clipvalue=0.5)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=opt)
model.summary()

I found that my gradients are NaN, so I added NanGuardMode to check where the error comes from. I got the output below:

AssertionError: Inf detected
Big value detected
NanGuardMode found an error in the output of a node in this variable:
forall_inplace,cpu,scan_fn}.0 [id A] ''   
 |Elemwise{minimum,no_inplace} [id B] ''   
 | |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 | | |Subtensor{int64} [id D] ''   
 | | | |/input_length[t] [id E]
 | | | |Constant{0} [id F]
 | | |TensorConstant{0} [id G]
 | | |Shape_i{0} [id H] ''   
 | |   |<TensorType(float64, matrix)> [id I]
 | |Elemwise{sub,no_inplace} [id J] ''   
 |   |Elemwise{Composite{Switch(LT(i0, i1), i1, i0)}} [id K] ''   
 |   | |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 |   | |TensorConstant{0} [id G]
 |   |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1)}} [id L] ''   
 |     |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 |     |Elemwise{Composite{Switch(LT(i0, i1), i1, i0)}} [id K] ''   
 |     |TensorConstant{0} [id G]
 |     |TensorConstant{-1} [id M]
 |Subtensor{int64:int64:int8} [id N] ''   
 | |Elemwise{Composite{log(((i0 * i1) + i2))}}[(0, 1)] [id O] ''   
 | | |TensorConstant{(1, 1) of 0.9999} [id P]
 | | |InplaceDimShuffle{1,0} [id Q] ''   
 | | | |AdvancedSubtensor1 [id R] ''   
 | | |   |InplaceDimShuffle{1,0} [id S] ''   
 | | |   | |Subtensor{int64:int32:} [id T] ''   
 | | |   |   |<TensorType(float64, matrix)> [id I]
 | | |   |   |Constant{0} [id F]
 | | |   |   |ScalarFromTensor [id U] ''   
 | | |   |     |Subtensor{int64} [id D] ''   
 | | |   |AdvancedIncSubtensor1{inplace,set} [id V] ''   
 | | |     |Alloc [id W] ''   
 | | |     | |TensorConstant{-1} [id X]
 | | |     | |Elemwise{add,no_inplace} [id Y] ''   
 | | |     |   |TensorConstant{1} [id Z]
 | | |     |   |Elemwise{Mul}[(0, 1)] [id BA] ''   
 | | |     |     |TensorConstant{2} [id BB]
 | | |     |     |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id BC] ''   
 | | |     |       |Subtensor{int64} [id BD] ''   
 | | |     |       | |/label_length[t] [id BE]
 | | |     |       | |Constant{0} [id F]
 | | |     |       |TensorConstant{0} [id G]
 | | |     |       |Shape_i{0} [id BF] ''   
 | | |     |         |/the_labels[t] [id BG]
 | | |     |Subtensor{int64:int32:} [id BH] ''   
 | | |     | |/the_labels[t] [id BG]
 | | |     | |Constant{0} [id F]
 | | |     | |ScalarFromTensor [id BI] ''   
 | | |     |   |Subtensor{int64} [id BD] ''   
 | | |     |Elemwise{Composite{(i0 + (i1 * i2))}}[(0, 2)] [id BJ] ''   
 | | |       |TensorConstant{(1,) of 1} [id BK]
 | | |       |TensorConstant{(1,) of 2} [id BL]
 | | |       |ARange{dtype='int64'} [id BM] ''   
 | | |         |TensorConstant{0} [id G]
 | | |         |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id BC] ''   
 | | |         |TensorConstant{1} [id BN]
 | | |Elemwise{true_div,no_inplace} [id BO] ''   
 | |   |TensorConstant{(1, 1) of 0.0001} [id BP]
 | |   |InplaceDimShuffle{x,x} [id BQ] ''   
 | |     |Elemwise{add,no_inplace} [id Y] ''   
 | |ScalarFromTensor [id BR] ''   
 | | |Elemwise{Composite{Switch(i0, i1, minimum(i2, i3))}}[(0, 3)] [id BS] ''   
 | |   |Elemwise{le,no_inplace} [id BT] ''   
 | |   | |Elemwise{Composite{Switch(i0, Switch(LT((i1 + i2), i3), i3, (i1 + i2)), Switch(LT(i1, i2), i1, i2))}} [id BU] ''   
 | |   | | |Elemwise{lt,no_inplace} [id BV] ''   
 | |   | | | |Elemwise{minimum,no_inplace} [id B] ''   
 | |   | | | |TensorConstant{0} [id G]
 | |   | | |Elemwise{minimum,no_inplace} [id B] ''   
 | |   | | |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 | |   | | |TensorConstant{0} [id G]
 | |   | |TensorConstant{0} [id G]
 | |   |TensorConstant{0} [id G]
 | |   |TensorConstant{0} [id BW]
 | |   |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 | |ScalarFromTensor [id BX] ''   
 | | |Elemwise{Composite{Switch(i0, i1, minimum(i2, i3))}}[(0, 2)] [id BY] ''   
 | |   |Elemwise{le,no_inplace} [id BT] ''   
 | |   |TensorConstant{0} [id G]
 | |   |Elemwise{Composite{Switch(i0, Switch(LT((i1 + i2), i3), i3, (i1 + i2)), Switch(LT(i1, i2), i1, i2))}} [id BU] ''   
 | |   |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 | |Constant{1} [id BZ]
 |Subtensor{int64:int64:int64, ::int64} [id CA] ''   
 | |Elemwise{Composite{log(((i0 * i1) + i2))}}[(0, 1)] [id O] ''   
 | |ScalarFromTensor [id CB] ''   
 | | |Elemwise{Composite{Switch(i0, i1, maximum(i2, (i3 - i4)))}}[(0, 4)] [id CC] ''   
 | |   |Elemwise{le,no_inplace} [id CD] ''   
 | |   | |Elemwise{sub,no_inplace} [id CE] ''   
 | |   | | |Elemwise{Composite{Switch(LT(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3), i1), i1, Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3))}}[(0, 2)] [id CF] ''   
 | |   | | | |Elemwise{le,no_inplace} [id CG] ''   
 | |   | | | | |Elemwise{Composite{Switch(i0, Switch(LT(Composite{((i0 + i1) - i2)}(i1, i2, i3), i4), i4, Composite{((i0 + i1) - i2)}(i1, i2, i3)), Switch(LT(i1, i5), i1, i5))}}[(0, 2)] [id CH] ''   
 | |   | | | | | |Elemwise{lt,no_inplace} [id BV] ''   
 | |   | | | | | |Elemwise{minimum,no_inplace} [id B] ''   
 | |   | | | | | |Elemwise{Composite{Switch(LT(i0, i1), i1, i0)}} [id K] ''   
 | |   | | | | | |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1)}} [id L] ''   
 | |   | | | | | |TensorConstant{0} [id G]
 | |   | | | | | |Elemwise{sub,no_inplace} [id J] ''   
 | |   | | | | |TensorConstant{0} [id G]
 | |   | | | |TensorConstant{0} [id G]
 | |   | | | |Elemwise{Composite{Switch(i0, Switch(LT(Composite{((i0 + i1) - i2)}(i1, i2, i3), i4), i4, Composite{((i0 + i1) - i2)}(i1, i2, i3)), Switch(LT(i1, i5), i1, i5))}}[(0, 2)] [id CH] ''   
 | |   | | | |Elemwise{sub,no_inplace} [id J] ''   
 | |   | | | |TensorConstant{-1} [id X]
 | |   | | |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1), i3), i1), i4), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1), i3), i1), i4)}}[(0, 3)] [id CI] ''   
 | |   | |   |Elemwise{le,no_inplace} [id CG] ''   
 | |   | |   |TensorConstant{0} [id G]
 | |   | |   |TensorConstant{0} [id BW]
 | |   | |   |Elemwise{sub,no_inplace} [id J] ''   
 | |   | |   |Elemwise{Composite{Switch(LT(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3), i1), i1, Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3))}}[(0, 2)] [id CF] ''   
 | |   | |TensorConstant{0} [id G]
 | |   |TensorConstant{0} [id G]
 | |   |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1)}} [id L] ''   
 | |   |Elemwise{add,no_inplace} [id CJ] ''   
 | |   | |TensorConstant{-1} [id M]
 | |   | |Elemwise{Composite{Switch(LT(i0, i1), i1, i0)}} [id K] ''   
 | |   |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1), i3), i1), i4), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1), i3), i1), i4)}}[(0, 3)] [id CI] ''   
 | |ScalarFromTensor [id CK] ''   
 | | |Elemwise{Composite{Switch(i0, i1, Switch(AND(LT((i2 - i3), i1), GT(i4, i1)), (i5 - i6), maximum((i5 + i7), (i2 - i3))))}}[(0, 2)] [id CL] ''   
 | |   |Elemwise{le,no_inplace} [id CD] ''   
 | |   |TensorConstant{0} [id G]
 | |   |Elemwise{add,no_inplace} [id CJ] ''   
 | |   |Elemwise{Composite{Switch(LT(Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3), i1), i1, Composite{Switch(GE(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(Composite{Switch(i0, i1, minimum(i2, i3))}(i0, i1, i2, i3), i1, i3), i1, i4), i3))}}[(0, 2)] [id CF] ''   
 | |   |Elemwise{sub,no_inplace} [id CE] ''   
 | |   |TensorConstant{-1} [id M]
 | |   |Elemwise{Composite{Switch(LT(i0, i1), Switch(LT((i0 + i2), i1), i1, (i0 + i2)), Switch(LT(i0, i2), i0, i2))}}[(0, 2)] [id C] ''   
 | |   |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 + i3), i0)}((i0 - i1), i2, i3, i0), i2), i1)}} [id L] ''   
 | |Constant{-1} [id CM]
 | |Constant{-1} [id CM]
 |IncSubtensor{InplaceSet;:int64:} [id CN] ''   
 | |AllocEmpty{dtype='int32'} [id CO] ''   
 | | |Elemwise{Composite{(Switch(LT(maximum(i0, i1), i2), (maximum(i0, i1) + i3), (maximum(i0, i1) - i2)) + i3)}}[(0, 0)] [id CP] ''   
 | |   |Elemwise{Composite{((i0 - Switch(LT(i1, i2), i1, i2)) + i1)}} [id CQ] ''   
 | |   | |Elemwise{minimum,no_inplace} [id B] ''   
 | |   | |TensorConstant{1} [id Z]
 | |   | |Elemwise{add,no_inplace} [id CR] ''   
 | |   |   |TensorConstant{1} [id Z]
 | |   |   |Elemwise{minimum,no_inplace} [id B] ''   
 | |   |TensorConstant{2} [id BB]
 | |   |TensorConstant{1} [id BN]
 | |   |TensorConstant{1} [id Z]
 | |TensorConstant{(1,) of 1} [id CS]
 | |Constant{1} [id CT]
 |IncSubtensor{InplaceSet;:int64:} [id CU] ''   
 | |AllocEmpty{dtype='float64'} [id CV] ''   
 | | |Elemwise{Composite{(Switch(LT(maximum(i0, i1), i2), (maximum(i0, i1) + i3), (maximum(i0, i1) - i2)) + i4)}} [id CW] ''   
 | | | |Elemwise{Composite{((i0 - Switch(LT(i1, (i0 + i1)), i2, (i0 + i1))) + i1)}} [id CX] ''   
 | | | | |Elemwise{minimum,no_inplace} [id B] ''   
 | | | | |TensorConstant{1} [id Z]
 | | | | |TensorConstant{1} [id Z]
 | | | |TensorConstant{2} [id BB]
 | | | |TensorConstant{1} [id BN]
 | | | |TensorConstant{1} [id Z]
 | | | |TensorConstant{1} [id Z]
 | | |Elemwise{add,no_inplace} [id Y] ''   
 | |Rebroadcast{0} [id CY] ''   
 | | |Alloc [id CZ] ''   
 | |   |TensorConstant{(1, 1) of 0.0} [id DA]
 | |   |TensorConstant{1} [id Z]
 | |   |Elemwise{add,no_inplace} [id Y] ''   
 | |Constant{1} [id CT]
 |IncSubtensor{InplaceSet;:int64:} [id DB] ''   
 | |AllocEmpty{dtype='int32'} [id DC] ''   
 | | |Elemwise{Composite{(Switch(LT(maximum(i0, i1), i2), (maximum(i0, i1) + i3), (maximum(i0, i1) - i2)) + i3)}}[(0, 0)] [id DD] ''   
 | |   |Elemwise{Composite{((i0 - Switch(LT(i1, (i0 + i1)), i2, (i0 + i1))) + i1)}} [id CX] ''   
 | |   |TensorConstant{2} [id BB]
 | |   |TensorConstant{1} [id BN]
 | |   |TensorConstant{1} [id Z]
 | |TensorConstant{(1,) of 1} [id CS]
 | |Constant{1} [id CT]
 |IncSubtensor{InplaceSet;:int64:} [id DE] ''   
 | |AllocEmpty{dtype='float64'} [id DF] ''   
 | | |Elemwise{Composite{(Switch(LT(maximum(i0, i1), i2), (maximum(i0, i1) + i3), (maximum(i0, i1) - i4)) + i5)}}[(0, 0)] [id DG] ''   
 | | | |Elemwise{Composite{((i0 - Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{((i0 - (Switch(LT(i1, i2), i2, i1) - i3)) - i3)}((Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), Composite{((((i0 - Switch(GE(i1, i2), i2, i1)) - i3) // i4) + i4)}(Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i6, i2, (i0 + i3)), i2, i4), (i0 + i3), i7, i3), i2, i7), i2, (Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), i7), i2), Composite{Switch(LT(i0, i1), i1, i0)}((Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), i2)), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{((i0 - (Switch(LT(i1, i2), i2, i1) - i3)) - i3)}((Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), 
Composite{((((i0 - Switch(GE(i1, i2), i2, i1)) - i3) // i4) + i4)}(Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i6, i2, (i0 + i3)), i2, i4), (i0 + i3), i7, i3), i2, i7), i2, (Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), i7), i2), Composite{Switch(LT(i0, i1), i1, i0)}((Composite{Switch(GE(Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3), i2), (i2 - i4), Composite{Switch(LT(i0, i1), i2, i0)}(Composite{Switch(LT(i0, i1), (i0 + i2), i0)}(i0, i1, i2), i1, i3))}(i1, i2, (i0 + i3), i4, i5) + i3), i2))) + i3)}}[(0, 1)] [id DH] ''   
 | | | | |Elemwise{minimum,no_inplace} [id B] ''   
 | | | | |Elemwise{Composite{Switch(i0, i1, maximum(minimum((i2 + i3 + i4), i5), i6))}}[(0, 4)] [id DI] ''   
 | | | | | |Elemwise{le,no_inplace} [id DJ] ''   
 | | | | | | |Elemwise{sub,no_inplace} [id DK] ''   
 | | | | | | | |Elemwise{Composite{Switch(LT((i0 - i1), i2), i2, (i0 - i1))}} [id DL] ''   
 | | | | | | | | |Elemwise{add,no_inplace} [id CR] ''   
 | | | | | | | | |Elemwise{Composite{Switch(LT(i0, i1), i0, i1)}} [id DM] ''   
 | | | | | | | | | |TensorConstant{1} [id Z]
 | | | | | | | | | |Elemwise{add,no_inplace} [id CR] ''   
 | | | | | | | | |TensorConstant{0} [id G]
 | | | | | | | |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{(i0 - (i1 + i2))}(i0, i1, i2), i3, i4, i1), i3), i2), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{(i0 - (i1 + i2))}(i0, i1, i2), i3, i4, i1), i3), i2)}} [id DN] ''   
 | | | | | | |   |Elemwise{add,no_inplace} [id CR] ''   
 | | | | | | |   |Elemwise{Composite{Switch(LT(i0, i1), i0, i1)}} [id DM] ''   
 | | | | | | |   |Elemwise{Composite{Switch(LT((i0 - i1), i2), i2, (i0 - i1))}} [id DL] ''   
 | | | | | | |   |TensorConstant{0} [id G]
 | | | | | | |   |Elemwise{minimum,no_inplace} [id B] ''   
 | | | | | | |TensorConstant{0} [id G]
 | | | | | |TensorConstant{0} [id G]
 | | | | | |TensorConstant{-1} [id M]
 | | | | | |Elemwise{Composite{Switch(LT(i0, i1), i0, i1)}} [id DM] ''   
 | | | | | |Elemwise{Composite{Switch(LT((i0 - i1), i2), i2, (i0 - i1))}} [id DL] ''   
 | | | | | |Elemwise{add,no_inplace} [id CR] ''   
 | | | | | |TensorConstant{0} [id BW]
 | | | | |TensorConstant{0} [id G]
 | | | | |TensorConstant{1} [id Z]
 | | | | |TensorConstant{-1} [id X]
 | | | | |TensorConstant{1} [id BN]
 | | | | |Elemwise{Composite{Switch(i0, i1, Switch(AND(LT((i2 + i3 + i4), i1), GT(i5, i1)), (i6 - i7), minimum((i2 + i3 + i4), i8)))}}[(0, 3)] [id DO] ''   
 | | | | | |Elemwise{le,no_inplace} [id DJ] ''   
 | | | | | |TensorConstant{0} [id G]
 | | | | | |TensorConstant{-1} [id M]
 | | | | | |Elemwise{Composite{Switch(LT(i0, i1), i0, i1)}} [id DM] ''   
 | | | | | |Elemwise{Composite{Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{(i0 - (i1 + i2))}(i0, i1, i2), i3, i4, i1), i3), i2), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), (i2 - i3), i0)}(Composite{(i0 - (i1 + i2))}(i0, i1, i2), i3, i4, i1), i3), i2)}} [id DN] ''   
 | | | | | |Elemwise{sub,no_inplace} [id DK] ''   
 | | | | | |TensorConstant{-2} [id DP]
 | | | | | |Elemwise{minimum,no_inplace} [id B] ''   
 | | | | | |Elemwise{add,no_inplace} [id CR] ''   
 | | | | |TensorConstant{1} [id BN]
 | | | |TensorConstant{2} [id BB]
 | | | |TensorConstant{1} [id BN]
 | | | |TensorConstant{1} [id Z]
 | | | |TensorConstant{1} [id BN]
 | | | |TensorConstant{1} [id Z]
 | | |Elemwise{add,no_inplace} [id Y] ''   
 | |Rebroadcast{0} [id CY] ''   
 | |Constant{1} [id CT]
 |AdvancedSubtensor1 [id DQ] ''   
 | |Elemwise{add,no_inplace} [id DR] ''   
 | | |TensorConstant{(1,) of 1} [id BK]
 | | |Elemwise{Mul}[(0, 1)] [id DS] ''   
 | |   |TensorConstant{(1,) of 2} [id BL]
 | |   |ARange{dtype='int64'} [id DT] ''   
 | |     |TensorConstant{0} [id G]
 | |     |Elemwise{Composite{((i0 + i1) // i2)}}[(0, 1)] [id DU] ''   
 | |     | |TensorConstant{-2} [id DP]
 | |     | |Elemwise{Mul}[(0, 1)] [id BA] ''   
 | |     | |TensorConstant{2} [id BB]
 | |     |TensorConstant{1} [id BN]
 | |Subtensor{int64} [id DV] ''   
 |   |Nonzero [id DW] ''   
 |   | |Elemwise{neq,no_inplace} [id DX] ''   
 |   |   |AdvancedSubtensor1 [id DY] ''   
 |   |   | |AdvancedIncSubtensor1{inplace,set} [id V] ''   
 |   |   | |Elemwise{add,no_inplace} [id DR] ''   
 |   |   |AdvancedSubtensor1 [id DZ] ''   
 |   |     |AdvancedIncSubtensor1{inplace,set} [id V] ''   
 |   |     |Elemwise{Add}[(0, 1)] [id EA] ''   
 |   |       |TensorConstant{(1,) of 3} [id EB]
 |   |       |Elemwise{Mul}[(0, 1)] [id DS] ''   
 |   |Constant{0} [id F]
 |Alloc [id EC] ''   
 | |TensorConstant{(1,) of 0.0} [id ED]
 | |Elemwise{add,no_inplace} [id Y] ''   
 |AdvancedSubtensor1 [id EE] ''   
   |Elemwise{add,no_inplace} [id EF] ''   
   | |TensorConstant{(1,) of 1} [id BK]
   | |Elemwise{Mul}[(0, 1)] [id EG] ''   
   |   |TensorConstant{(1,) of 2} [id BL]
   |   |ARange{dtype='int64'} [id EH] ''   
   |     |TensorConstant{0} [id G]
   |     |Elemwise{Composite{((i0 + (Composite{Switch(LT(i0, i1), i1, i0)}(i1, i2) - Switch(LT(Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}((i1 - Composite{Switch(LT(i0, i1), i1, i0)}(i1, i2)), i2, i3), i2), Composite{Switch(LT(i0, i1), i1, i0)}(i1, i2)), Composite{Switch(LT(i0, i1), i1, i0)}(Composite{Switch(LT(i0, i1), i2, i0)}((i1 - Composite{Switch(LT(i0, i1), i1, i0)}(i1, i2)), i2, i3), i2), Composite{Switch(LT(i0, i1), i1, i0)}(i1, i2)))) // i4)}}[(0, 1)] [id EI] ''   
   |     | |TensorConstant{-3} [id EJ]
   |     | |Elemwise{add,no_inplace} [id Y] ''   
   |     | |TensorConstant{0} [id G]
   |     | |Elemwise{Mul}[(0, 1)] [id BA] ''   
   |     | |TensorConstant{2} [id BB]
   |     |TensorConstant{1} [id BN]
   |Subtensor{int64} [id EK] ''   
     |Nonzero [id EL] ''   
     | |Elemwise{neq,no_inplace} [id EM] ''   
     |   |AdvancedSubtensor1 [id EN] ''   
     |   | |Subtensor{::int64} [id EO] ''   
     |   | | |AdvancedIncSubtensor1{inplace,set} [id V] ''   
     |   | | |Constant{-1} [id CM]
     |   | |Elemwise{add,no_inplace} [id EF] ''   
     |   |AdvancedSubtensor1 [id EP] ''   
     |     |Subtensor{::int64} [id EO] ''   
     |     |Elemwise{Add}[(0, 1)] [id EQ] ''   
     |       |TensorConstant{(1,) of 3} [id EB]
     |       |Elemwise{Mul}[(0, 1)] [id EG] ''   
     |Constant{0} [id F]
forall_inplace,cpu,scan_fn}.1 [id A] ''   
forall_inplace,cpu,scan_fn}.2 [id A] ''   
forall_inplace,cpu,scan_fn}.3 [id A] ''   

Inner graphs of the scan ops:

forall_inplace,cpu,scan_fn}.0 [id A] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id ER] ''   
 > |TensorConstant{1} [id ES]
 > |<TensorType(int32, scalar)> [id ET] -> [id CN]
 > |TensorConstant{3} [id EU]
 > |Reduce{maximum}{0} [id EV] 'max'   
 > | |Join [id EW] ''   
 > |   |TensorConstant{0} [id EX]
 > |   |AdvancedSubtensor1 [id EY] ''   
 > |   | |<TensorType(int64, vector)> [id EZ] -> [id DQ]
 > |   | |Subtensor{int64} [id FA] ''   
 > |   |   |Nonzero [id FB] ''   
 > |   |   | |Elemwise{lt,no_inplace} [id FC] ''   
 > |   |   |   |<TensorType(int64, vector)> [id EZ] -> [id DQ]
 > |   |   |   |InplaceDimShuffle{x} [id FD] ''   
 > |   |   |     |<TensorType(int32, scalar)> [id ET] -> [id CN]
 > |   |   |Constant{0} [id FE]
 > |   |TensorConstant{(1,) of -1} [id FF]
 > |Shape_i{0} [id FG] ''   
 >   |<TensorType(float64, vector)> [id FH] -> [id N]
 >IncSubtensor{Set;:int32:} [id FI] ''   
 > |<TensorType(float64, vector)> [id FJ] -> [id EC]
 > |Elemwise{Composite{(i0 + log(i1) + i2)}}[(0, 1)] [id FK] ''   
 > | |Subtensor{:int32:} [id FL] ''   
 > | | |<TensorType(float64, vector)> [id FH] -> [id N]
 > | | |ScalarFromTensor [id FM] ''   
 > | |   |Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id ER] ''   
 > | |AdvancedIncSubtensor1{inplace,inc} [id FN] ''   
 > | | |IncSubtensor{Inc;int64::} [id FO] ''   
 > | | | |IncSubtensor{Set;:int32:} [id FP] ''   
 > | | | | |Subtensor{:int32:} [id FQ] ''   
 > | | | | | |<TensorType(float64, vector)> [id FJ] -> [id EC]
 > | | | | | |ScalarFromTensor [id FM] ''   
 > | | | | |Elemwise{Composite{exp((i0 - i1))}} [id FR] ''   
 > | | | | | |Subtensor{:int32:} [id FS] ''   
 > | | | | | | |<TensorType(float64, vector)> [id FT] -> [id CU]
 > | | | | | | |ScalarFromTensor [id FU] ''   
 > | | | | | |   |<TensorType(int32, scalar)> [id ET] -> [id CN]
 > | | | | | |InplaceDimShuffle{x} [id FV] ''   
 > | | | | |   |Reduce{maximum}{0} [id FW] 'max'   
 > | | | | |     |Subtensor{:int32:} [id FS] ''   
 > | | | | |ScalarFromTensor [id FU] ''   
 > | | | |Subtensor{:int64:} [id FX] ''   
 > | | | | |IncSubtensor{Set;:int32:} [id FP] ''   
 > | | | | |Constant{-1} [id FY]
 > | | | |Constant{1} [id FZ]
 > | | |AdvancedSubtensor1 [id GA] ''   
 > | | | |Elemwise{Composite{exp((i0 - i1))}} [id FR] ''   
 > | | | |AdvancedSubtensor1 [id EY] ''   
 > | | |Elemwise{Add}[(0, 1)] [id GB] ''   
 > | |   |TensorConstant{(1,) of 2} [id GC]
 > | |   |AdvancedSubtensor1 [id EY] ''   
 > | |InplaceDimShuffle{x} [id FV] ''   
 > |ScalarFromTensor [id FM] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id GD] ''   
 > |TensorConstant{1} [id ES]
 > |<TensorType(int32, scalar)> [id GE] -> [id DB]
 > |TensorConstant{3} [id EU]
 > |Reduce{maximum}{0} [id GF] 'max'   
 > | |Join [id GG] ''   
 > |   |TensorConstant{0} [id EX]
 > |   |AdvancedSubtensor1 [id GH] ''   
 > |   | |<TensorType(int64, vector)> [id GI] -> [id EE]
 > |   | |Subtensor{int64} [id GJ] ''   
 > |   |   |Nonzero [id GK] ''   
 > |   |   | |Elemwise{lt,no_inplace} [id GL] ''   
 > |   |   |   |<TensorType(int64, vector)> [id GI] -> [id EE]
 > |   |   |   |InplaceDimShuffle{x} [id GM] ''   
 > |   |   |     |<TensorType(int32, scalar)> [id GE] -> [id DB]
 > |   |   |Constant{0} [id FE]
 > |   |TensorConstant{(1,) of -1} [id FF]
 > |Shape_i{0} [id GN] ''   
 >   |<TensorType(float64, vector)> [id GO] -> [id CA]
 >IncSubtensor{Set;:int32:} [id GP] ''   
 > |<TensorType(float64, vector)> [id FJ] -> [id EC]
 > |Elemwise{Composite{(i0 + log(i1) + i2)}}[(0, 1)] [id GQ] ''   
 > | |Subtensor{:int32:} [id GR] ''   
 > | | |<TensorType(float64, vector)> [id GO] -> [id CA]
 > | | |ScalarFromTensor [id GS] ''   
 > | |   |Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id GD] ''   
 > | |AdvancedIncSubtensor1{inplace,inc} [id GT] ''   
 > | | |IncSubtensor{Inc;int64::} [id GU] ''   
 > | | | |IncSubtensor{Set;:int32:} [id GV] ''   
 > | | | | |Subtensor{:int32:} [id GW] ''   
 > | | | | | |<TensorType(float64, vector)> [id FJ] -> [id EC]
 > | | | | | |ScalarFromTensor [id GS] ''   
 > | | | | |Elemwise{Composite{exp((i0 - i1))}} [id GX] ''   
 > | | | | | |Subtensor{:int32:} [id GY] ''   
 > | | | | | | |<TensorType(float64, vector)> [id GZ] -> [id DE]
 > | | | | | | |ScalarFromTensor [id HA] ''   
 > | | | | | |   |<TensorType(int32, scalar)> [id GE] -> [id DB]
 > | | | | | |InplaceDimShuffle{x} [id HB] ''   
 > | | | | |   |Reduce{maximum}{0} [id HC] 'max'   
 > | | | | |     |Subtensor{:int32:} [id GY] ''   
 > | | | | |ScalarFromTensor [id HA] ''   
 > | | | |Subtensor{:int64:} [id HD] ''   
 > | | | | |IncSubtensor{Set;:int32:} [id GV] ''   
 > | | | | |Constant{-1} [id FY]
 > | | | |Constant{1} [id FZ]
 > | | |AdvancedSubtensor1 [id HE] ''   
 > | | | |Elemwise{Composite{exp((i0 - i1))}} [id GX] ''   
 > | | | |AdvancedSubtensor1 [id GH] ''   
 > | | |Elemwise{Add}[(0, 1)] [id HF] ''   
 > | |   |TensorConstant{(1,) of 2} [id GC]
 > | |   |AdvancedSubtensor1 [id GH] ''   
 > | |InplaceDimShuffle{x} [id HB] ''   
 > |ScalarFromTensor [id GS] ''   

forall_inplace,cpu,scan_fn}.1 [id A] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id ER] ''   
 >IncSubtensor{Set;:int32:} [id FI] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id GD] ''   
 >IncSubtensor{Set;:int32:} [id GP] ''   

forall_inplace,cpu,scan_fn}.2 [id A] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id ER] ''   
 >IncSubtensor{Set;:int32:} [id FI] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id GD] ''   
 >IncSubtensor{Set;:int32:} [id GP] ''   

forall_inplace,cpu,scan_fn}.3 [id A] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id ER] ''   
 >IncSubtensor{Set;:int32:} [id FI] ''   
 >Elemwise{Composite{Cast{int32}(minimum(maximum((i0 + i1), (i2 + i3)), i4))}} [id GD] ''   
 >IncSubtensor{Set;:int32:} [id GP] ''   



Apply node that caused the error: forall_inplace,cpu,scan_fn}(Elemwise{minimum,no_inplace}.0, Subtensor{int64:int64:int8}.0, Subtensor{int64:int64:int64, ::int64}.0, IncSubtensor{InplaceSet;:int64:}.0, IncSubtensor{InplaceSet;:int64:}.0, IncSubtensor{InplaceSet;:int64:}.0, IncSubtensor{InplaceSet;:int64:}.0, AdvancedSubtensor1.0, Alloc.0, AdvancedSubtensor1.0)
Toposort index: 124
Inputs types: [TensorType(int64, scalar), TensorType(float64, matrix), TensorType(float64, matrix), TensorType(int32, vector), TensorType(float64, matrix), TensorType(int32, vector), TensorType(float64, matrix), TensorType(int64, vector), TensorType(float64, vector), TensorType(int64, vector)]
Inputs shapes: [(), (3598, 1201), (3598, 1201), (3598,), (3598, 1201), (3598,), (3598, 1201), (595,), (1201,), (595,)]
Inputs strides: [(), (8, 28784), (-8, -28784), (4,), (9608, 8), (4,), (9608, 8), (8,), (8,), (8,)]
Inputs values: [array(3598), 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown']
Outputs clients: [[Subtensor{int64:int64:int8}(forall_inplace,cpu,scan_fn}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1})], [Subtensor{int64:int64:int8}(forall_inplace,cpu,scan_fn}.1, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1})], [Subtensor{int64:int64:int8}(forall_inplace,cpu,scan_fn}.2, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1})], [Subtensor{int64:int64:int64, ::int64}(forall_inplace,cpu,scan_fn}.3, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}, Constant{-1})]]

I don't know how to read the error information. Could anyone tell me where the error is?

Thank you very much!

Pascal Lamblin

unread,
Oct 23, 2017, 12:30:48 PM10/23/17
to thean...@googlegroups.com
NanGuardMode detected a large value in one of the outputs of a scan
node, probably the GRU.
However, it did not mention which time step, or which variable inside
the loop was responsible for it.
How did you specify that NanGuardMode should be used?
I'm not sure why it was not passed to the inner function of scan.

On 2017-10-21 06:13 PM, Fei Tao wrote:
> *I ran theano with Keras. I run a very simple toy example with CTC loss.
> The code is listed as below:*
>
>
>
> batch_size =32
> frame_len =3600
> output_len =600
> nb_feat =64
> nb_class =63
> nb_output = nb_class +1 # add output for blank
> inner_dim =16
>
> def ctc_lambda_func(args):
> y_pred, labels, input_length, label_length = args
> # the 2 is critical here since the first couple outputs of the RNN
> # tend to be garbage:
> y_pred = y_pred[:,2:, :]
> scale =1
> return K.ctc_batch_cost(labels, y_pred, input_length, label_length,scale)
>
> input_data = Input(name='the_input',shape=(frame_len,nb_feat))
> lstm1 = GRU(inner_dim,return_sequences =True,name='lstm1')(input_data)
>
> y_pred = TimeDistributed(Dense(nb_output,activation='softmax',name='dense1'))(lstm1)
> Model(inputs=input_data,outputs=y_pred).summary()
>
> labels = Input(name='the_labels',shape=[output_len])
> input_length = Input(name='input_length',shape=[1],dtype='int32')
> label_length = Input(name='label_length',shape=[1],dtype='int32')
> loss_out = Lambda(ctc_lambda_func,output_shape=(1,),name='ctc')([y_pred, labels, input_length, label_length])
>
> model = Model(inputs=[input_data, labels, input_length, label_length],outputs=loss_out)
>
> data = np.random.uniform(low = -5,high =5,size = (batch_size, frame_len, nb_feat)).astype('float32')
> # Dummy labels in range [1,nb_class]. 0 = <blank>
> label =1 + np.random.randint(nb_class,size = (batch_size, output_len)).astype('float32')
> length = np.ones((batch_size,1),dtype='float32')*frame_len
> output_length = np.ones((batch_size,1),dtype='float32')*output_len
>
> input_dict = {'the_input':data,'the_labels':label,'input_length':length,'label_length':output_length}
> output_dict = {'ctc':np.zeros([batch_size])}
> opt = SGD(lr=0.001,decay=1e-6,momentum=0.9,clipnorm=1.,clipvalue=0.5)
> # opt = Adam(clipnorm=1.,clipvalue=0.5)
> model.compile(loss={'ctc':lambda y_true, y_pred: y_pred},optimizer=opt)
> model.summary()
>
>
> *I find I have gradients of NaN. So I add NaNGuardMode to check where the
> error is. I have the output like below:*
> *I don't know how to read the error information. Could anyone tell me
> where the error is?*
>
> *
> *
>
> *Thank you very much!*
>
> --
>
> ---
> You received this message because you are subscribed to the Google
> Groups "theano-dev" group.
> To unsubscribe from this group and stop receiving emails from it, send
> an email to theano-dev+...@googlegroups.com
> <mailto:theano-dev+...@googlegroups.com>.
> For more options, visit https://groups.google.com/d/optout.

--
Pascal Lamblin
Reply all
Reply to author
Forward
0 new messages