ValueError: padding must be zero for average_exc_pad


Mnd Daniel

Aug 8, 2019, 3:57:19 AM
to theano-users
I'm trying to fine-tune InceptionV3 for binary classification. I followed the code at keras.io but got the error below instead. I'm using the Theano backend with Python 3.5 and Keras 2.2.4. I also do not understand the hint at the end of the error output:
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
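
As far as I understand, that flag only makes Theano print a more detailed debugprint and storage map when an apply node fails. I assume it can be set through the THEANO_FLAGS environment variable before starting Python, or equivalently through theano.config before the model is built:

import theano

# Assumed equivalent to launching with THEANO_FLAGS='exception_verbosity=high'
# (on Windows: set THEANO_FLAGS=exception_verbosity=high before starting the notebook).
theano.config.exception_verbosity = 'high'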

Code

import keras
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Adam

# Load InceptionV3 pre-trained on ImageNet, without the top classifier.
inceptionV3_model = keras.applications.inception_v3.InceptionV3(weights='imagenet', include_top=False)

# New classification head for two classes.
x = inceptionV3_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=inceptionV3_model.input, outputs=predictions)

# First stage: freeze the whole base model and train only the new head.
for layer in inceptionV3_model.layers:
    layer.trainable = False

model.compile(Adam(lr=0.0001), loss='categorical_crossentropy',
              metrics=['accuracy'])

# train_batches and valid_batches are my image data generators.
history = model.fit_generator(train_batches, steps_per_epoch=10,
                              validation_data=valid_batches,
                              validation_steps=10, epochs=10, verbose=2)

# Second stage: unfreeze the layers from index 249 onward (the top two
# inception blocks) and fine-tune them together with the head.
for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

model.compile(Adam(lr=0.0001), loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit_generator(train_batches, steps_per_epoch=10,
                              validation_data=valid_batches,
                              validation_steps=10, epochs=150, verbose=2)


Error

ValueError                                Traceback (most recent call last)
~\AppData\Roaming\Python\Python37\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
    902             outputs =\
--> 903                 self.fn() if output_subset is None else\
    904                 self.fn(output_subset=output_subset)

ValueError: padding must be zero for average_exc_pad

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
<ipython-input-42-347232762e6a> in <module>
      1 history=model.fit_generator(train_batches, steps_per_epoch=10, 
      2                     validation_data=valid_batches,
----> 3                     validation_steps=10, epochs=150, verbose=2)

C:\ProgramData\Anaconda3\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    215                 outs = model.train_on_batch(x, y,
    216                                             sample_weight=sample_weight,
--> 217                                             class_weight=class_weight)
    218 
    219                 outs = to_list(outs)

C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1215             ins = x + y + sample_weights
   1216         self._make_train_function()
-> 1217         outputs = self.train_function(ins)
   1218         return unpack_singleton(outputs)
   1219 

C:\ProgramData\Anaconda3\lib\site-packages\keras\backend\theano_backend.py in __call__(self, inputs)
   1386     def __call__(self, inputs):
   1387         assert isinstance(inputs, (list, tuple))
-> 1388         return self.function(*inputs)
   1389 
   1390 

~\AppData\Roaming\Python\Python37\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
    915                     node=self.fn.nodes[self.fn.position_of_error],
    916                     thunk=thunk,
--> 917                     storage_map=getattr(self.fn, 'storage_map', None))
    918             else:
    919                 # old-style linkers raise their own exceptions

~\AppData\Roaming\Python\Python37\site-packages\theano\gof\link.py in raise_with_op(node, thunk, exc_info, storage_map)
    323         # extra long error message in that case.
    324         pass
--> 325     reraise(exc_type, exc_value, exc_trace)
    326 
    327 

~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
    690                 value = tp()
    691             if value.__traceback__ is not tb:
--> 692                 raise value.with_traceback(tb)
    693             raise value
    694         finally:

~\AppData\Roaming\Python\Python37\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
    901         try:
    902             outputs =\
--> 903                 self.fn() if output_subset is None else\
    904                 self.fn(output_subset=output_subset)
    905         except Exception:

ValueError: padding must be zero for average_exc_pad
Apply node that caused the error: AveragePoolGrad{ignore_border=True, mode='average_exc_pad', ndim=2}(Join.0, IncSubtensor{InplaceInc;::, ::, :int64:, :int64:}.0, TensorConstant{(2,) of 3}, TensorConstant{(2,) of 1}, TensorConstant{(2,) of 1})
Toposort index: 4151
Inputs types: [TensorType(float32, 4D), TensorType(float32, 4D), TensorType(int32, vector), TensorType(int32, vector), TensorType(int32, vector)]
Inputs shapes: [(10, 2048, 5, 5), (10, 2048, 5, 5), (2,), (2,), (2,)]
Inputs strides: [(204800, 100, 20, 4), (204800, 100, 20, 4), (4,), (4,), (4,)]
Inputs values: ['not shown', 'not shown', array([3, 3]), array([1, 1]), array([1, 1])]
Outputs clients: [[Elemwise{add,no_inplace}(CorrMM_gradInputs{half, (1, 1), (1, 1), 1 False}.0, CorrMM_gradInputs{half, (1, 1), (1, 1), 1 False}.0, CorrMM_gradInputs{half, (1, 1), (1, 1), 1 False}.0, AveragePoolGrad{ignore_border=True, mode='average_exc_pad', ndim=2}.0)]]

Backtrace when the node is created(use Theano flag traceback.limit=N to make it longer):
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1021, in access_term_cache
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1021, in <listcomp>
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1021, in access_term_cache
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1021, in <listcomp>
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "C:\Users\User\AppData\Roaming\Python\Python37\site-packages\theano\gradient.py", line 1162, in access_term_cache
    new_output_grads)

HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
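
If I am reading the apply node correctly, the op that fails is the gradient of a 3x3, stride-1 average pooling with padding 1 (the inputs shown are ws=[3, 3], stride=[1, 1], pad=[1, 1] on a (10, 2048, 5, 5) tensor), which as far as I can tell is what the Theano backend builds for the AveragePooling2D layers with padding='same' inside InceptionV3. Below is a stripped-down sketch of that configuration, which I would expect to hit the same check (my assumption, not verified against the actual graph):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.pool import pool_2d

x = T.tensor4('x')

# Same settings the apply node reports: 3x3 window, stride 1, padding 1,
# mode='average_exc_pad', ignore_border=True.
pooled = pool_2d(x, ws=(3, 3), stride=(1, 1), pad=(1, 1),
                 ignore_border=True, mode='average_exc_pad')

# Taking the gradient introduces the AveragePoolGrad node from the traceback;
# I would expect the same ValueError either here or when the function is run.
grad = theano.grad(pooled.sum(), x)
f = theano.function([x], grad)
f(np.zeros((10, 2048, 5, 5), dtype=theano.config.floatX))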

