InvalidArgumentError: You must feed a value for placeholder tensor 'reshape_target' with dtype float and shape [?,?,?] [[{{node reshape_target}}]]

ramy...@gmail.com

Jul 5, 2020, 11:19:48 PM
to Keras-users
Hi,

I have a dataset with more than 10000 images and I am using a tf.keras data generator (a Sequence subclass) to load the data in batches. However, when I fit the model using model.fit_generator I get an error: You must feed a value for placeholder tensor 'reshape_target' with dtype float and shape [?,?,?] [[{{node reshape_target}}]]

Here is the code:

```
import math
import random
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import Sequence
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.layers import Concatenate, UpSampling2D, Conv2D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam  # Adam is used in model.compile below
import pydicom as dicom  # assuming pydicom provides the dicom.read_file calls below

# IMAGE_WIDTH, IMAGE_HEIGHT and ALPHA are defined elsewhere (not shown in this snippet)

class DataGenerator(Sequence):
    # Keras Sequence that loads DICOM images in batches and builds binary
    # bounding-box masks as the training targets
    
    def __init__(self, dataset, batch_size=10, shuffle=True, predict=False):        
        self.dataset = dataset
        self.batch_size=batch_size
        self.shuffle=shuffle
        self.predict=predict
        self.on_epoch_end()
    
    def __len__(self):         
        return math.ceil(len(self.dataset)/self.batch_size)    
       
    def __getitem__(self, index):
        # Build one batch: images only in predict mode, otherwise (images, masks)
        
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        
        image_batch = [self.dataset[i][1]['dicom'] for i in indexes]
        bbox_batch = [self.dataset[i][1]['boxes'] for i in indexes]
        
        X = self.__generate_X(image_batch)
        if self.predict:
            return X
        else:
            masks = self.__generate_masks(image_batch, bbox_batch)
            return X, masks
        
    def __generate_X(self, image_batch):
        # Read each DICOM, resize to the network input size, replicate the single
        # channel to 3 and apply the MobileNet preprocessing
        
        X = np.zeros((len(image_batch), IMAGE_WIDTH, IMAGE_HEIGHT,3), dtype=np.float32)
        for k, image_path in enumerate(image_batch):
            img = dicom.read_file(image_path).pixel_array
            img = cv2.resize(img, dsize=(IMAGE_HEIGHT, IMAGE_WIDTH), interpolation=cv2.INTER_CUBIC)
            img = np.stack((img,)*3, axis=-1)
            X[k] = preprocess_input(np.array(img, dtype=np.float32))
        return X
                
    def __generate_masks(self, image_batch, bbox_batch):
        # Build one binary mask per image by scaling each bounding box to the
        # resized image and filling it with ones (imageWidth and imageHeight are
        # the original image dimensions, defined elsewhere)
        masks = np.zeros((len(bbox_batch), IMAGE_WIDTH, IMAGE_HEIGHT))
        width_factor = IMAGE_WIDTH/imageWidth
        height_factor = IMAGE_HEIGHT/imageHeight
        
        for k, bbox_items in enumerate(bbox_batch):
            if len(bbox_items) > 0:
                for idx, val in enumerate(bbox_items):
                    x1 = round(val[0]*width_factor)
                    x2 = round((val[0]+val[2])*width_factor)
                    y1 = round(val[1]*height_factor)
                    y2 = round((val[1]+val[3])*height_factor)
                    masks[k][y1:y2, x1:x2] = 1
                return masks
    
    def on_epoch_end(self):  
        self.indexes = np.arange(len(self.dataset))      
        if self.shuffle == True:
            np.random.shuffle(self.indexes) 
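
# Segmentation model: MobileNet backbone with a U-Net-style decoder built from
# UpSampling2D + Concatenate skip connections; a 1x1 sigmoid Conv2D predicts the
# mask and Reshape drops the channel axis to give (IMAGE_HEIGHT, IMAGE_WIDTH)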

def create_model(trainable=True):
    
    model = MobileNet(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), include_top=False, alpha=ALPHA, weights='imagenet')
    
    for layer in model.layers:
        layer.trainable = trainable
        
    block1 = model.get_layer("conv_pw_1_relu").output
    block2 = model.get_layer("conv_pw_3_relu").output
    block3 = model.get_layer("conv_pw_5_relu").output
    block4 = model.get_layer("conv_pw_11_relu").output
    block5 = model.get_layer("conv_pw_13_relu").output
    
    x = Concatenate()([UpSampling2D()(block5), block4])
    x = Concatenate()([UpSampling2D()(x), block3])
    x = Concatenate()([UpSampling2D()(x), block2])
    x = Concatenate()([UpSampling2D()(x), block1])
    x = UpSampling2D()(x)
    
    x = Conv2D(1, kernel_size=1, activation='sigmoid')(x)
    x = Reshape((IMAGE_HEIGHT, IMAGE_WIDTH))(x)
    
    return Model(inputs=model.input, outputs=x)
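
# Note: loss, dice_coefficient, checkpoint, reduce_lr, stop, X_train and X_val
# are used below but defined elsewhere (not shown in this snippet)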
model = create_model()
optimizer = Adam(lr = 0.001)
model.compile(loss=loss, optimizer=optimizer, metrics=[dice_coefficient])

train_gen = DataGenerator(X_train, batch_size=10,  predict=False, shuffle=True)
val_gen = DataGenerator(X_val, batch_size=10,  predict=False, shuffle=True)

model.fit_generator(train_gen, validation_data = val_gen, callbacks = [checkpoint, reduce_lr, stop], epochs=1,  verbose=1)
```
I am using tf 1.14 and tf.keras 2.2.4. Here is the error:
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
79/80 [============================>.] - ETA: 13s - loss: 3.4849 - dice_coefficient: 0.0768
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-17-871baa2fef40> in <module>
      2                     epochs=1, callbacks = [checkpoint, reduce_lr, stop],
      3                     workers=THREADS, use_multiprocessing=False,
----> 4                     shuffle=True, verbose=1)
      5 

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1431         shuffle=shuffle,
   1432         initial_epoch=initial_epoch,
-> 1433         steps_name='steps_per_epoch')
   1434 
   1435   def evaluate_generator(self,

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\keras\engine\training_generator.py in model_iteration(model, data, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch, mode, batch_size, steps_name, **kwargs)
    320           verbose=0,
    321           mode=ModeKeys.TEST,
--> 322           steps_name='validation_steps')
    323 
    324       if not isinstance(val_results, list):

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\keras\engine\training_generator.py in model_iteration(model, data, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch, mode, batch_size, steps_name, **kwargs)
    262 
    263       is_deferred = not model._is_compiled
--> 264       batch_outs = batch_function(*batch_data)
    265       if not isinstance(batch_outs, list):
    266         batch_outs = [batch_outs]

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\keras\engine\training.py in test_on_batch(self, x, y, sample_weight, reset_metrics)
   1245       self._update_sample_weight_modes(sample_weights=sample_weights)
   1246       self._make_test_function()
-> 1247       outputs = self.test_function(inputs)  # pylint: disable=not-callable
   1248 
   1249     if reset_metrics:

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\keras\backend.py in __call__(self, inputs)
   3290 
   3291     fetched = self._callable_fn(*array_vals,
-> 3292                                 run_metadata=self.run_metadata)
   3293     self._call_fetch_callbacks(fetched[-len(self._fetches):])
   3294     output_structure = nest.pack_sequence_as(

~\Anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1456         ret = tf_session.TF_SessionRunCallable(self._session._session,
   1457                                                self._handle, args,
-> 1458                                                run_metadata_ptr)
   1459         if run_metadata:
   1460           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

InvalidArgumentError: You must feed a value for placeholder tensor 'reshape_target' with dtype float and shape [?,?,?]
     [[{{node reshape_target}}]]

Lance Norskog

Jul 6, 2020, 7:53:43 PM
to ramy...@gmail.com, Keras-users
I think you need a -1 for the batch size in the Reshape layer:
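Something like the following, presumably (the original snippet did not survive in the thread, so this is only a sketch; tf.keras' Reshape takes a target shape without the batch dimension, so the -1 goes into the target shape and the remaining dimension is inferred):

```
# Sketch of the suggested change (not the exact code from the reply); it reuses
# IMAGE_WIDTH from the original post and lets Reshape infer the other dimension.
x = Conv2D(1, kernel_size=1, activation='sigmoid')(x)
x = Reshape((-1, IMAGE_WIDTH))(x)
```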






--
Lance Norskog
lance....@gmail.com
Redwood City, CA

nram...@gmail.com

Jul 7, 2020, 1:39:36 AM
to Keras-users
I tried that, but it still gives me the placeholder error.