Receiving Error: list index out of range while using flow_from_directory

Im trying to predict to image classes (0,1) using a pertained VGG19 with imagenet weights. Heres my code so far:

batch_size_train=32
batch_size_test=32

num_train=1688
num_test=310
num_of_epochs=10

def create_generator(img_path, batch_size):
    data_gen_args = dict(rescale=1./255,
                        rotation_range=90)
    img_datagen = ImageDataGenerator(**data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=img_size,class_mode=None,color_mode="rgb", batch_size=batch_size,seed=seed)
    return img_generator

train_generator = create_generator(data_dir_train, batch_size=batch_size_train)
test_generator = create_generator(data_dir_test, batch_size=batch_size_test)

Found 1688 images belonging to 2 classes.
Found 310 images belonging to 2 classes.

base_model = VGG19(input_shape=(224,224,3), include_top=False, weights="imagenet")
base_model.trainable = False
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
prediction_layer = tf.keras.layers.Dense(1)

inputs = tf.keras.Input(shape=(224, 224, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)


base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

epoch_step_train = num_train//batch_size_train
epoch_step_test = num_test//batch_size_test

model.fit_generator(generator=train_generator, 
                    steps_per_epoch=epoch_step_train, 
                    validation_data=test_generator, 
                    validation_steps=epoch_step_test,
                   epochs=num_of_epochs)

Everything works fine until model.fit_generator() which results in the following traceback:

IndexError                                Traceback (most recent call last)
<ipython-input-75-a6a14a0afe91> in <module>
      3                     validation_data=test_generator,
      4                     validation_steps=epoch_step_test,
----> 5                    epochs=num_of_epochs)

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1295         shuffle=shuffle,
   1296         initial_epoch=initial_epoch,
-> 1297         steps_name='steps_per_epoch')
   1298 
   1299   def evaluate_generator(self,

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py in model_iteration(model, data, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch, mode, batch_size, steps_name, **kwargs)
    263 
    264       is_deferred = not model._is_compiled
--> 265       batch_outs = batch_function(*batch_data)
    266       if not isinstance(batch_outs, list):
    267         batch_outs = [batch_outs]

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight, reset_metrics)
    971       outputs = training_v2_utils.train_on_batch(
    972           self, x, y=y, sample_weight=sample_weight,
--> 973           class_weight=class_weight, reset_metrics=reset_metrics)
    974       outputs = (outputs['total_loss'] + outputs['output_losses'] +
    975                  outputs['metrics'])

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics)
    262       y,
    263       sample_weights=sample_weights,
--> 264       output_loss_metrics=model._output_loss_metrics)
    265 
    266   if reset_metrics:

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in train_on_batch(model, inputs, targets, sample_weights, output_loss_metrics)
    309           sample_weights=sample_weights,
    310           training=True,
--> 311           output_loss_metrics=output_loss_metrics))
    312   if not isinstance(outs, list):
    313     outs = [outs]

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _process_single_batch(model, inputs, targets, output_loss_metrics, sample_weights, training)
    250               output_loss_metrics=output_loss_metrics,
    251               sample_weights=sample_weights,
--> 252               training=training))
    253       if total_loss is None:
    254         raise ValueError('The model cannot be run '

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _model_loss(model, inputs, targets, output_loss_metrics, sample_weights, training)
    164 
    165         if hasattr(loss_fn, 'reduction'):
--> 166           per_sample_losses = loss_fn.call(targets[i], outs[i])
    167           weighted_losses = losses_utils.compute_weighted_loss(
    168               per_sample_losses,

IndexError: list index out of range

Im new to Tensorflow and stackvoverlow and hope its okay that I post all this code. Any ideas where I messed up?

1 answer

  • answered 2020-10-23 12:52 Nicolas Gervais

    You set class_mode=None, which means that your generator doesn't return a target. This confuses Keras, which calculates the loss between the output and the (non-existant) label. Try 'binary' if you have two categories.