Issue
I am trying image to image mapping. i had to create a custom datagenerator as the imagedatagenerator gives y
argument is not supported when using keras.utils.Sequence
as input. error.I found this datagenerator created by someone else and modified it to fit my need. And now again i am getting this error. I dont know why i am getting this error.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Input, Dropout, Flatten, Dense
from keras.layers import Convolution2D
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import UpSampling2D
from keras.models import Model
from tensorflow.keras.layers import BatchNormalization
import os
import numpy as np
import cv2
class image_gen():
def __init__(self, sdir, batch_size,input_size, shuffle):
self.batch_index=0
self.sdir=sdir # directory containing input images
self.batch_size=batch_size #batch size is number of samples in a batch
# tuple (width, height) for target image
self.input_size=input_size
self.shuffle=shuffle # set to True to shuffle images, False for no shuffle
self.label_list=[] # initialize list to hold sequential list of total labels generated
self.image_list=[] # initialize list to hold sequential list of total images filenames generated
self.s_list=os.listdir(self.sdir) # list of images in directory
def get_images(self): # gets a batch of input images, resizes input image to make target images
input_image_batch=[] # initialize list to hold a batch of target images
# initialize list to hold batches of input images
sample_count=len(self.s_list) # determine total number of images available
for i in range(self.batch_index * self.batch_size, (self.batch_index + 1) * self.batch_size ): #iterate for a batch
j=i % sample_count # cycle j value over range of available images
k=j % self.batch_size # cycle k value over batch size
if self.shuffle: # if shuffle select a random integer between 0 and sample_count-1 to pick as the image=label pair
m=np.random.randint(low=0, high=sample_count-1, size=None, dtype=int)
else:
m=j # no shuffle
path_to_img=os.path.join(self.sdir, self.s_list[m]) # define the path to the m th image
input_image=cv2.imread(path_to_img)
#create the target image from the input image
input_image_batch.append(input_image)
input_image_array=np.array(input_image_batch)
self.batch_index=self.batch_index +1
yield (input_image_array)
batch_size=32
target_shape=(3402,3401,)
sdir=r"D:Input\\train" # set this to your image directory
shuffle=False
gen=image_gen(sdir, batch_size,target_shape,shuffle) # instantiate an instance of the class
input_images=next(gen.get_images()) # get a batch of inputs, labels
batch_size=32
target_shape=(3402,3401,)
sdir=r"D:\\Validation\\valid" # set this to your image directory
shuffle=False
gen=image_gen(sdir, batch_size,target_shape,shuffle) # instantiate an instance of the class
Valid_images=next(gen.get_images()) # get a batch of inputs, labels
batch_size=32
target_shape=(3402,3401,)
sdir=r"D:\\Output\\train" # set this to your image directory
shuffle=False
gen=image_gen(sdir, batch_size,target_shape,shuffle) # instantiate an instance of the class
output_images=next(gen.get_images()) # get a batch of inputs, labels
base_model = tf.keras.applications.ResNet50(
include_top=False,
weights="imagenet",
input_shape=(3402,3401,3),
pooling=None,
)
for layer in base_model.layers[:]:
layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Convolution2D(3,9,activation='relu',padding='same'))
model.add(UpSampling2D())
model.add(UpSampling2D())
model.add(BatchNormalization())
model.add(Convolution2D(3,9,activation='relu',padding='same'))
model.build((None, 3402, 3401, 3))
model.summary()
model.compile(optimizer="adam", loss='mean_squared_error', metrics=['mean_squared_error'])
model.fit(input_images,output_images,validation_data = Valid_images,epochs=100)
Error:
ValueError Traceback (most recent call last)
<ipython-input-20-41ebbbad4a4a> in <module>
----> 1 model.fit(input_images,output_images,validation_data = Valid_images,epochs=100)
~\anaconda3\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1095 (x, y, sample_weight), validation_split=validation_split))
1096
-> 1097 if validation_data:
1098 val_x, val_y, val_sample_weight = (
1099 data_adapter.unpack_x_y_sample_weight(validation_data))
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
Solution
It looks like model.fit expects a different type of argument for the validation_data
parameter than what you are providing it.
The Keras documentation on model.fit
states:
validation_data:
Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. Thus, note the fact that the validation loss of data provided using validation_split or validation_data is not affected by regularization layers like noise and dropout. validation_data will override validation_split.
validation_data could be:
- A tuple (x_val, y_val) of Numpy arrays or tensors.
- A tuple (x_val, y_val, val_sample_weights) of NumPy arrays.
- A tf.data.Dataset.
- A Python generator or keras.utils.Sequence returning (inputs, targets) or (inputs, targets, sample_weights).
What is the type of your validation data when it goes into model.fit()
?
Regarding the error message; if an if variable:
statement is used and variable
is a numpy array, whether this should evaluate to True
is ambiguous.
For example:
arr = np.array([True, True, False])
if arr:
...
Should this evaluate to true?
arr = np.array([True, True, True])
if arr:
...
Or should it only evaluate to true if all elements do?
If one wants the former to evaluate to true, one could use arr.any()
. If all elements are required to evaluate to True for the entire array to also be evaluated as such, arr.all()
should be used.
When neither option is provided, an exception is raised.
Now this could be a bug in the keras module, but I think it is far more likely that model.fit()
expects different input from what you are putting in.
Answered By - jrbergen
0 comments:
Post a Comment
Note: Only a member of this blog may post a comment.