Augment all the images in a folder in Keras - Python

I am trying to augment all the satellite images (.TIFF format) at once but keep running into an error.
Error: "('Input data in NumpyArrayIterator should have rank 4. You passed an array with shape', (0,))"
When I do it one image at a time there is no error, but when I run it over a folder it gives me the error. Can someone tell me what I am doing wrong?
Thanks
import os
import numpy as np
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from skimage import io

datagen = ImageDataGenerator(
    rotation_range=45,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='constant', cval=125)

image_directory = 'E:\\opencv'
SIZE = 150
dataset = []

my_images = os.listdir(image_directory)
for i, image_name in enumerate(my_images):
    if (image_name.split('.')[1] == 'TIFF'):
        image = io.imread(image_directory + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))

x = np.array(dataset)

i = 0
for batch in datagen.flow(x, batch_size=16,
                          save_to_dir='E:\\opencv',
                          save_prefix='a',
                          save_format='TIFF'):
    i += 1
    if i > 20:
        break

The error means the array you pass to datagen.flow is empty. Either the if test never matches your filenames, or io.imread fails because image_directory + image_name has no path separator between the folder and the file name. Modify the code as follows to check both and build the path correctly:
for i, image_name in enumerate(my_images):
    if (image_name.split('.')[1] == 'TIFF'):
        fpath = os.path.join(image_directory, image_name)
        print(fpath)  # if this does not execute, your if statement is incorrect
        image = io.imread(fpath)
        print(image.shape)  # verify the image was read correctly
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        print(image.size)  # verify the image was resized
        dataset.append(np.array(image))
or
for i, image_name in enumerate(my_images):
    if (image_name.split('.')[1] == 'TIFF'):
        image = io.imread(image_directory + '/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))
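One more thing to check: image_name.split('.')[1] == 'TIFF' is case-sensitive and breaks on filenames with more than one dot, which is exactly the situation the print(fpath) above is meant to expose. A more robust filter (a small sketch, not part of the original answer):

for image_name in my_images:
    # case-insensitive extension check that also copes with extra dots in the name
    if image_name.lower().endswith(('.tif', '.tiff')):
        image = io.imread(os.path.join(image_directory, image_name))
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))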

Related

Why do I get a 'Graph Execution Error' when predicting with a deep learning model?

First of all, I already loaded my model to predict the inference set that I prepared, but I get an error when I try to predict and show the results.
So here is my code:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.io import read_file
from tensorflow.image import decode_image, resize

img_height = 220  # assumed from the (220, 220) shapes discussed in the answer below

def load_img(filename):
    img = read_file(filename)                          # load the file
    img = decode_image(img, channels=3)                # decode to RGB
    img = resize(img, size=[img_height, img_height])   # resize the image
    img = np.array(img)[:, :, 1]                       # keep a single channel
    img = img / 255.                                   # rescale to [0, 1]
    return img

inf1 = load_img(r'ML2\COVID-19\inf_set\covid\covid - 1.jpeg')
inf2 = load_img(r'ML2\COVID-19\inf_set\covid\covid - 2.jpeg')
inf3 = load_img(r'ML2\COVID-19\inf_set\normal\Normal - 1.jpeg')
inf4 = load_img(r'ML2\COVID-19\inf_set\normal\Normal - 2.jpeg')
inf5 = load_img(r'ML2\COVID-19\inf_set\pneumonia\Pneumonia - 1.jpeg')
inf6 = load_img(r'ML2\COVID-19\inf_set\pneumonia\Pneumonia - 2.jpeg')

plt.figure(figsize=(35, 5))
plt.suptitle('Prediction Results', fontsize=15)
counter = 1
for i in [inf1, inf2, inf3, inf4, inf5, inf6]:
    plt.subplot(1, 6, counter)
    res = int(tf.round(model.predict(x=expand_dims(i, axis=0))))
    plt.imshow(i)
    plt.title(f"Prediction: {label_data[res]}")
    plt.axis('off')
    counter += 1
plt.show()
And here is the error notification.
So, I need help to solve this problem. Thank you.
You are squashing the image shape in this line:
img = np.array(img)[:,:,1]
This is why your image becomes shape (220, 220) instead of (220, 220, 1); with the channel axis kept, expand_dims(axis=0) would give the proper input shape (1, 220, 220, 1).
You could change the load_img function, or you could solve this by doing:
res = int(tf.round(model.predict(x=expand_dims(i, axis=[0,3]))))
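Note that passing more than one axis to expand_dims only works with np.expand_dims (NumPy 1.18 and later); tf.expand_dims accepts a single axis. A minimal sketch of the shape fix, using a dummy array in place of the real image:

import numpy as np

img = np.zeros((220, 220), dtype=np.float32)   # stand-in for the single-channel image
batch = np.expand_dims(img, axis=(0, 3))       # add batch and channel axes at once
print(batch.shape)                             # (1, 220, 220, 1)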

Extracting features from CNN (Xception model), I got IsADirectoryError

I want to extract features from the Xception model.
I'm working on an image caption generator using CNN and LSTM, and I shared the code I got from a site. I tried to solve it, but when I change the code I get an error in some other part, so can anyone please tell me how to solve this?
import os
import numpy as np
from pickle import dump          # assuming pickle's dump for the features file
from PIL import Image
from tqdm import tqdm
from keras.applications.xception import Xception

def extract_features(directory):
    model = Xception(include_top=False, pooling='avg')
    features = {}
    for img in tqdm(os.walk(directory)):
        filename = directory + "/" + img
        image = Image.open(filename)
        image = image.resize((299, 299))
        image = np.expand_dims(image, axis=0)
        # image = preprocess_input(image)
        image = image / 127.5
        image = image - 1.0
        feature = model.predict(image)
        features[img] = feature
    return features

# 2048 feature vector
features = extract_features(dataset_images)
dump(features, open("features.p", "wb"))
The error:
TypeError Traceback (most recent call last)
<ipython-input-8-5f1d4e4e3211> in <module>()
16
17 #2048 feature vector
---> 18 features = extract_features(dataset_images)
19 dump(features, open("features.p","wb"))
<ipython-input-8-5f1d4e4e3211> in extract_features(directory)
3 features = {}
4 for img in tqdm(os.walk(directory)):
----> 5 filename = directory + "/" + img
6 image = Image.open(filename)
7 image = image.resize((299,299))
TypeError: can only concatenate str (not "tuple") to str
In your case, img in filename = directory + "/" + img is a tuple (os.walk yields (root, dirs, files) tuples) while directory is a string, so they cannot be concatenated.
So you have to loop through the files inside each tuple. Even then you might get another error, because os.walk also walks into subdirectories, so a file may not sit directly under directory.
def extract_features(directory):
    model = Xception(include_top=False, pooling='avg')
    features = {}
    for (root, dirs, files) in tqdm(os.walk(directory)):
        for img in files:
            filename = directory + "/" + img
            image = Image.open(filename)
            image = image.resize((299, 299))
            image = np.expand_dims(image, axis=0)
            # image = preprocess_input(image)
            image = image / 127.5
            image = image - 1.0
            feature = model.predict(image)
            features[img] = feature
    return features
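If any of the images do sit in subdirectories, directory + "/" + img will still produce a broken path. A safer variant (a sketch, not part of the original answer) joins each filename with the root that os.walk reports it under:

import os

def list_image_paths(directory):
    # collect full paths by joining each filename with the folder os.walk found it in
    paths = []
    for root, dirs, files in os.walk(directory):
        for name in files:
            paths.append(os.path.join(root, name))
    return paths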

Is there a way to pass an 8-channel image to Keras for a CNN?

The problem: I am unable to train a CNN model on 8-channel .TIF images.
Expected output: map the training data (train_ds) via gdal and train the model.
data (images):
n = 600
shape = (256, 256, 8)
data structure:
project_photos/
....classes/
......barren/
......agriculture/
......wooded/
import numpy as np
import os
import PIL
import PIL.Image
import tensorflow as tf
import tensorflow_datasets as tfds
import pathlib
>print (tf.__version__)
2.1.0
data_dir = ".\projects\keras\projectA\project_photos\classes")
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.tif')))
>print(image_count)
600
list_ds = tf.data.Dataset.list_files(str(data_dir/'*/*'), shuffle=False)
list_ds = list_ds.shuffle(image_count, reshuffle_each_iteration=False)
batch_size = 32
img_height = 256
img_width = 256
>for f in list_ds.take(5):
>    print(f.numpy())
b'/home/projects/keras/projectA/project_photos/classes/barren/12345_b0001.tif'
b'/home/projects/keras/projectA/project_photos/classes/wooded//12345_w0001.tif'
b'/home/projects/keras/projectA/project_photos/classes/barren/12345_b0002.tif'
b'/home/projects/keras/projectA/project_photos/classes/agriculture//12345_a0001.tif'
b'/home/projects/keras/projectA/project_photos/classes/wooded/12345_w0002.tif'
# tree structure
>class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))
print(class_names)
['barren' 'agriculture' 'wooded']
# train/validation split
val_size = int(image_count * 0.2)
train_ds = list_ds.skip(val_size)
val_ds = list_ds.take(val_size)
def get_label(file_path):
    # convert the path to a list of path components
    parts = tf.strings.split(file_path, os.path.sep)
    # The second to last is the class-directory
    one_hot = parts[-2] == class_names
    # Integer encode the label
    return tf.argmax(one_hot)

def decode_img(img):
    # convert the compressed string to a 3D uint8 tensor
    img = tf.image.decode_jpeg(img, channels=3)
    # resize the image to the desired size
    return tf.image.resize(img, [img_height, img_width])

def process_path(file_path):
    label = get_label(file_path)
    # load the raw data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    return img, label

# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
train_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(process_path, num_parallel_calls=AUTOTUNE)
I understand that tensorflow has limited (experimental) support for decode_tiff, and even if that did work, I am unable to use the latest version of TF that has that update.
This leaves me with attempting workarounds, the following of which have not succeeded:
"""
Updating decode_img(img) in attempt to process 8-channel .TIF raster
"""
#attempt, adding gdal_Open variable to decode_img
## fails due to image path (train_ds) being stored as byte.
x = gdal.Open(file_path)
Error: Not a string.
#attempt, modifying to extract PATH as str().
def process_path(file_path):
    label = get_label(file_path)
    # load the raw data from the file as a string
    img = ''
    for fpath in file_path:
        img = fpath.numpy()
    img = decode_img(img)
    return img, label

>train_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE)
ValueError: len requires a non-scalar tensor, got one of shape Tensor("Shape:0", shape=(0,), dtype=int32)
#attempt, processing outside of `.map`, works just fine.
imgList = []
for elem in train_ds:
    img = elem.numpy()
    img = img.decode()
    imgList.append(img)

file_path = imgList[0]
raster = gdal.Open(file_path)
bands = [raster.GetRasterBand(k + 1).ReadAsArray() for k in range(raster.RasterCount)]
n_bands = len(bands)
img_array = np.stack(bands, 2)
img = tf.convert_to_tensor(img_array, dtype=tf.float32)
img = tf.image.resize(img, [img_height, img_width])
print(type(img))
print(img.numpy().shape)

<class 'tensorflow.python.framework.ops.EagerTensor'>
(256, 256, 8)
So, any ideas on how I can get this to work within the TF framework - getting TF to process the raster via .map?
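Since the GDAL loading already works eagerly, one possible approach (a sketch under assumptions, not something stated in the post) is to wrap that loader in tf.py_function inside process_path, so that .map can call regular Python/GDAL code for each file path:

import numpy as np
import tensorflow as tf
from osgeo import gdal   # assumes GDAL's Python bindings are installed under osgeo

def load_raster(path_tensor):
    # runs eagerly inside tf.py_function, so .numpy() and GDAL calls are available
    path = path_tensor.numpy().decode()
    raster = gdal.Open(path)
    bands = [raster.GetRasterBand(k + 1).ReadAsArray() for k in range(raster.RasterCount)]
    return np.stack(bands, axis=2).astype(np.float32)

def process_path(file_path):
    label = get_label(file_path)                 # same label logic as in the question's code
    img = tf.py_function(load_raster, [file_path], tf.float32)
    img.set_shape([None, None, None])            # static shape is lost through py_function
    img = tf.image.resize(img, [img_height, img_width])
    return img, label

# tf.data.experimental.AUTOTUNE is the constant available in TF 2.1
train_ds = train_ds.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)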

Perform augmentation (using ImageDataGenerator) and save the augmented images with their original names

I am applying augmentation to 493 classes, and each class has 1, 2, 3, or 4 images (it is not known in advance; one class may have only 1 image, another may have 2). When I apply augmentation using ImageDataGenerator I get the augmented images, but the names of the images are generated randomly. I want the augmented image name to be the original image name. I tried some code:
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
import glob, os

path = './newaug'
outpath = './newaug_result5/'
filenames = glob.glob(path + "/**/*.png", recursive=True)
imgnum = 50
print(filenames)

for img in filenames:
    if "DS_Store" in img: continue
    src_fname, ext = os.path.splitext(img)
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=10,
        width_shift_range=0.05,
        height_shift_range=0.05,
        fill_mode='constant', cval=0.0)
    jf_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input
    )
    img_name = src_fname.split('/')[-1]
    new_dir = os.path.join(outpath, src_fname.split('/')[-1].rsplit('-', 1)[0])
    if not os.path.lexists(new_dir):
        os.mkdir(new_dir)
    # save_fname = os.path.join(new_dir, os.path.basename(img_name))
    save_fname = new_dir
    i = 0
    train_generator = train_datagen.flow_from_directory(path, target_size=(224, 224),
                                                        save_to_dir=save_fname)
    for batch in train_generator:
        i += 1
        if i > imgnum:
            break
    for batch in jf_datagen.flow_from_directory(path, target_size=(224, 224),
                                                save_to_dir=save_fname):
        i += 1
        if i > imgnum:
            break
What I am getting is the following, and images also end up in the wrong classes:
classname1/
|-01_133214.png
|-02_43434.png (This image actually belongs to class 2)
classname2/
|-01_13333214.png(This image actually belongs to class 1)
|-02_4343334.png
|-03_13333214.png(This image actually belongs to class 3)
What I want is: generate a folder per class, save the augmented images into that same class folder, and keep the name the same as the original image.
classname1/ (Images should belong to same class, for eg 01 signifies classname1)
|classname1-01_2424424.png
|classname1-01_2134242.png
|
|classname1-01_232424.png
classname2/
|classname2-02_323212.png
|classname2-02_321313.png
|
|classname2-02_333339.png
It worked using flow instead of flow_from_directory.
The code is:
import numpy as np
import keras, glob, os
import cv2
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

img_path = './newaug'
outpath = './newaug_result7/'
filenames = glob.glob(img_path + "/**/*.png", recursive=True)

for img in filenames:
    if "DS_Store" in img: continue
    src_fname, ext = os.path.splitext(img)
    datagen = ImageDataGenerator(rotation_range=10,
                                 width_shift_range=0.05,
                                 height_shift_range=0.05,
                                 fill_mode='constant', cval=0.0)
    img = load_img(img)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)
    img_name = src_fname.split('/')[-1]
    new_dir = os.path.join(outpath, src_fname.split('/')[-1].rsplit('-', 1)[0])
    if not os.path.lexists(new_dir):
        os.mkdir(new_dir)
    # save_fname = os.path.join(new_dir, os.path.basename(img_name))
    save_fname = new_dir
    i = 0
    for batch in datagen.flow(x, batch_size=1, save_to_dir=save_fname,
                              save_prefix=img_name, save_format='jpg'):
        i += 1
        if i > 51:
            break
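If the saved file needs to carry exactly the original name rather than Keras' random suffix, one option (a sketch, assuming a single augmented copy per image) is to skip save_to_dir and write the batch out yourself with array_to_img, which is already imported above:

# take one augmented batch and save it under the original name (hypothetical variant)
batch = next(datagen.flow(x, batch_size=1))
out_img = array_to_img(batch[0])
out_img.save(os.path.join(save_fname, img_name + '.jpg'))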

Can Keras.load_img work on multiple images in folder

I'm working on an algorithm to classify pictures of dogs and cats using Keras. As per the code below, this works when I feed in one image, but my question is: can Keras load_img work on multiple images in a folder? When I try this it says the list object has no attributes 'seek' and 'read'.
from keras.preprocessing import image
import numpy as np

test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
I think there is a solution, you can try this:
import os
import numpy as np

images = []
for img in os.listdir(folder_path):
    # os.listdir returns bare filenames, so join them with the folder path
    img = image.load_img(os.path.join(folder_path, img), target_size=(img_width, img_height))
    img = image.img_to_array(img)   # image.img_to_array, not img.img_to_array
    img = np.expand_dims(img, axis=0)
    images.append(img)

images = np.vstack(images)
classes = classifier.predict(images, batch_size=10)
print(classes)
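A small follow-up sketch (names and threshold assumed from the question, not part of the original answer) to map each prediction row back to its file:

# hypothetical `names` list: append each filename at the top of the loop above,
# before `img` is reassigned to the loaded image
for name, pred in zip(names, classes):
    label = 'dog' if pred[0] == 1 else 'cat'   # same threshold as the single-image example
    print(name, label)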
