Resizing an input image in a Keras Lambda layer - python

I would like my keras model to resize the input image using OpenCV or similar.
I have seen the use of ImageGenerator, but I would prefer to write my own generator and simply resize the image in the first layer with keras.layers.core.Lambda.
How would I do this?

If you are using the TensorFlow backend, you can use the tf.image.resize_images() function to resize the images inside a Lambda layer.
Here is a small example to demonstrate the same:
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
from keras.layers import Lambda, Input
from keras.models import Model
from keras.backend import tf as ktf
# 3 channel images of arbitrary shape
inp = Input(shape=(None, None, 3))
try:
    out = Lambda(lambda image: ktf.image.resize_images(image, (128, 128)))(inp)
except Exception:
    # older versions of tensorflow take height and width as separate arguments
    out = Lambda(lambda image: ktf.image.resize_images(image, 128, 128))(inp)
model = Model(inputs=inp, outputs=out)
model.summary()
X = scipy.ndimage.imread('test.jpg')
out = model.predict(X[np.newaxis, ...])
fig, Axes = plt.subplots(nrows=1, ncols=2)
Axes[0].imshow(X)
Axes[1].imshow(np.uint8(out[0, ...]))
plt.show()
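If you are on a newer TensorFlow 2.x / tf.keras stack, the same idea still applies; tf.image.resize_images was renamed to tf.image.resize. A minimal sketch under that assumption (the dummy batch is just for illustration):
import numpy as np
import tensorflow as tf

# 3-channel images of arbitrary spatial size
inp = tf.keras.Input(shape=(None, None, 3))
# tf.image.resize replaced tf.image.resize_images in TF 2.x
out = tf.keras.layers.Lambda(lambda img: tf.image.resize(img, (128, 128)))(inp)
model = tf.keras.Model(inputs=inp, outputs=out)

# dummy batch just to show that the layer resizes whatever it is given
x = np.random.randint(0, 256, size=(1, 300, 200, 3)).astype("float32")
print(model.predict(x).shape)  # (1, 128, 128, 3)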

Related

How can I solve the "cuda 2D conv" problem?

This is my code, but the predict function does not work.
Error:
from keras.applications.vgg16 import VGG16
from keras.utils.vis_utils import plot_model
#Keras will download the weight files from the Internet and store them in the ~/.keras/models directory.
model = VGG16()
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
# load an image from file
image = load_img('output.png', target_size=(224, 224))
# convert the image pixels to a numpy array
image = img_to_array(image)
# reshape data for the model
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# prepare the image for the VGG model
image = preprocess_input(image)
from keras.applications.vgg16 import decode_predictions
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# predict the probability across all output classes
yhat = model.predict(image)
# convert the probabilities to class labels
label = decode_predictions(yhat)
# retrieve the most likely result, e.g. highest probability
label = label[0][0]
# print the classification
print('%s (%.2f%%)' % (label[1], label[2]*100))
img = Image.open('output.png')
plt.imshow(img)
What should I do?
The error is that it failed to get the convolution algorithm. I'm not sure of all the things that can cause this error, but when I get it, it is because I have more than one instance of Python running that is using TensorFlow. So in Jupyter, shut down the kernels of all open notebooks except the one you want to run.
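If shutting down the other kernels is not an option, a workaround that often helps with this "failed to get convolution algorithm" error is to stop TensorFlow from grabbing all of the GPU memory up front. A minimal sketch, assuming TensorFlow 2.x, to run before any model is built:
import tensorflow as tf

# allocate GPU memory on demand instead of reserving it all at once
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)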

convert keras input to numpy array

Tensor("flatten_3/Identity:0", shape=(None, 100), dtype=float32)
Hi, I have tried to print a tensor as a numpy array as follows:
import tensorflow as tf
import numpy as np
from keras.layers import Input
print(tf.executing_eagerly())
x = Input(shape=(32,))
print(x.numpy())
A Keras Input is a symbolic placeholder, so it has no values for .numpy() to return even when eager execution is on; you can only print the concrete data that flows through the model. To be honest I'm sure there is a cleaner way to visualize your input tensor, but here's a hacky one for what it's worth:
import tensorflow as tf
def tf_print(x):
    tf.print("my tensor:")
    tf.print(x)
    return x

iput = tf.keras.layers.Input(shape=(1,), dtype='int32')
printt = tf.keras.layers.Lambda(tf_print)(iput)  # branch that prints your tensor
oput = tf.keras.layers.Flatten()(iput)           # branch that is the rest of your model
model = tf.keras.Model(inputs=[iput], outputs=[oput, printt])
model(tf.constant([[4]]))  # pass a batch of one sample so the Lambda prints it
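If you are on TensorFlow 2.x with eager execution enabled, a less hacky alternative (a sketch under that assumption; the layer names here are made up for illustration) is to build a sub-model that ends at the tensor you care about and call .numpy() on its concrete output:
import numpy as np
import tensorflow as tf

inp = tf.keras.layers.Input(shape=(32,))
flat = tf.keras.layers.Flatten(name="flatten_3")(tf.keras.layers.Dense(100)(inp))
model = tf.keras.Model(inputs=inp, outputs=flat)

# a sub-model that stops at the layer whose values you want to inspect
probe = tf.keras.Model(inputs=model.input, outputs=model.get_layer("flatten_3").output)
x = np.random.rand(2, 32).astype("float32")
print(probe(x).numpy())  # concrete values, not a symbolic Tensor
The key point is that only data flowing through the model has values; the symbolic Input itself never does.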

Can't load custom dataset into pretrained CNN for feature extraction

Hello, I am a newbie to all this. I am trying to feed the pretrained CNN VGG16 with a custom dataset of mine and then do feature extraction for every image with numpy, but I am getting this error: 'numpy.ndarray' object has no attribute 'load_img'. Any help is appreciated, thanks.
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
import matplotlib.pyplot as plt
import os
model = VGG16(weights='imagenet', include_top=False)
dir_images = "C:/Users/.../Desktop/db"
imgs = os.listdir(dir_images)
for imgnm in imgs:
    image = plt.imread(os.path.join(dir_images, imgnm))
    img = image.load_img(image, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    features = model.predict(x)
    #np.save('features.csv', features)
You are overriding the image module of keras.preprocessing with your own actual images loaded with matplotlib.
So just change the line
image = plt.imread(os.path.join(dir_images, imgnm))
into something else like
arr_image = plt.imread(os.path.join(dir_images, imgnm))
and then this error will be gone.
But note that image.load_img takes a path as input, not an actual image of type ndarray, so you should instead use load_img in the loop and remove the matplotlib loading.
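A sketch of the corrected loop following that advice (the directory path is the placeholder from the question, and features.npy is just an example output name):
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
import numpy as np
import os

model = VGG16(weights='imagenet', include_top=False)
dir_images = "C:/Users/.../Desktop/db"
imgs = os.listdir(dir_images)

all_features = []
for imgnm in imgs:
    # load_img wants a file path, not an ndarray
    img = image.load_img(os.path.join(dir_images, imgnm), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    all_features.append(model.predict(x))

np.save('features.npy', np.concatenate(all_features))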

Keras FFT layer has no effect

Hi, I'm trying to implement an FFT in my model. I isolated the FFT layer to better see the effect, but when I call my model on any data it returns the input, unaffected.
Here's my code with sample data:
import matplotlib.pyplot as plt
from keras.layers import Input, Lambda
from keras.models import Model
import tensorflow as tf
import numpy as np
def fftModel1D(input_shape):
    x_input = Input(input_shape)
    x = Lambda(lambda v: tf.cast(tf.spectral.fft(tf.cast(v, dtype=tf.complex64)), tf.float32))(x_input)
    return Model(inputs=x_input, outputs=[x])
model = fftModel1D((1000, 1))
testData = np.asarray([np.expand_dims(np.sin(np.linspace(0, 100, 1000)), 1)])
pred = model.predict(testData)[0]
fig, axes = plt.subplots(1, 2)
axes[0].plot(np.squeeze(testData))
axes[1].plot(np.squeeze(pred))
plt.show()
This currently shows identical plots of sin(x) while I'm expecting the FFT on the second graph.
I'm using Python 3.6.8, Keras 2.2.4, Tensorflow 1.13.1
tf.spectral.fft transforms only the innermost dimension, and here that dimension has length 1 (the input shape is (1000, 1)), so the FFT of each length-1 signal is just the signal itself. Since the input has 2 dimensions, using tf.spectral.fft2d (or squeezing the trailing dimension so the FFT runs over the 1000 samples) gives the expected result.
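A minimal sketch of that fix against the model above, assuming the same TF 1.13 / tf.spectral API:
from keras.layers import Input, Lambda
from keras.models import Model
import tensorflow as tf

def fftModel1D(input_shape):
    x_input = Input(input_shape)
    # fft2d transforms the innermost two axes, i.e. the whole (1000, 1) signal,
    # instead of only the trailing axis of length 1
    x = Lambda(lambda v: tf.cast(tf.spectral.fft2d(tf.cast(v, dtype=tf.complex64)), tf.float32))(x_input)
    return Model(inputs=x_input, outputs=[x])
Note that casting back to tf.float32 keeps only the real part, as in the original code; if you want a magnitude spectrum, apply tf.abs to the complex result instead.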

TypeError: Image data cannot be converted to float with plt.imshow after importing with tf.io.decode_jpeg

I'm trying to load a file with Tensorflow and visualize the result, but I'm getting TypeError: Image data cannot be converted to float
import tensorflow as tf
import matplotlib.pyplot as plt
image = tf.io.read_file('./my-image.jpg')
image = tf.io.decode_jpeg(image, channels=3)
print(image.shape) # (?, ?, 3)
plt.imshow(image)
Not sure about your TensorFlow version. TensorFlow uses static computational graphs by default in 1.x, so the image you get is a symbolic Tensor rather than actual pixel data, which is why imshow raises this error. First, create a sample picture:
import numpy as np
from PIL import Image
np.random.seed(0)
# Image.fromarray with mode 'RGB' expects uint8 data, so scale to 0-255
image = (np.random.random_sample(size=(256, 256, 3)) * 255).astype(np.uint8)
im = Image.fromarray(image, 'RGB')
im.save('my-image.jpg')
Then you need to evaluate the tensor inside a tf.Session(). This will show the image created above.
import tensorflow as tf
import matplotlib.pyplot as plt
image = tf.io.read_file('my-image.jpg')
image = tf.io.decode_jpeg(image, channels=3)
print(image)
with tf.Session() as sess:
    plt.imshow(sess.run(image))
    plt.show()
# print(image) outputs:
# Tensor("DecodeJpeg:0", shape=(?, ?, 3), dtype=uint8)
Or you can enable dynamic computational graphs with tf.enable_eager_execution() in TensorFlow; the code below achieves the same effect as above.
import tensorflow as tf
import matplotlib.pyplot as plt
tf.enable_eager_execution()
image = tf.io.read_file('my-image.jpg')
image = tf.io.decode_jpeg(image, channels=3)
plt.imshow(image)
plt.show()
In TensorFlow 2, dynamic computational graphs (eager execution) are the default, so you don't need tf.enable_eager_execution().
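A minimal sketch of the same thing on TensorFlow 2.x, under that assumption (my-image.jpg is the sample file created above):
import tensorflow as tf
import matplotlib.pyplot as plt

image = tf.io.read_file('my-image.jpg')
image = tf.io.decode_jpeg(image, channels=3)
# eager execution is on by default, so the tensor already holds pixel values
plt.imshow(image.numpy())
plt.show()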
