I want to predict my image with a pre-trained Keras Xception model. I have written some code, but I get errors. The code is below:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
# Load the pre-trained Xception model to be used as the base encoder.
xception = keras.applications.Xception(
    include_top=False, weights="imagenet", pooling="avg"
)
# Set the trainability of the base encoder.
for layer in xception.layers:
    layer.trainable = False
# Receive the images as inputs.
#inputs = layers.Input(shape=(299, 299, 3), name="image_input")
input = '/content/1.png'
input = tf.keras.preprocessing.image.load_img(input, target_size=(299, 299, 3))
BATCH_SIZE = 1
NUM_BOXES = 5
IMAGE_HEIGHT = 256
IMAGE_WIDTH = 256
CHANNELS = 3
CROP_SIZE = (24, 24)
boxes = tf.random.uniform(shape=(NUM_BOXES, 4))
box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0,
                                maxval=BATCH_SIZE, dtype=tf.int32)
output = tf.image.crop_and_resize(input, boxes, box_indices, CROP_SIZE)
xception_input = tf.keras.applications.xception.preprocess_input(output)
plt.imshow(xception_input/255.)
I want to display 5 boxes for each image, as written in the code. However, I get the following error:
ValueError: Attempt to convert a value (<PIL.Image.Image image mode=RGB size=299x299 at 0x7F1DF6044F10>)
with an unsupported type (<class 'PIL.Image.Image'>) to a Tensor.
With tf.keras.preprocessing.image.load_img, the image is loaded in PIL format. You'll have to convert it to a NumPy array before getting the prediction:
import numpy as np

image = tf.keras.preprocessing.image.load_img(image_path)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr])  # Convert the single image to a batch of one.
predictions = model.predict(input_arr)
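Applied to the code in the question, a minimal sketch could look like this (assuming the same /content/1.png path and five random boxes, as in the question):

import numpy as np
import tensorflow as tf

# Load the image and convert the PIL object to a float array.
image = tf.keras.preprocessing.image.load_img('/content/1.png', target_size=(299, 299))
image = tf.keras.preprocessing.image.img_to_array(image)
# crop_and_resize expects a 4-D batch: (batch, height, width, channels).
image = np.expand_dims(image, axis=0)
boxes = tf.random.uniform(shape=(5, 4))
box_indices = tf.zeros(shape=(5,), dtype=tf.int32)  # every box refers to image 0
output = tf.image.crop_and_resize(image, boxes, box_indices, (24, 24))
xception_input = tf.keras.applications.xception.preprocess_input(output)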
I have to apply tf.image.crop_and_resize to my images, and I want to generate 5 boxes from each image. I have written the code below, which works fine:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
# Load the pre-trained Xception model to be used as the base encoder.
xception = keras.applications.Xception(
    include_top=False, weights="imagenet", pooling="avg"
)
# Set the trainability of the base encoder.
for layer in xception.layers:
    layer.trainable = False
# Receive the images as inputs.
inputs = layers.Input(shape=(299, 299, 3), name="image_input")
input = '/content/1.png'
input = tf.keras.preprocessing.image.load_img(input, target_size=(299, 299, 3))
image = tf.expand_dims(np.asarray(input)/255, axis=0)
BATCH_SIZE = 1
NUM_BOXES = 5
IMAGE_HEIGHT = 256
IMAGE_WIDTH = 256
CHANNELS = 3
CROP_SIZE = (24, 24)
boxes = tf.random.uniform(shape=(NUM_BOXES, 4))
box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0, maxval=BATCH_SIZE, dtype=tf.int32)
output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE)
xception_input = tf.keras.applications.xception.preprocess_input(output)
The above code works fine; however, when I want to display these boxes, I run the code below:
for i in range(5):
    # define subplot
    plt.subplot(330 + 1 + i)
    # generate batch of images
    batch = xception_input.next()
    # convert to unsigned integers for viewing
    image = batch[0].astype('uint8')
    image = np.reshape(24,24,3)
    # plot raw pixel data
    plt.imshow(image)
# show the figure
plt.show()
But it generates this error: AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'next'.
You have to use [i] instead of .next().
There is also a problem with converting it to uint8 (and it doesn't need the reshape):
for i in range(5):
    plt.subplot(331 + i)
    tensor = xception_input[i]
    #print(tensor)
    tensor = tensor*255
    image = np.array(tensor, dtype=np.uint8)
    #print(image)
    plt.imshow(image)
Or use a for loop to get the items:
for i, tensor in enumerate(xception_input):
    #print(tensor)
    plt.subplot(331 + i)
    tensor = tensor*255
    image = np.array(tensor, dtype=np.uint8)
    #print(image)
    plt.imshow(image)
I don't know what your code should do, but this gives me mostly empty images, because after preprocess_input the tensor has values like -0.9, and casting to uint8 converts them all to 0.
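If the goal is just to visualize the crops, one option is to undo the preprocessing before casting (a sketch, reusing the question's imports and assuming Xception's preprocess_input, which scales pixels to [-1, 1]):

for i, tensor in enumerate(xception_input):
    plt.subplot(331 + i)
    # Map [-1, 1] back to [0, 255] before casting to uint8.
    image = np.array((tensor + 1.0) * 127.5, dtype=np.uint8)
    plt.imshow(image)
plt.show()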
I am new to VGG19 and image processing in Python. I am trying to test my trained VGG19 model on a single image. I am getting this error:
ValueError: Input 0 is incompatible with layer functional_3: expected shape=(None, 224, 224, 3), found shape=(None, 240, 240, 3)
My TensorFlow code for prediction is:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import load_model
model = load_model('VGG19.h5')
CATEGORIES = ["Pneumonia", "Non-Pneumonia"]
img = cv2.imread('person1_bacteria_1.jpeg')
img = cv2.resize(img,(240,240)) # resize image to match model's expected sizing
img = np.reshape(img,[1,240,240,3]) # return the image with shaping that TF wants.
prediction = model.predict(img)
prediction
But in the case of the .ipynb file, I simply get a warning regarding this.
You are resizing to the wrong shape. Instead of 240×240:
img = cv2.resize(img,(240,240)) # resize image to match model's expected sizing
img = img.reshape(1,240,240,3) # return the image with shaping that TF wants.
use 224×224:
img = cv2.resize(img,(224,224)) # resize image to match model's expected sizing
img = img.reshape(1,224,224,3) # return the image with shaping that TF wants.
Your pretrained model is expecting an input of shape (224,224,3) and you are feeding it (240,240,3), hence the complaint.
So just do:
img = img.reshape(1,224,224,3)
And you are good to go!
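To avoid hardcoding the size, you could also read it from the loaded model; a sketch (model.input_shape is (None, 224, 224, 3) for this model):

# Read the expected input size directly from the loaded model.
_, height, width, channels = model.input_shape
img = cv2.resize(img, (width, height))  # cv2.resize takes (width, height)
img = img.reshape(1, height, width, channels)
prediction = model.predict(img)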
I created my model in PyTorch and it is working really well, but when I want to test just one image (batch_size=1), it always returns the second class (in this case a dog).
I tried to test with batch sizes > 1, and in all those cases it works!
The architecture:
model = models.densenet121(pretrained=True)
for param in model.parameters():
    param.requires_grad = False

from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(1024, 500)),
    ('relu', nn.ReLU()),
    ('fc2', nn.Linear(500, 2)),
    ('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
So my tensors are [batch, 3, 224, 224].
I have tried:
resize
reshape
unsqueeze(0)
The response for a single image is always [[0.4741, 0.5259]].
My Test Code
from PIL import Image
imsize = 256
loader = transforms.Compose([transforms.Scale(imsize), transforms.ToTensor()])

def image_loader(image_name):
    """load image, returns cuda tensor"""
    image = Image.open(image_name)
    image = loader(image).float()
    image = image.unsqueeze(0)
    return image.cuda()
image = image_loader('Cat_Dog_data/test/cat/cat.16.jpg')
with torch.no_grad():
    logits = model.forward(image)
    ps = torch.exp(logits)
    _, predTest = torch.max(ps, 1)
    print(ps)  ## same value in all cases
imagen_mostrar = images[ii].to('cpu')
helper.imshow(imagen_mostrar, title=clas_perro_gato(predTest), normalize=True)
Second Test Code
andrea_data = datasets.ImageFolder(data_dir + '/andrea', transform=test_transforms)
andrealoader = torch.utils.data.DataLoader(andrea_data, batch_size=1, shuffle=True)
dataiter = iter(andrealoader)
images, labels = dataiter.next()
images, labels = images.to(device), labels.to(device)
ps = torch.exp(model.forward(images))
_, predTest = torch.max(ps,1)
print(ps.float())
If I change my batch_size to 1, it always returns a tensor that says it is a dog, [0.43, 0.57] for example.
Thanks!
I realized that my model wasn't in eval mode.
So I just added model.eval(), and now it works for any batch size.
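For reference, a minimal sketch of the fix applied to the test code from the question (model and image_loader as defined there):

model.eval()  # disable dropout and use the running BatchNorm statistics
with torch.no_grad():
    logits = model.forward(image_loader('Cat_Dog_data/test/cat/cat.16.jpg'))
    ps = torch.exp(logits)
    _, predTest = torch.max(ps, 1)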
You can use this code to test a single image with your trained model:
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np

def pre_image(image_path, model):
    img = Image.open(image_path)
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    transform_norm = transforms.Compose([transforms.ToTensor(),
                                         transforms.Resize((224, 224)),
                                         transforms.Normalize(mean, std)])
    # get normalized image
    img_normalized = transform_norm(img).float()
    img_normalized = img_normalized.unsqueeze_(0)
    # input = Variable(image_tensor)
    img_normalized = img_normalized.to(device)
    # print(img_normalized.shape)
    with torch.no_grad():
        model.eval()
        output = model(img_normalized)
        # print(output)
        index = output.data.cpu().numpy().argmax()
        classes = train_ds.classes
        class_name = classes[index]
        return class_name
Example:
predict_class = pre_image("C:/Users/Salio/Desktop/example.jpeg",your_model)
print(predict_class)
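Note that device and train_ds are assumed to already exist in your training script; if they don't, you would need something like:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# train_ds is the ImageFolder dataset the model was trained on, e.g.:
# train_ds = datasets.ImageFolder(train_dir, transform=transform_norm)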
If your model is "correct", it just predicts a dog; you can get the label with torch.argmax(output, dim=1) no matter the batch size.
Anyway, you shouldn't use LogSoftmax as the activation; use torch.nn.BCEWithLogitsLoss as your loss function, remove the activation from your final layer, and output only one neuron (the probability of the image being a dog). It would look like this in your case:
classifier = nn.Sequential(
    OrderedDict(
        [
            ("fc1", nn.Linear(1024, 500)),
            ("relu", nn.ReLU()),
            ("fc2", nn.Linear(500, 1)),
            # See? No activation needed
        ]
    )
)
You can get the correct label with the above network simply by checking output > 0, and you get numerical stability "for free".
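A minimal sketch of how training and prediction change under this setup (the tensors here are illustrative; targets must be floats of shape (batch, 1)):

import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()  # applies the sigmoid internally, numerically stably

# Training step (labels is a 0/1 tensor of shape (batch,)):
# loss = criterion(model(images), labels.float().unsqueeze(1))

logits = torch.tensor([[0.3], [-1.2]])  # example raw outputs
is_dog = logits > 0                     # True where the model predicts "dog"
probs = torch.sigmoid(logits)           # actual probabilities, if needed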
I want to modify the values of each layer during the prediction process. I have one convolution layer, a dense layer, and an output layer (softmax).
I want to modify the result of the convolution layer's output before passing it to the hidden layers at prediction time.
import tensorflow as tf
from keras import backend as K
from keras.utils import to_categorical
# make a prediction for a new image.
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import pandas as pd

# load and prepare the image
def load_image(img):
    # load the image
    #img = load_img(filename, grayscale=True, target_size=(28, 28))
    # convert to array
    #img = img_to_array(img)
    # reshape into a single sample with 1 channel
    img = img.reshape(1, 28, 28, 1)
    # prepare pixel data
    img = img.astype('uint8')
    img = img #/ 255
    return img
data_test = pd.read_csv('mnistdata/mnist_testE.csv')
X_test = np.array(data_test.iloc[:, 1:])
y_test = to_categorical(np.array(data_test.iloc[:, 0]))
# load an image and predict the class
#X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
#def run_example():
# load the image
img = load_image(X_test[1])
# load model
model = load_model('final_modelv4.h5')
digit = model.predict_classes(img)
print(digit)
Can anyone guide me on how to modify this code to change the values of each layer's results?
Providing the solution here (Answer Section), even though it is present in the Comments Section, for the benefit of the community.
You can convert a TensorFlow model into a compressed flat buffer with the TensorFlow Lite Converter, and quantize it by converting 32-bit floats to more efficient 8-bit integers, or run it on the GPU.
You can find details about TensorFlow Lite here.
This page will be most relevant for your query: Get started with TensorFlow Lite.
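A minimal conversion sketch (assuming the final_modelv4.h5 model from the question and default post-training quantization):

import tensorflow as tf

model = tf.keras.models.load_model('final_modelv4.h5')
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # post-training quantization
tflite_model = converter.convert()
with open('final_modelv4.tflite', 'wb') as f:
    f.write(tflite_model)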
Hello, I am trying to build my first RNN using Keras and TensorFlow, but I am getting stuck on an issue with reshaping my images to fit into the model.
I have looked at this post but could not figure out the reshaping:
Keras - Input a 3 channel image into LSTM
What I have is a bunch of images taken at every frame of a video. I saved all the frames outside of Python, so I have a very large folder of images. I separated the frames into segments of 21 frames, so there are 21 images per motion that I want to capture. I want to read in these 21 images as one sequence. I have the same sequence captured from multiple cameras/angles, which I want to use in this model. What I want to try is to model a movement and see if a person is doing this movement or not, so it is basically a binary yes/no model. Not the most sophisticated, but it's a learning process for using this model and Keras.
I need help figuring out how to use these images inside the Keras model. I have looked at a few tutorials on the MNIST dataset, but that didn't help me figure this out.
Any help will be appreciated.
This is the error that is given to me when I try to train the model:
ValueError: Error when checking input: expected lstm_1_input to have 3 dimensions, but got array with shape (2026, 200, 200, 1)
My code is this:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from tqdm import tqdm
import cv2
import os
import numpy as np
imageSize = 200
# create labels for each image
def labelImage(img):
    wordLabel = img.split('.')[-3]
    # Conversion to one hot array [lat,not]
    if wordLabel == "FWAC":
        return [1, 0]
    else:
        return [0, 1]
# Process images and add labels
# Convert data into an array and add its label
def makeTrainingData():
    print("Creating Training Data")
    trainingData = []
    for img in tqdm(os.listdir(trainDir)):
        label = labelImage(img)
        path = os.path.join(trainDir, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (imageSize, imageSize))
        trainingData.append([np.array(img), np.array(label)])
    # Save the array file to load it into other models if needed
    np.save("trainingData.npy", trainingData)
    print("Training Data Saved")
    return trainingData
#process the testing data in the same manner
def processTestData():
    print("Creating Testing Data")
    testData = []
    for img in tqdm(os.listdir(testDri)):
        print("image", img)
        path = os.path.join(testDri, img)
        imgNum = img.split(".")[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (imageSize, imageSize))
        testData.append([np.array(img), imgNum])
    np.save("testingData.npy", testData)
    print("Testing Data Saved")
    return testData
rnnSize = 512
model = Sequential()
model.add(LSTM(rnnSize, input_shape=(imageSize, imageSize)))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
#Data
trainDir = "D:/TrainingDataSets/TrainingSet/"
testDri = "D:/TrainingDataSets/TestingSet/"
#trainData = makeTrainingData()
#testData = processTestData()
trainData = np.load('trainingData.npy')
testData = np.load("testingData.npy")
# resize the image to this size; see above
train = trainData[:-500]
test = trainData[-200:]
x = []
y = []
for xi in trainData:
    x.append(xi[0].reshape((-1, imageSize, imageSize)))
    y.append(xi[1])
x_train = np.array([i[0] for i in train]).reshape(-1,imageSize, imageSize,1)
y_train = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,imageSize , imageSize,1)
test_y = [i[1] for i in test]
epoch = 5
batchSize = 100
model.fit(x_train, y_train, epochs=epoch, batch_size= batchSize, verbose=1, shuffle=False)
For the error, add this line before the dense layers:
model.add(Flatten())
Beforehand, you should import:
from keras.layers import Flatten
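A sketch of where that would sit in the model from the question. Two assumptions worth flagging: the LSTM needs return_sequences=True for Flatten to have anything to collapse (with the default return_sequences=False its output is already 2-D), and the (2026, 200, 200, 1) training array would still need its trailing channel axis dropped, e.g. with np.squeeze, to match the 3-D input_shape:

from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import LSTM

rnnSize = 512
imageSize = 200

model = Sequential()
# Each image is fed as a sequence of imageSize rows of imageSize pixels.
model.add(LSTM(rnnSize, input_shape=(imageSize, imageSize), return_sequences=True))
model.add(Flatten())  # collapse (timesteps, units) into one vector for the Dense layers
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])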