Loading a NumPy array into a TensorFlow input pipeline - Python

So I am following a tutorial for making a dataloader for images (https://github.com/codebasics/deep-learning-keras-tf-tutorial/blob/master/44_tf_data_pipeline/tf_data_pipeline.ipynb).
The full code is something like this:
images_ds = tf.data.Dataset.list_files("path/class/*")

def get_label(file_path):
    import os
    parts = tf.strings.split(file_path, os.path.sep)
    return parts[-2]

## How the tutorial does it
def process_image(file_path):
    label = get_label(file_path)
    img = tf.io.read_file(file_path)
    img = tf.image.decode_jpeg(img)
    return img, label

## How I want to do it
def process_image(file_path):
    label = get_label(file_path)
    img = np.load(file_path)
    img = tf.convert_to_tensor(img)
    return img, label

train_ds = images_ds.map(process_image)
In the tutorial, the data is a .jpeg. However, my data is a .npy.
Therefore, loading the data with the following code does not work:
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img)
I want to work around this problem, but my solution does not work.
img = np.load(file_path)
img = tf.convert_to_tensor(img)
It does work when I feed process_image a single file path directly. However, when I use the .map function, I get an error.
Error:
TypeError: expected str, bytes or os.PathLike object, not Tensor
Is there an equivalent function to tf.image.decode_image() for decoding a numpy array and/or can someone help me with my current error?

The comment from @André put me in the right direction. The code below works.
def process_image(file_path):
    label = get_label(file_path)
    label = np.uint8(label)
    img = np.load(file_path)
    img = tf.convert_to_tensor(img / 255, dtype=tf.float32)
    return img, label

train_ds = images_ds.map(lambda item: tf.numpy_function(
    process_image, [item], (tf.float32, tf.uint8)))
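One caveat worth adding, not from the original answer: tf.numpy_function discards static shape information, so later layers may complain about unknown shapes. A minimal sketch of how the shapes could be declared after the map, assuming the .npy files hold 128x128x3 images (the sizes are placeholders; adjust them to your data):

def set_shapes(img, label):
    # Shapes are unknown after tf.numpy_function; declare them explicitly.
    img.set_shape((128, 128, 3))
    label.set_shape(())
    return img, label

train_ds = train_ds.map(set_shapes)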

Related

Error while trying to predict on images in TensorFlow

I am trying to make a website that can make predictions on images using TensorFlow, Flask, and Python.
This is my code:
from flask import Flask, render_template
import os
import numpy as np
import pandas as pd

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

import tensorflow as tf
import tensorflow_hub as hub

model = tf.keras.models.load_model(MODEL_PATH)

IMG_SIZE = 224
BATCH_SIZE = 32

custom_path = "http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"
custom_data = create_data_batches(custom_path, test_data=True)
custom_preds = model.predict(custom_data)

# Get custom image prediction labels
custom_pred_labels = [get_pred_label(custom_preds[i]) for i in range(len(custom_preds))]
print(custom_pred_labels)

@app.route('/my-link/')
def my_link():
    return f"The predictions are: {custom_pred_labels}"

if __name__ == '__main__':
    app.run(host="localhost", port=3000, debug=True)
The process_image function:
def process_image(image_path, img_size=IMG_SIZE):
    """
    Takes an image file path and turns the image into a Tensor.
    """
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, size=[img_size, img_size])
    return image
The needed part of the create_data_batches function:
def create_data_batches(X, y=None, batch_size=BATCH_SIZE, valid_data=False, test_data=False):
    """
    Creates batches of data out of image (X) and label (y) pairs.
    Shuffles the data if it's training data but doesn't shuffle it if it's validation data.
    Also accepts test data as input (no labels).
    """
    if test_data:
        print("Creating test data batches...")
        data = tf.data.Dataset.from_tensor_slices((tf.constant(X)))  # only filepaths (no labels)
        data_batch = data.map(process_image).batch(BATCH_SIZE)
        return data_batch
The get_image_label function:
def get_image_label(image_path, label):
    """
    Takes an image file path name and the associated label, processes the image and returns a tuple of (image, label).
    """
    image = process_image(image_path)
    return image, label
The get_pred_label function:
def get_pred_label(prediction_probabilites):
    """
    Turns an array of prediction probabilities into a label.
    """
    return unique_breeds[np.argmax(prediction_probabilites)]
Now when I run this, I get the following error:
ValueError: Unbatching a tensor is only supported for rank >= 1
I tried turning it into a list, as one of the solutions I found suggested:
custom_path = ["http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"]
But when I run that, I get this error:
UNIMPLEMENTED: File system scheme 'http' not implemented (file: 'http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0')
Any help would be appreciated.
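Not part of the original question, but for context: tf.io.read_file only understands file systems that TensorFlow has registered (local paths, GCS, and so on), so an http:// URL cannot be read directly. A minimal sketch of one way around both errors, assuming it is acceptable to download the image to a local file first with tf.keras.utils.get_file and to wrap the path in a list so the dataset tensor has rank >= 1:

custom_url = "http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"

# Download to the local Keras cache; tf.io.read_file can then open the returned path.
local_path = tf.keras.utils.get_file("custom_image.jpg", origin=custom_url)

# A list of paths gives from_tensor_slices a rank-1 tensor to slice.
custom_data = create_data_batches([local_path], test_data=True)
custom_preds = model.predict(custom_data)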

cuMemcpyHtoDAsync failed: invalid argument when using TensorRT (Python)

I am trying to copy a NumPy array to the GPU using TensorRT in Python, but I keep getting the error 'cuMemcpyHtoDAsync failed: invalid argument'. The array has the correct format (float32) and size, but the error remains. Does anyone have an idea of what I am doing wrong or how I can fix this error?
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import cv2

def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    cuda.init()
    device = cuda.Device(0)
    ctx = device.make_context()
    stream = cuda.Stream()
    # stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(host_mem)
        else:
            outputs.append(host_mem)
    return inputs, outputs, bindings, stream

def do_inference(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp, i, stream) for inp, i in zip(bindings[:len(inputs)], inputs)]
    # Run inference.
    context.execute_async(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out, o, stream) for out, o in zip(outputs, bindings[len(inputs):])]
    # Synchronize the stream
    stream.synchronize()

def detect_objects(image, engine, context, threshold=0.5):
    # Preprocess the image
    image = cv2.resize(image, (640, 640))
    image = np.transpose(image, (2, 0, 1))
    image = np.expand_dims(image, axis=0)
    # Allocate buffers
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    # inputs[0] = np.ascontiguousarray(image)
    inputs[0] = np.ascontiguousarray(image, dtype=np.float32) / 255.0
    print(inputs[0].shape)
    print(inputs[0].dtype)
    # Run inference
    do_inference(context, bindings, inputs, outputs, stream)
    # Postprocess the outputs
    outputs = outputs[0]
    outputs = outputs[outputs[:, 0] > threshold]
    # Get the bounding boxes
    boxes = outputs[:, 1:]
    return boxes

# Load the engine
engine = trt.Runtime(trt.Logger(trt.Logger.WARNING)).deserialize_cuda_engine(open("Modelle/best.engine", "rb").read())
context = engine.create_execution_context()

# Read the image
image = cv2.imread("Test.jpg")

# Detect objects in the image
boxes = detect_objects(image, engine, context)
print(boxes)
Or am I doing something fundamentally wrong when loading the TensorRT engine file? Is there another way to detect objects in an image?
Thanks
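Not from the original post: a frequent cause of 'cuMemcpyHtoDAsync failed: invalid argument' with pycuda is that the host buffer passed to the copy is no longer the page-locked array allocated in allocate_buffers; here inputs[0] is replaced with a fresh NumPy array whose size and layout may not match the binding. A minimal sketch, assuming the engine's input binding really is 1x3x640x640 float32, that copies the preprocessed image into the existing page-locked buffer instead of rebinding it:

# Preprocess as before, then copy into the flat page-locked host buffer
# created by cuda.pagelocked_empty(size, dtype); sizes must match the binding volume.
preprocessed = np.ascontiguousarray(image, dtype=np.float32) / 255.0
np.copyto(inputs[0], preprocessed.ravel())

# The host buffers now line up with the device bindings, so the async copy has a valid source.
do_inference(context, bindings, inputs, outputs, stream)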

How to change tensor image dimensions with a manually defined class in TensorFlow?

I have a manually defined class for loading images in TensorFlow 2.
class MasterImage0(object):

    def __init__(self, PATH='', IMAGE_SIZE=128):
        self.PATH = PATH
        self.IMAGE_SIZE = IMAGE_SIZE
        self.image_data = []
        self.x_data = []
        self.y_data = []
        self.CATEGORIES = []
        # This will get List of categories
        self.list_categories = []

    def get_categories(self):
        for path in os.listdir(self.PATH):
            if '.DS_Store' in path:
                pass
            else:
                self.list_categories.append(path)
        print("Found Categories ", self.list_categories, '\n')
        return self.list_categories

    def process_image(self):
        try:
            """
            Return Numpy array of image
            :return: X_Data, Y_Data
            """
            self.CATEGORIES = self.get_categories()
            for categories in self.CATEGORIES:                              # Iterate over categories
                train_folder_path = os.path.join(self.PATH, categories)     # Folder Path
                class_index = self.CATEGORIES.index(categories)             # this will get index for classification
                for img in os.listdir(train_folder_path):                   # This will iterate in the Folder
                    new_path = os.path.join(train_folder_path, img)         # image Path
                    try:                                                    # if any image is corrupted
                        image_data_temp = cv2.imread(new_path)              # Read Image as numbers
                        image_temp_resize = cv2.resize(image_data_temp, (self.IMAGE_SIZE, self.IMAGE_SIZE))
                        self.image_data.append([image_temp_resize, class_index])
                        random.shuffle(self.image_data)
                    except:
                        pass
            data = np.asanyarray(self.image_data)
            # Iterate over the Data
            for x in data:
                self.x_data.append(x[0])        # Get the X_Data
                self.y_data.append(x[1])        # get the label
            X_Data = np.asarray(self.x_data) / (255.0)      # Normalize Data
            Y_Data = np.asarray(self.y_data)
            # reshape x_Data
            X_Data = X_Data.reshape(-1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
            print("Compiled X_data", self.X_data)
            return X_Data, Y_Data
        except:
            print("Failed to run Function Process Image ")

    def pickle_image(self):
        """
        :return: None Creates a Pickle Object of DataSet
        """
        # Call the Function and Get the Data
        X_Data, Y_Data = self.process_image()
        # Write the Entire Data into a Pickle File
        pickle_out = open('X_Data', 'wb')
        pickle.dump(X_Data, pickle_out)
        pickle_out.close()
        # Write the Y Label Data
        pickle_out = open('Y_Data', 'wb')
        pickle.dump(Y_Data, pickle_out)
        pickle_out.close()
        print("Pickled Image Successfully ")
        return X_Data, Y_Data
        print(self.X_Data)

    def load_dataset(self):
        try:
            # Read the Data from Pickle Object
            X_Temp = open('..\SeekThermal\X_Data', 'rb')
            X_Data = pickle.load(X_Temp)
            Y_Temp = open('..\SeekThermal\Y_Data', 'rb')
            Y_Data = pickle.load(Y_Temp)
            print('Reading Dataset from Pickle Object')
            return X_Data, Y_Data
        except:
            print('Could not Found Pickle File ')
            print('Loading File and Dataset ..........')
            X_Data, Y_Data = self.pickle_image()
            print("X_data", self.X_Data)
            return X_Data, Y_Data
So I can load my training images like this, and the shape of my tensor is 100×100×3.
a = MasterImage0(PATH=path_train, IMAGE_SIZE=128)
# a.load_dataset()
(train_images, train_labels) = a.load_dataset()
print(train_images.shape)
Out: (4732, 100, 100, 3)
The original image size is 300x400 pixels. I do not understand why I cannot change the tensor shape to 128 instead of 100, for example. I thought I could do that by setting IMAGE_SIZE = 128?
Where does the 100 come from? Thank you.
cv2.resize() and np.ndarray.reshape() work correctly; if they did not, they would throw an exception. Obviously no exception was thrown here, so your setting of IMAGE_SIZE=128 simply had no effect.
The problem is therefore most likely caused by pickle, which cached your code's output (all the resized and reshaped images) the first time you ran it and reused that cache when you re-ran the code.
If so, I suspect you must have run once with IMAGE_SIZE=100, caching all the (*, 100, 100, 3) results, and now that you run with IMAGE_SIZE=128, the cached (pickled) files of shape (*, 100, 100, 3) are simply returned, leading to this problem.
You can delete all the cached files in ..\SeekThermal and try again
or
rewrite the try part of load_dataset like the following to give each cached file a size stamp
...
try:
    # Read the Data from Pickle Object
    X_Temp = open(f'..\SeekThermal\X_Data_{self.IMAGE_SIZE}', 'rb')
    X_Data = pickle.load(X_Temp)
    Y_Temp = open(f'..\SeekThermal\Y_Data_{self.IMAGE_SIZE}', 'rb')
    Y_Data = pickle.load(Y_Temp)
    print('Reading Dataset from Pickle Object')
    return X_Data, Y_Data
...
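For the stamp to have any effect, the write side presumably needs the same naming. A minimal sketch of how pickle_image could be adjusted to match; the file names here mirror the original pickle_image and are assumptions, not part of the answer above:

# Write the pickles with the same size stamp so load_dataset can find them.
pickle_out = open(f'X_Data_{self.IMAGE_SIZE}', 'wb')
pickle.dump(X_Data, pickle_out)
pickle_out.close()

pickle_out = open(f'Y_Data_{self.IMAGE_SIZE}', 'wb')
pickle.dump(Y_Data, pickle_out)
pickle_out.close()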

pic should be Tensor or ndarray. Got <class 'NoneType'>

I am a beginner in PyTorch. I want to train a network using the NYU dataset, but I am getting an error.
The error happens when I use the DataLoader to load my local dataset, and I print the data just to check that the code is right:
test = Mydataset(data_root, transforms, 'image_train')
test2 = DataLoader(test, batch_size=4, num_workers=0, shuffle=False)
for idx, data in enumerate(test2):
    print(idx)
Here's the rest of the code with the Mydataset definition:
from __future__ import division, absolute_import, print_function
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import transforms

data_root = 'D:/AuxiliaryDocuments/NYU/'

transforms = transforms.Compose([transforms.ToPILImage(),
                                 transforms.Resize(224, 101),
                                 transforms.ToTensor()])

filename_txt = {'image_train': 'image_train.txt', 'image_test': 'image_test.txt',
                'depth_train': 'depth_train.txt', 'depth_test': 'depth_test.txt'}

class Mydataset(Dataset):
    def __init__(self, data_root, transformation, data_type):
        self.transform = transformation
        self.image_path_txt = filename_txt[data_type]
        self.sample_list = list()
        f = open(data_root + '/' + data_type + '/' + self.image_path_txt)
        lines = f.readlines()
        for line in lines:
            line = line.strip()
            line = line.replace(';', '')
            self.sample_list.append(line)
        f.close()

    def __getitem__(self, index):
        item = self.sample_list[index]
        img = Image.open(item)
        if self.transform is not None:
            img = self.transform(img)
        idx = index
        return idx, img

    def __len__(self):
        return len(self.sample_list)
The error in the title is different from the one in the image (which you should have posted as text, by the way). Assuming the one from the image is correct, your problem is the following:
Your transforms pipeline begins with transforms.ToPILImage(), but the image is already opened as a PIL image by Image.open in __getitem__. If you remove that transformation, the code should run just fine.
# [...]
transforms = transforms.Compose([
    transforms.ToPILImage(),   # <<< remove this
    transforms.Resize(224, 101),
    transforms.ToTensor()
])
# [...]

class Mydataset(Dataset):
    # [...]
    def __getitem__(self, index):
        item = self.sample_list[index]
        img = Image.open(item)   # <<< this image is already a PIL image
        if self.transform is not None:
            img = self.transform(img)
        idx = index
        return idx, img
# [...]
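A side note that is not part of the answer above: torchvision's Resize takes the target size as a single int or an (h, w) sequence, so Resize(224, 101) passes 101 into the interpolation argument rather than using it as a dimension. A minimal sketch of the pipeline with both fixes applied, assuming the intended output size really is 224x101:

from torchvision import transforms as T

transform = T.Compose([
    T.Resize((224, 101)),   # size as an (h, w) tuple; acts on the PIL image from Image.open
    T.ToTensor(),            # convert to a float tensor in [0, 1]
])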

Getting a ValueError when converting a Python list into a NumPy array

I'm working with a piece of code written by someone else for domain generalization, and as part of it I have a dataloader set up for loading my training, validation, and test data for one of my datasets. The code works fine when I load the train or test data, but when I try to load the val data I get ValueError: could not broadcast input array from shape (320,371) into shape (320) in the load_samples function, at the images = np.asarray(images) line.
I understand what this error is saying, but I can't for the life of me figure out why it's happening. The code for the val section is identical to the train and test sections, and the csv file I'm reading from has the exact same format as the other two csv files. I'm also calling the get_chexpert function for each of them in the exact same way. Additionally, the dataloader for my other dataset has nearly identical code to this one and can create the validation set just fine. I tried testing whether it was the csv file by replacing the val csv with the test csv, but I still get the same error.
Can anyone point out what I'm doing wrong? I feel like it must be some stupidly obvious mistake, but I just can't see it.
import os
import csv
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
from torchvision import datasets, transforms
import params

class Chexpert(data.Dataset):
    def __init__(self, root, train=True, val=False, transform=None):
        """Init chexpert dataset."""
        # init params
        self.root = os.path.expanduser(root)
        self.train = train
        self.val = val
        self.transform = transform
        self.dataset_size = None
        self.train_data, self.train_labels = self.load_samples()
        if self.train:
            total_num_samples = self.train_labels.shape[0]
            indices = np.arange(total_num_samples)
            np.random.shuffle(indices)
            self.train_data = self.train_data[indices[0:self.dataset_size]]
            self.train_labels = self.train_labels[indices[0:self.dataset_size]]

    def __getitem__(self, index):
        """Get images and target for data loader.

        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, label = self.train_data[index], self.train_labels[index]
        if self.transform is not None:
            img = self.transform(img)
        label = torch.LongTensor([np.int64(label).item()])
        return img, label

    def __len__(self):
        """Return size of dataset."""
        return self.dataset_size

    def load_samples(self):
        """Load sample images from dataset."""
        # some arbitrary limits so I'm not loading 100,000 images while debugging
        numtr = 50
        numts = 20
        numvl = 10
        data_root = os.path.join(self.root, 'CheXpert-v1.0-small')
        images = []
        labels = []
        if self.val:
            val_info = csv.reader(open(os.path.join(data_root, 'effusion-val-split.csv'), 'r'))
            for count, row in enumerate(val_info):
                if count == numvl:
                    break
                image = np.array(Image.open(os.path.join(self.root, row[0])))
                images.append(image)
                labels.append(row[1])
        elif self.train:
            train_info = csv.reader(open(os.path.join(data_root, 'effusion-train-split.csv'), 'r'))
            for count, row in enumerate(train_info):
                if count == numtr:
                    break
                image = np.array(Image.open(os.path.join(self.root, row[0])))
                images.append(image)
                labels.append(row[1])
        elif not self.val and not self.train:
            test_info = csv.reader(open(os.path.join(data_root, 'effusion-test-split.csv'), 'r'))
            for count, row in enumerate(test_info):
                if count == numts:
                    break
                image = np.array(Image.open(os.path.join(self.root, row[0])))
                images.append(image)
                labels.append(row[1])
        images = np.asarray(images)
        labels = np.asarray(labels)
        self.dataset_size = labels.shape[0]
        return images, labels

def get_chexpert(train, val):
    """Get chexpert dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToPILImage(),
                                      transforms.Resize((224, 224)),
                                      transforms.ToTensor(),
                                      # transforms.Normalize(
                                      #     mean=params.dataset_mean,
                                      #     std=params.dataset_std)
                                      ])

    # dataset and data loader
    chexpert_dataset = Chexpert(root=params.data_root,
                                train=train,
                                val=val,
                                transform=pre_process)

    chexpert_data_loader = torch.utils.data.DataLoader(
        dataset=chexpert_dataset,
        batch_size=params.batch_size,
        shuffle=True)

    return chexpert_data_loader

if __name__ == '__main__':
    # load dataset
    print("Loading Source Train Data")
    src_data_loader = get_chexpert()
    print("Loading Source Validation Data")
    src_data_loader_val = get_chexpert(train=False, val=True)
    print("Loading Source Test Data")
    src_data_loader_eval = get_chexpert(train=False)
    print("Loading Target Train Data")
    tgt_data_loader = get_nih()
    print("Loading Target Validation Data")
    tgt_data_loader_val = get_nih(train=False, val=True)
    print("Loading Target Test Data")
    tgt_data_loader_eval = get_nih(train=False)
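Not part of the original post: this broadcast error usually means that the images appended for the val split do not all share the same shape, so np.asarray cannot stack them into a single array. A minimal sketch of one way to work around that, assuming it is acceptable to resize every image to a common size at load time (224x224 here is an assumption, chosen because the later transform resizes to that size anyway):

def load_image(path, size=(224, 224)):
    # Resize at load time so every array has the same shape and np.asarray can stack them.
    img = Image.open(path)
    img = img.resize(size)
    return np.array(img)

# Inside load_samples, e.g. for the val branch:
#     image = load_image(os.path.join(self.root, row[0]))
#     images.append(image)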
