AttributeError: 'decode' when reading TIFF images - python

Here is part of the code I am attempting to run:
import numpy as np
import os
import tensorflow as tf
import imageio
import sys
# Create TensorFlow dataset
dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels))
if not is_test:
    dataset = dataset.shuffle(num_of_samples)
dataset = dataset.repeat(None)
dataset = dataset.map(self._parse_dataset)
if not is_test:
    batched_dataset = dataset.batch(self.batch_size, drop_remainder=True).prefetch(20)
else:
    batched_dataset = dataset.batch(self.test_batch_size)
# Create the iterator
return batched_dataset, num_of_samples, path_strings

def get_batch(self, subset="train"):
    batch_of_images = self.iterators[subset].get_next()
    return batch_of_images

def _read_tif(self, file_path):
    file_path = file_path.decode(sys.getdefaultencoding())
    try:
        im = imageio.imread(file_path)
    except:
        im = np.zeros((self.width, self.height, 3))
    if len(im.shape) != 3:
        im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
    return im

def _read_image(self, file_path):
    return tf.py_function(func=self._read_tif, inp=[file_path], Tout=tf.uint8)
and I have the following error coming up:
File "C:\PROJECTS_RUNNING2\pipeline\data_loader\data_generator.py", line 131, in _read_tif
file_path = file_path.decode(sys.getdefaultencoding())
AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'decode'
The file_path is defined in the run.py and looks like this:
def main(config_file_path):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    config = tf.ConfigProto(gpu_options=gpu_options)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    tf.reset_default_graph()
    config = parse_config_file(config_file_path)
    # Create the experiment output folders; this is where the outputs will be saved
    output_folder_path = config["output_path"]
    output_path = create_output_folder(output_folder_path, config["experiment_name"])
    copyfile(config_file_path, os.path.join(output_path, "%s_parameters.json" % config["experiment_name"]))
    data_generator = DataGenerator(config)
Input and Output dataset file paths are correctly defined in the config file.
I am very much a beginner in coding, but I have to use this script to analyse my images, and I am struggling to get it up and running. I'm using Python 3.7 and TensorFlow 1.14. Any help resolving this error will be very much appreciated!
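A likely explanation, hedged: tf.py_function hands the wrapped Python function an EagerTensor rather than raw bytes, so there is no .decode on it; calling .numpy() first yields the bytes object that can then be decoded. A minimal sketch of how _read_tif could handle this, keeping the rest of the class unchanged:

def _read_tif(self, file_path):
    # tf.py_function passes an EagerTensor; .numpy() extracts the underlying
    # bytes of the string tensor, which can then be decoded to a Python str.
    if hasattr(file_path, 'numpy'):
        file_path = file_path.numpy()
    if isinstance(file_path, bytes):
        file_path = file_path.decode(sys.getdefaultencoding())
    try:
        im = imageio.imread(file_path)
    except Exception:
        im = np.zeros((self.width, self.height, 3))
    if len(im.shape) != 3:
        im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
    return im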

Related

Running tensorflow on server Python

I am trying to use TensorFlow on an iPage server running CentOS 7.
I don't know if there is a GPU or not, but I get this error message:
kernel driver does not appear to be running on this host /proc/driver/nvidia/version does not exist
I tried this code
from tensorflow import config
config.set_soft_device_placement = True
and also this code, trying to prevent TensorFlow from using the GPU:
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
but I kept receiving the same error message
Note:
I am trying to run a face recognition script on the server, called by PHP through the terminal using the exec() function. The image is passed as a base64 string and decoded; then a new face is added, or existing faces are fetched and compared, and the result is saved in the database.
And this is my script:
import face_recognition as fr
from tensorflow.keras.models import model_from_json
import numpy as np
from PIL import Image
import base64
import io
import json
import cv2
from tensorflow import config
config.set_soft_device_placement = True

def check_if_spoof(image_string_base64):
    # decoding the image
    msg = base64.b64decode(image_string_base64)
    buf = io.BytesIO(msg)
    img = Image.open(buf).convert("RGB")
    opencv_image = np.array(img)
    test_image = opencv_image[:, :, ::-1].copy()
    json_file = open('antispoofing_models/antispoofing_model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load antispoofing model weights
    model.load_weights('antispoofing_models/antispoofing_model.h5')
    resized_face = cv2.resize(test_image, (160, 160))
    resized_face = resized_face.astype("float") / 255.0
    # resized_face = img_to_array(resized_face)
    resized_face = np.expand_dims(resized_face, axis=0)
    # pass the face ROI through the trained liveness detector
    # model to determine if the face is "real" or "fake"
    preds = model(resized_face)[0]
    if preds > 0.5:
        return True
    else:
        return False

def decode_img(image_string_base64):
    msg = base64.b64decode(image_string_base64)
    buf = io.BytesIO(msg)
    img = Image.open(buf)
    return img

def resize_image(image_string_base64, height=500):
    image = decode_img(image_string_base64)
    height_percent = (height / float(image.size[1]))
    width_size = int((float(image.size[0]) * float(height_percent)))
    image = image.resize((width_size, height), Image.NEAREST)
    return np.array(image)

def add_person(image_string_base64):
    if check_if_spoof(image_string_base64):
        return {'error': 'image is fake'}
    img = np.array(decode_img(image_string_base64))
    img_encodings = fr.face_encodings(img, num_jitters=4)
    return np.array(img_encodings)

def get_person(image_string_base64, data, tolerance=0.35):
    if check_if_spoof(image_string_base64):
        return {'error': 'image is fake'}
    data = json.loads(data)
    resized_img = resize_image(image_string_base64)
    img_encoding = fr.face_encodings(resized_img)
    ############## testing purpose only #########
    # return fr.compare_faces(data,img_encoding)
    #############################################
    # type of received data is: { id ->number : encoding->list }
    for user in data:
        for id_, encoding in user:
            if True in fr.compare_faces(encoding, img_encoding):
                return id_
    return {'error': "user not existed"}
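A hedged note: the "kernel driver does not appear to be running" message is usually only informational when no GPU is present, and TensorFlow then falls back to the CPU. Also, set_soft_device_placement is a function rather than an attribute, and CUDA_VISIBLE_DEVICES generally needs to be set before TensorFlow is imported for it to take effect. A minimal sketch of how the top of the script could look under those assumptions:

import os
# Hide any GPU before TensorFlow is imported, so it initializes CPU-only.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

import tensorflow as tf
# set_soft_device_placement is a function call, not an attribute assignment.
tf.config.set_soft_device_placement(True)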

AttributeError: 'str' object has no attribute 'name' when using Streamlit

I've been trying to replicate the demo website from this repo using Streamlit.
But I'm stuck when I try to process the image with the model. The error message is AttributeError: 'str' object has no attribute 'name'. But in data.py, the code that reads the image, there is no 'name' attribute. Or am I missing something here?
This is the code snippet:
streamlitdemo.py
@st.cache()
def load_model():
    gpu_ids = []
    model = create_model(gpu_ids)
    model.eval()
    return model

a = 'wave.jpg'
b = 'building.jpg'
c = 'test_samples/madoka.jpg'

def anime2sketch(img_input, load_size=512):
    img, aus_resize = read_img_path(img_input.name, load_size)
    model = load_model()
    aus_tensor = model(img)
    aus_img = tensor_to_img(aus_tensor)
    image_pil = Image.fromarray(aus_img)
    image_pil = image_pil.resize(aus_resize, Image.BICUBIC)
    return image_pil
demo.py
.
.
.
def read_img_path(path, load_size):
    """Read tensors from a given image path.

    Parameters:
        path (str)      -- input image path
        load_size (int) -- the input size. If <= 0, don't resize
    """
    img = Image.open(path).convert('RGB')
    aus_resize = None
    if load_size > 0:
        aus_resize = img.size
    transform = get_transform(load_size=load_size)
    image = transform(img)
    return image.unsqueeze(0), aus_resize
model.py
.
.
.
def create_model(gpu_ids=[]):
    """Create a model for anime2sketch.

    Hard-coding the options for simplicity.
    """
    norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
    ckpt = torch.load('weights/netG.pth')
    for key in list(ckpt.keys()):
        if 'module.' in key:
            ckpt[key.replace('module.', '')] = ckpt[key]
            del ckpt[key]
    net.load_state_dict(ckpt)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    return net
But when I hardcode the path with the a/b/c variables, the model works properly. I've also already changed read_img_path(img_input.name, load_size) to read_img_path(img_input, load_size), and then I get a FileNotFoundError: [Errno 2] No such file or directory: 'wave' error message.
This is the output when I hardcode the path:
In that repo, the author already provides a demo website, but it uses Gradio. When I tried to run the demo code with Gradio, it worked properly. I'm using the same code from the author, only tweaked a little bit.
Thank you.
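A hedged observation: Image.open accepts both a path string and a file-like object, so read_img_path can be given either a real path or the object returned by Streamlit's uploader directly, without going through .name (which on an uploaded file is only the original filename, not a path on disk). A minimal sketch under the assumption that the image comes from st.file_uploader:

import streamlit as st

uploaded = st.file_uploader("Choose an image")   # UploadedFile or None
if uploaded is not None:
    # Pass the file-like object straight through; Image.open inside
    # read_img_path can read it just like a filesystem path.
    img, aus_resize = read_img_path(uploaded, load_size=512)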

Cannot get prediction from google AI platform with Pytorch

I've deployed a custom Pytorch model to the Google AI platform for prediction, but when I try to make a prediction request with image data using gcloud tools I get the following error in response:
{
    "error": "Prediction failed: unknown error."
}
I've tried to encode my image data in b64 format or to place it into a multidimensional python array, by doing the following:
pil_im = Image.open('Pic512.png')
pil_im = pil_im.resize((224, 224)).convert('RGB')
im_arr = np.asarray(pil_im)
py_arr = im_arr.tolist()
json_instance_1 = {'instances': py_arr}
with open('json_instance_1.json', 'w') as f:
    json.dump(json_instance_1, f)
I converted it into b64 like so, after adjusting my Predictor code accordingly:
with open('Pic512.png', 'rb') as f:
    byte_im = f.read()
json_instance = {'instances': {'b64': base64.b64encode(byte_im).decode()}}
with open('json_instance.json', 'w') as f:
    json.dump(json_instance, f)
I've tried converting with different file formats and similar methods, but all of them give me the same error.
My predictor module:
from facenet_pytorch import MTCNN, InceptionResnetV1, extract_face
import torch
from torchvision import transforms
from torch.nn import functional as F
from PIL import Image
# from sklearn.externals import joblib
import numpy as np
import os
import io
import base64

class MyPredictor(object):
    """An example Predictor for an AI Platform custom prediction routine."""

    def __init__(self, model, preprocessor, device):
        """Stores artifacts for prediction. Only initialized via `from_path`."""
        self._resnet = model
        self._mtcnn_mult = preprocessor
        self._device = device
        self.get_std_tensor = transforms.Compose([
            np.float32,
            np.uint8,
            transforms.ToTensor(),
        ])
        self.tensor2pil = transforms.ToPILImage(mode='RGB')
        self.trans_resnet = transforms.Compose([
            transforms.Resize((100, 100)),
            np.float32,
            transforms.ToTensor()
        ])

    def predict(self, instances, **kwargs):
        pil_transform = transforms.Resize((512, 512))
        imarr = np.uint8(np.array(instances))
        # img_bytes_string = io.BytesIO(base64.b64decode(instances))
        pil_im = Image.fromarray(imarr)
        # pil_im = Image.open(img_bytes_string)
        image = pil_im.convert('RGB')
        pil_im_512 = pil_transform(image)
        boxes, _ = self._mtcnn_mult.detect(pil_im_512)
        box = boxes[0]
        face_tensor = extract_face(pil_im_512, box, margin=40)
        std_tensor = self.get_std_tensor(face_tensor.permute(1, 2, 0))
        cropped_pil_im = self.tensor2pil(std_tensor)
        face_tensor = self.trans_resnet(cropped_pil_im)
        face_tensor4d = face_tensor.unsqueeze(0)
        face_tensor4d = face_tensor4d.to(self._device)
        self._resnet.eval()
        prediction = self._resnet(face_tensor4d)
        preds = F.softmax(prediction, dim=1).detach().numpy().reshape(-1)
        print('probability of (class1, class2) = ({:.4f}, {:.4f})'.format(preds[0], preds[1]))
        return {'probs': preds.tolist()}

    @classmethod
    def from_path(cls, model_dir):
        device_path = os.path.join(model_dir, 'device_cpu.pt')
        device = torch.load(device_path)
        model_path = os.path.join(model_dir, 'FullResNetRefinedExtra_no_norm_100x100_8634.pt')
        classifier = torch.load(model_path, map_location=device)
        mtcnn_path = os.path.join(model_dir, 'mtcnn_mult.pt')
        mtcnn_mult = torch.load(mtcnn_path)
        return cls(classifier, mtcnn_mult, device)
When I test the class locally everything works, so I assume it's a problem related to the serialisation and deserialisation on the Google Platform side. How can I resolve this issue?
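One thing worth checking, hedged: the AI Platform online prediction request body has the form {"instances": [...]}, where the value is a list with one entry per instance, and that list is what reaches predict(). In the snippets above, the array version wraps the raw pixel array rather than a one-element list, and the b64 version wraps a dict. A minimal sketch of a request file built as a one-instance list, under that assumption:

import base64
import json

with open('Pic512.png', 'rb') as f:
    byte_im = f.read()

# One instance per list element; here a single b64-encoded image.
request_body = {'instances': [{'b64': base64.b64encode(byte_im).decode()}]}
with open('json_instance.json', 'w') as f:
    json.dump(request_body, f)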

'tuple' object has no attribute 'gpu_fraction'

I am now using Colab to reproduce a saliency detection project.
I'm a student, so please understand that my knowledge is limited.
I found code that uses TensorFlow, so I am trying to use that code to reproduce the project.
However, the author said the code was written for TensorFlow 1.0, and I don't know which version of TensorFlow I get when I just import tensorflow as tf in Colab.
I am getting the errors
'tuple' object has no attribute 'gpu_fraction'
and
module 'tensorflow' has no attribute 'GPUOptions'
Here is my source code; please see what the problem is:
import tensorflow as tf
import numpy as np
import os
from scipy import misc
import argparse
import sys

g_mean = np.array(([126.88, 120.24, 112.19])).reshape([1, 1, 3])
output_folder = "./test_output"

def rgba2rgb(img):
    return img[:, :, :3] * np.expand_dims(img[:, :, 3], 2)

def main(args):
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        saver = tf.train.import_meta_graph('./meta_graph/my-model.meta')
        saver.restore(sess, tf.train.latest_checkpoint('./salience_model'))
        image_batch = tf.get_collection('image_batch')[0]
        pred_mattes = tf.get_collection('mask')[0]
        if args.rgb_folder:
            rgb_pths = os.listdir(args.rgb_folder)
            for rgb_pth in rgb_pths:
                rgb = misc.imread(os.path.join(args.rgb_folder, rgb_pth))
                if rgb.shape[2] == 4:
                    rgb = rgba2rgb(rgb)
                origin_shape = rgb.shape
                rgb = np.expand_dims(misc.imresize(rgb.astype(np.uint8), [320, 320, 3], interp="nearest").astype(np.float32) - g_mean, 0)
                feed_dict = {image_batch: rgb}
                pred_alpha = sess.run(pred_mattes, feed_dict=feed_dict)
                final_alpha = misc.imresize(np.squeeze(pred_alpha), origin_shape)
                misc.imsave(os.path.join(output_folder, rgb_pth), final_alpha)
        else:
            rgb = misc.imread(args.rgb)
            if rgb.shape[2] == 4:
                rgb = rgba2rgb(rgb)
            origin_shape = rgb.shape[:2]
            rgb = np.expand_dims(misc.imresize(rgb.astype(np.uint8), [320, 320, 3], interp="nearest").astype(np.float32) - g_mean, 0)
            feed_dict = {image_batch: rgb}
            pred_alpha = sess.run(pred_mattes, feed_dict=feed_dict)
            final_alpha = misc.imresize(np.squeeze(pred_alpha), origin_shape)
            misc.imsave(os.path.join(output_folder, 'alpha.png'), final_alpha)

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--rgb', type=str,
                        help='input rgb', default=None)
    parser.add_argument('--rgb_folder', type=str,
                        help='input rgb', default=None)
    parser.add_argument('--gpu_fraction', type=float,
                        help='how much gpu is needed, usually 4G is enough', default=1.0)
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
You can try just getting rid of the explicit GPU configuration, i.e. turn your
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_fraction)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
into
with tf.Session() as sess:
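If the Colab runtime is providing TensorFlow 2.x (which would explain module 'tensorflow' has no attribute 'GPUOptions', since GPUOptions, ConfigProto and Session moved under tf.compat.v1), a commonly used shim is to import the v1 compatibility layer. A minimal sketch, not tested against this particular script:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()   # run the TF1-style graph/session code unchanged

print(tf.__version__)      # check which version Colab actually provides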

Creating tfrecord with audio sample, extracted from video in python

This code creates TFRecords, TensorFlow's standard input format, to hold the audio and labels taken from video samples. The resulting file is then given as input for training a neural network.
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import menpo
import tensorflow as tf
import numpy as np
import os

from io import BytesIO
from pathlib import Path
from moviepy.editor import VideoFileClip
from menpo.visualize import progress_bar_str, print_progress
from moviepy.audio.AudioClip import AudioArrayClip

root_dir = Path('/home/user/Desktop/PROJECT/Multimodal-Emotion-Recognition-master/RECOLA')  # where RECOLA is located

portion_to_id = dict(
    train=[1],       # 25
    valid=[70, 71],
    test=[80, 81]    # 54, 53
)  # samples taken

def get_samples(subject_id):  # locations of the arousal and valence files and the matching video sample
    arousal_label_path = root_dir / 'ratings_individual/arousal/{}.csv'.format(subject_id)
    valence_label_path = root_dir / 'ratings_individual/valence/{}.csv'.format(subject_id)
    clip = VideoFileClip(str(root_dir / "Video_recordings_MP4/{}.mp4".format(subject_id)))
    subsampled_audio = clip.audio.set_fps(16000)
    audio_frames = []
    for i in range(1, 7501):  # extract audio samples
        try:
            time = 0.04 * i
            audio = np.array(list(subsampled_audio.subclip(time - 0.04, time).iter_frames()))
            audio = audio.mean(1)[:640]
            audio_frames.append(audio.astype(np.float32))
        except ValueError:
            print('Not float')
            quit()
    try:
        arousal = np.loadtxt(str(arousal_label_path), delimiter=',')[:+1][1:]
        valence = np.loadtxt(str(valence_label_path), delimiter=',')[:+1][1:]
        return audio_frames, np.dstack([arousal, valence])[0].astype(np.float32)  # return audio frames and labels
    except ValueError:
        print('problem')

def get_jpg_string(im):
    # Gets the serialized jpg from a menpo `Image`.
    fp = BytesIO()
    menpo.io.export_image(im, fp, extension='jpg')
    fp.seek(0)
    return fp.read()

def _int_feauture(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feauture(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def serialize_sample(writer, subject_id):
    subject_name = 'P{}'.format(subject_id)
    print(subject_name)
    print(get_samples)
    # repeat for each sample
    for i, (audio, label) in enumerate(zip(*get_samples(subject_name))):
        example = tf.train.Example(features=tf.train.Features(feature={
            'sample_id': _int_feauture(i),
            'subject_id': _int_feauture(subject_id),
            'label': _bytes_feauture(label.tobytes()),
            'raw_audio': _bytes_feauture(audio.tobytes()),
        }))  # store sample_id, subject_id, label and raw_audio
        writer.write(example.SerializeToString())
        del audio, label

def main(directory):
    print('In Main')
    for portion in portion_to_id.keys():
        print(portion)
        for subj_id in print_progress(portion_to_id[portion]):
            temp = (directory / 'tf_records' / portion / '{}.tfrecords'.format(subj_id)).as_posix()  # display sample path
            print(temp)
            writer = tf.python_io.TFRecordWriter(
                (directory / 'tf_records' / portion / '{}.tfrecords'.format(subj_id)).as_posix())  # write to tfrecords
            serialize_sample(writer, subj_id)

if __name__ == "__main__":
    print("Calling Main")
    main(Path('/home/user/Desktop/PROJECT/Multimodal-Emotion-Recognition-master/records'))  # save tfrecords
This code raises an error and terminates. I have given all the paths needed to locate the input video.
Error
for i, (audio, label) in enumerate(zip(*get_samples(subject_name))):
TypeError: zip() argument after * must be an iterable, not NoneType
Why do I get this error?
Do you have the following video/audio files in your train, valid and test folders:
train = P1.mp4
valid = P70.mp4, P71.mp4
test = P80.mp4, P81.mp4?
Because the code zip(*get_samples(subject_name)) seems unable to fetch the data: it is getting NoneType!
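A hedged follow-up based on the code shown: the except ValueError branch in get_samples only prints 'problem' and then falls off the end of the function, which implicitly returns None; that None is what zip(*...) then rejects. A minimal sketch that surfaces the real failure instead of hiding it (the RuntimeError wrapper is just an illustrative choice):

    try:
        arousal = np.loadtxt(str(arousal_label_path), delimiter=',')[:+1][1:]
        valence = np.loadtxt(str(valence_label_path), delimiter=',')[:+1][1:]
        return audio_frames, np.dstack([arousal, valence])[0].astype(np.float32)
    except ValueError as err:
        # Re-raise with the offending paths so the caller sees why loading
        # failed instead of silently receiving None.
        raise RuntimeError('could not load labels for {}: {}'.format(subject_id, err))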
