Image.open PermissionError: [Errno 13] Permission denied (Python)

I am making an image classifier to classify rockets and airplanes using Python and TensorFlow, but I'm having trouble loading my training images folder with PIL.Image.open. This is my code:
train_data = "C:/Users/Will Downs/image_training/training_data/"
test_data = "C:/Users/Will Downs/image_training/test_data/"
def train_data_with_label():
    train_images = []
    for i in tqdm(os.listdir(train_data)):
        path = os.path.join(train_data, i)
        img = Image.open(path)
        img.thumbnail((64, 64), Image.ANTIALIAS)  # resizes image in-place
        train_images.append([np.array(img), one_hot_label(i)])
    shuffle(train_images)
    return train_images

def test_data_with_label():
    test_images = []
    for i in tqdm(os.listdir(test_data)):
        path = os.path.join(test_data, i)
        img = Image.open(path)
        img.thumbnail((64, 64), Image.ANTIALIAS)  # resizes image in-place
        test_images.append([np.array(img), one_hot_label(i)])
    shuffle(test_images)
    return test_images
This is the error I get:
PermissionError Traceback (most recent call last)
<ipython-input-17-f3b44f76f884> in <module>
46 return test_images
47
---> 48 training_images = train_data_with_label()
49 testing_images = test_data_with_label()
50 tr_img_data = np.array([i[0] for i in training_images]).reshape(-1,64,64,1)
<ipython-input-17-f3b44f76f884> in train_data_with_label()
30 for i in tqdm(os.listdir(train_data)):
31 path = os.path.join(train_data, i)
---> 32 img = Image.open(path)
33 img.thumbnail((64, 64), Image.ANTIALIAS) # resizes image in-place
34 train_images.append([np.array(img), one_hot_label(i)])
~\Anaconda3\lib\site-packages\PIL\Image.py in open(fp, mode)
2768
2769 if filename:
-> 2770 fp = builtins.open(filename, "rb")
2771 exclusive_fp = True
2772
PermissionError: [Errno 13] Permission denied: 'C:/Users/Will Downs/image_training/training_data/Airplane'
Any suggestions on why this is or how I can fix it?

The issue was a simple folder-structure one: the images were in subfolders named after their label, instead of being pooled together in one folder and named according to their label. Image.open was therefore being handed a directory path (e.g. .../training_data/Airplane), which raises PermissionError on Windows. A sketch for the subfolder layout is shown below.
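For reference, a minimal sketch (my own addition, not the asker's code) of how the loader could handle the label-per-subfolder layout directly instead of pooling the files; it skips anything that isn't a regular file and takes the label from the subfolder name:

import os
import numpy as np
from PIL import Image

def load_labeled_images(root):
    samples = []
    for label in os.listdir(root):
        label_dir = os.path.join(root, label)
        if not os.path.isdir(label_dir):  # ignore stray files at the top level
            continue
        for fname in os.listdir(label_dir):
            path = os.path.join(label_dir, fname)
            if not os.path.isfile(path):  # ignore nested directories
                continue
            with Image.open(path) as img:
                img.thumbnail((64, 64))  # same 64x64 target as the question
                samples.append([np.array(img), label])
    return samples

# e.g. train_images = load_labeled_images(train_data)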

Related


How can I draw a filled rectangle as a custom (data augmentation) layer in Tensorflow 2 on Python 3?
(Input and expected-output images omitted.)
With image_pil = Image.fromarray(image), I get the error:
AttributeError: Exception encountered when calling layer "remove_patch_5" (type RemovePatch).
'tensorflow.python.framework.ops.EagerTensor' object has no attribute '__array_interface__'
Call arguments received by layer "remove_patch_5" (type RemovePatch):
• image=tf.Tensor(shape=(300, 300, 3), dtype=uint8)
• training=True
Example
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw

class RemovePatch(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, image, training=None):
        if not training:
            return image
        # This is the part that doesn't work
        # image_pil = Image.fromarray(image)
        """
        AttributeError: Exception encountered when calling layer "remove_patch_5" (type RemovePatch).
        'tensorflow.python.framework.ops.EagerTensor' object has no attribute '__array_interface__'
        Call arguments received by layer "remove_patch_5" (type RemovePatch):
          • image=tf.Tensor(shape=(200, 200, 3), dtype=uint8)
          • training=True
        """
        # image = np.array(
        #     ImageDraw.Draw(image_pil).rectangle(
        #         [50, 50, 100, 100], fill="#000000"
        #     )
        # )
        # This part works for adjusting brightness,
        # but no built-in function for drawing a
        # rectangle was found
        # image = tf.image.adjust_brightness(image, 0.5)
        return image

layer = RemovePatch()

image_file = "image.jpg"
try:
    open(image_file)
except FileNotFoundError:
    from requests import get

    r = get("https://picsum.photos/seed/picsum/300/300")
    with open(image_file, "wb") as f:
        f.write(r.content)

with Image.open(image_file) as img:
    img = np.array(img)

augmented = layer(img, training=True)
augmented = np.array(augmented)
# plt.imshow(img)
plt.imshow(augmented)

show_expected = False
if show_expected:
    with Image.open(image_file) as img:
        ImageDraw.Draw(img).rectangle([50, 50, 100, 100], fill="#000000")
        plt.imshow(img)
A working solution based on @bui's comment:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw

class RemovePatch(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, image, training=None):
        if not training:
            return image
        image_pil = Image.fromarray(image.numpy())
        ImageDraw.Draw(image_pil).rectangle([50, 50, 100, 100], fill="#000000")
        image = np.array(image_pil)
        return image

layer = RemovePatch()

image_file = "image.jpg"
try:
    open(image_file)
except FileNotFoundError:
    from requests import get

    r = get("https://picsum.photos/seed/picsum/300/300")
    with open(image_file, "wb") as f:
        f.write(r.content)

with Image.open(image_file) as img:
    img = np.array(img)

augmented = layer(img, training=True)
augmented = Image.fromarray(augmented)
plt.imshow(augmented)
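As a supplementary sketch of my own (not part of @bui's suggestion): the same patch can be blacked out with plain tensor ops, which avoids the PIL round trip and the call to .numpy(), so it also works where eager tensors are not available (for example inside a tf.data map). The coordinates follow the [50, 50, 100, 100] rectangle used above.

import tensorflow as tf

def black_out_patch(image, x0=50, y0=50, x1=100, y1=100):
    # Build a boolean mask that is True inside the rectangle, then zero it out.
    height = tf.shape(image)[0]
    width = tf.shape(image)[1]
    in_rows = (tf.range(height) >= y0) & (tf.range(height) < y1)  # shape (H,)
    in_cols = (tf.range(width) >= x0) & (tf.range(width) < x1)    # shape (W,)
    mask = in_rows[:, None] & in_cols[None, :]                    # shape (H, W)
    return tf.where(mask[:, :, None], tf.zeros_like(image), image)

# Usage with the uint8 image array loaded above:
# patched = black_out_patch(img)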

Problem with loading image data using Pytorch dataset and dataloader

I have a problem with loading image data.
train_dir = 'images'
train_mask_dir = 'masks'

class TissueDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transforms=None):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transforms = transforms
        self.images = os.listdir(image_dir)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = os.path.join(self.image_dir, self.images[idx])
        mask_path = os.path.join(self.mask_dir, self.images[idx])
        image = np.array(Image.open(img_path).convert('RGB'))
        mask = np.array(Image.open(mask_path).convert('L'), dtype=np.float32)
        mask = np.round(mask / 255).astype(np.float32)
        if self.transforms:
            aug = self.transforms(image=image, mask=mask)
            image = aug['image']
            mask = aug['mask']
        return image, mask

train_dataset = TissueDataset(
    image_dir=train_dir,
    mask_dir=train_mask_dir,
    transforms=None
)

train_loader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    num_workers=2,
    pin_memory=PIN_MEMORY,
    shuffle=True
)

x, y = next(iter(train_loader))

print(f'x = shape: {x.shape}; type: {x.dtype}')
print(f'x = min: {x.min()}; max: {x.max()}')
print(f'y = shape: {y.shape}; class: {y.unique()}; type: {y.dtype}')
The error I get is the following:
FileNotFoundError Traceback (most recent call last)
<ipython-input-36-869de9fa31b7> in <module>()
----> 1 x, y = next(iter(train_loader))
2
3 print(f'x = shape: {x.shape}; type: {x.dtype}')
4 print(f'x = min: {x.min()}; max: {x.max()}')
5 print(f'y = shape: {y.shape}; class: {y.unique()}; type: {y.dtype}')
3 frames
/usr/local/lib/python3.7/dist-packages/torch/_utils.py in reraise(self)
432 # instantiate since we don't know how to
433 raise RuntimeError(msg) from None
--> 434 raise exception
435
436
FileNotFoundError: Caught FileNotFoundError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/fetch.py", line 49, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.7/dist-packages/torch/utils/data/_utils/fetch.py", line 49, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "<ipython-input-29-c33cd66a240c>", line 16, in __getitem__
mask = np.array(Image.open(mask_path).convert('L'), dtype=np.float32)
File "/usr/local/lib/python3.7/dist-packages/PIL/Image.py", line 2843, in open
fp = builtins.open(filename, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'masks/2018_74969_1-1_2019-02-2100_48_39-lv1-35186-14908-3285-3747.jpg'
I cannot understand why it shows the right directory for the wrong files (or vice versa) when the images and masks are in the right directories. I've also checked my custom dataset, and it seems to be working correctly (I can open these images).
img_path = os.path.join(train_dir, os.listdir(train_dir)[0])
mask_path = os.path.join(train_mask_dir, os.listdir(train_mask_dir)[3])
image = np.array(Image.open(img_path).convert('RGB'))
mask = np.array(Image.open(mask_path).convert('L'), dtype=np.float32)
mask = np.round(mask / 255).astype(np.float32)
print(mask_path)
print(img_path)
Output:
masks/18-09530A_2019-05-0723_50_03-lv1-34626-18358-3736-6181_mask.jpg
images/18-09530A_2019-05-0723_50_03-lv1-34626-18358-3736-6181.jpg
I will really appreciate any help or tip on this issue.
You can use absolute paths like "C:\sample_folder\masks\example.jpg" instead of relative paths like "masks/example.jpg".
Please check the values of these assignments
img_path = os.path.join(train_dir, os.listdir(train_dir)[0])
mask_path = os.path.join(train_mask_dir, os.listdir(train_mask_dir)[3])
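One thing worth noting from the output the asker printed: the mask filenames carry a _mask suffix (…-6181_mask.jpg) while the image filenames do not, so joining mask_dir with the image filename produces a path that does not exist. If that naming pattern holds for the whole dataset (an assumption based only on the printed example), the mask path could be derived like this:

import os

def mask_path_for(image_name, mask_dir):
    # Assumes masks are named "<image stem>_mask<ext>", as in the printed output.
    stem, ext = os.path.splitext(image_name)
    return os.path.join(mask_dir, f"{stem}_mask{ext}")

# e.g. inside TissueDataset.__getitem__:
# mask_path = mask_path_for(self.images[idx], self.mask_dir)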

Opening image from github with python PIL image.open()

I am hoping to classify some line drawings with a pretrained resnet model and am loading them from a github page. I think the error is coming from me setting up the location of the file wrong, but any help would be appreciated.
The link for the github is here
Here is my code:
loc = 'https://github.com/AlexSwiderski/Images/tree/main/pnt'
fname1 = 'ambulance_resized.png'
response = requests.get(loc + fname1)
image = Image.open(BytesIO(response.content)).resize((256, 256))
data = torch.from_numpy(np.asarray(image)[:, :, :3]) / 255.
My error is as follows:
UnidentifiedImageError Traceback (most recent call last)
<ipython-input-29-6e447d67525f> in <module>()
4 fname1 = 'ambulance_resized.png'
5 response = requests.get(loc + fname1)
----> 6 image = Image.open(BytesIO(response.content)).resize((256, 256))
7 data = torch.from_numpy(np.asarray(image)[:, :, :3]) / 255.
8
/usr/local/lib/python3.7/dist-packages/PIL/Image.py in open(fp, mode)
2894 warnings.warn(message)
2895 raise UnidentifiedImageError(
-> 2896 "cannot identify image file %r" % (filename if filename else fp)
2897 )
2898
UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7f61e16decb0>
You need to add a slash before the filename, otherwise the concatenated path would be
"https://github.com/AlexSwiderski/Images/tree/main/pntambulance_resized.png"
which is invalid.
loc = 'https://github.com/AlexSwiderski/Images/tree/main/pnt'
fname1 = '/ambulance_resized.png'
response = requests.get(loc + fname1)
image = Image.open(BytesIO(response.content)).resize((256, 256))
data = torch.from_numpy(np.asarray(image)[:, :, :3]) / 255.
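If PIL still reports UnidentifiedImageError after fixing the path, it is worth checking that the response actually contains image bytes rather than an HTML page: github.com/.../tree/... URLs serve the GitHub file viewer, while raw file contents are served from raw.githubusercontent.com. A small sanity-check sketch (the raw URL below is inferred from the repository path in the question and may need adjusting):

import requests
from io import BytesIO
from PIL import Image

loc = 'https://raw.githubusercontent.com/AlexSwiderski/Images/main/pnt'
fname1 = '/ambulance_resized.png'

response = requests.get(loc + fname1)
response.raise_for_status()                    # fail loudly on HTTP errors
print(response.headers.get('Content-Type'))    # should be image/png, not text/html

image = Image.open(BytesIO(response.content)).resize((256, 256))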

Permission denied error when loading an image for classification with Keras

How can I load a local image with Keras? I'm getting a permission denied error.
I'm trying to classify an image with TensorFlow from a local Jupyter notebook.
import numpy as np
from keras.preprocessing import image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img = image.load_img(os.getcwd()+'/tmp/test/british_cat/', target_size=(150,150))
imgplot = plt.imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
# model.summary()
print(classes[0:10])

if classes[0][0]==1:
    print('British Cat')
elif classes[0][1]==1:
    print('Love Bird')
elif classes[0][2]==1:
    print('Koi Fish')
else:
    print('error')
The error is:
---------------------------------------------------------------------------
PermissionError Traceback (most recent call last)
<ipython-input-49-7b705ffefd08> in <module>
6
7
----> 8 img = image.load_img(os.getcwd()+'/tmp/test/british_cat/', target_size=(150,150))
9 imgplot = plt.imshow(img)
10 x = image.img_to_array(img)
~\miniconda3\envs\myenv\lib\site-packages\tensorflow\python\keras\preprocessing\image.py in load_img(path, grayscale, color_mode, target_size, interpolation)
298 ValueError: if interpolation method is not supported.
299 """
--> 300 return image.load_img(path, grayscale=grayscale, color_mode=color_mode,
301 target_size=target_size, interpolation=interpolation)
302
~\miniconda3\envs\myenv\lib\site-packages\keras_preprocessing\image\utils.py in load_img(path, grayscale, color_mode, target_size, interpolation)
111 raise ImportError('Could not import PIL.Image. '
112 'The use of `load_img` requires PIL.')
--> 113 with open(path, 'rb') as f:
114 img = pil_image.open(io.BytesIO(f.read()))
115 if color_mode == 'grayscale':
PermissionError: [Errno 13] Permission denied: 'c:\\Users\\AZHAR IE\\Documents\\project\\webscraping google\\Google-Image-Scraper-master/tmp/test/british_cat/
You can get the image names with os.listdir, then load the images one by one in a for loop:
folder= "./tmp/test/british_cat/"
image_names=os.listdir(folder)
for filename in image_names:
img = tf.keras.preprocessing.image.load_img(folder+filename, target_size=(150, 150))
x=image.img_to_array(img)
x=np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
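If the folder contains many images, calling model.predict once per file is slow. A sketch of my own (assuming the same trained model and 150x150 input size as above) that stacks the whole folder into a single batch:

import os
import numpy as np
import tensorflow as tf

folder = "./tmp/test/british_cat/"
batch = []
for filename in sorted(os.listdir(folder)):
    path = os.path.join(folder, filename)
    if not os.path.isfile(path):  # skip sub-directories
        continue
    img = tf.keras.preprocessing.image.load_img(path, target_size=(150, 150))
    batch.append(tf.keras.preprocessing.image.img_to_array(img))

images = np.stack(batch)          # shape: (num_images, 150, 150, 3)
classes = model.predict(images)   # one forward pass for the whole folder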

File system scheme '[local]' not implemented in Google Colab TPU

I am using the TPU runtime in Google Colab, but I'm having problems reading files (I'm not sure that is the cause). I initialized the TPU using:
import tensorflow as tf
import os
import tensorflow_datasets as tfds
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
I have many images in a folder in Google Colab storage ( e.g. '/content/train2017/000000000009.jpg'). I run the following code:
import tensorflow as tf

def load_image(image_path):
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (299, 299))
    img = tf.keras.applications.inception_v3.preprocess_input(img)
    return img, image_path

load_image('/content/train2017/000000000009.jpg')
But, I am getting the following error:
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
<ipython-input-33-a7fbb45f3b76> in <module>()
----> 1 load_image('/content/train2017/000000000009.jpg')
5 frames
<ipython-input-7-862c73d29b96> in load_image(image_path)
2 img = tf.io.read_file(image_path)
3 img = tf.image.decode_jpeg(img, channels=3)
----> 4 img = tf.image.resize(img, (299, 299))
5 img = tf.keras.applications.inception_v3.preprocess_input(img)
6 return img, image_path
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/image_ops_impl.py in resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name)
1515 preserve_aspect_ratio=preserve_aspect_ratio,
1516 name=name,
-> 1517 skip_resize_if_same=False)
1518
1519
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/image_ops_impl.py in _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name, skip_resize_if_same)
1183 with ops.name_scope(name, 'resize', [images, size]):
1184 images = ops.convert_to_tensor(images, name='images')
-> 1185 if images.get_shape().ndims is None:
1186 raise ValueError('\'images\' contains no shape.')
1187 # TODO(shlens): Migrate this functionality to the underlying Op's.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in get_shape(self)
1071 def get_shape(self):
1072 """Alias of Tensor.shape."""
-> 1073 return self.shape
1074
1075 def _shape_as_list(self):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in shape(self)
1065 self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
1066 except core._NotOkStatusException as e:
-> 1067 six.raise_from(core._status_to_exception(e.code, e.message), None)
1068
1069 return self._tensor_shape
/usr/local/lib/python3.6/dist-packages/six.py in raise_from(value, from_value)
UnimplementedError: File system scheme '[local]' not implemented (file: '/content/train2017/000000000009.jpg')
How should I solve it? I found something about GCS buckets, but they are paid. Is there any other way to solve this?
Cloud TPUs can only access data in GCS as only the GCS file system is registered. Please see: https://cloud.google.com/tpu/docs/troubleshooting#cannot_use_local_filesystem for more details.
Though for checkpointing starting with TF 2.3 release you should be able to use the experimental_io_device='/job:localhost' option (https://www.tensorflow.org/api_docs/python/tf/train/CheckpointOptions) to store/load your checkpoints to and from your Colab runtime. Even with that API though you'll need to load data from GCS.
Example:
checkpoint = tf.train.Checkpoint(model=model)
local_device_option = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.write(checkpoint_path, options=local_device_option)
For loading local files when using a TPU, read them with a normal Python file.read() (not tf.io). In your case:
def load_image(image_path):
    with open(image_path, "rb") as local_file:  # <= change here
        img = local_file.read()
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (299, 299))
    img = tf.keras.applications.inception_v3.preprocess_input(img)
    return img, image_path

load_image('/content/train2017/000000000009.jpg')
