I need to invert the colors of an image in Python using PIL. The problem is that I only have to invert the colors of the right half of the image, and I don't know how to do it. Here is an example of how the image should look.
And here is the code I wrote, but it inverts the colors of the whole image.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import PIL.ImageOps
image_file = Image.open("Abbildung1.jpg")
image_file.load()
image_data = np.asarray(image_file, dtype=np.uint8)
inverted_image = PIL.ImageOps.invert(image_file)
inverted_image.save("neuesBild.jpg")
You can use NumPy to split the image into two halves, apply the inversion to the right half only, and finally combine them.
from PIL import Image
import numpy as np
import PIL.ImageOps

image_file = Image.open("some_image.jpeg")
image_file.load()
image_data = np.asarray(image_file, dtype=np.uint8)

# split the pixel array into a left and a right half
width = image_data.shape[1]
left_half = image_data[:, 0:width//2, :]
right_half = image_data[:, width//2:, :]

# invert only the right half, then stitch the halves back together
inverted_image_right = np.asarray(PIL.ImageOps.invert(Image.fromarray(right_half)))
total_image = np.hstack((left_half, inverted_image_right))

inverted_image = Image.fromarray(total_image)
inverted_image.save("inversion_half.jpeg")
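Since inverting an 8-bit image is just 255 minus each pixel value, you could also skip the round trip through PIL.ImageOps for the right half and do the inversion directly in NumPy. A minimal sketch of that variant, reusing the same input file (the output name is just a placeholder):
from PIL import Image
import numpy as np

image_data = np.asarray(Image.open("some_image.jpeg").convert("RGB"), dtype=np.uint8)
width = image_data.shape[1]

# copy so the array is writable, then invert only the right half in place;
# for uint8 data, 255 - value is exactly what ImageOps.invert does per channel
result = image_data.copy()
result[:, width//2:, :] = 255 - result[:, width//2:, :]

Image.fromarray(result).save("inversion_half_numpy.jpeg")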
That's it:
from PIL import Image, ImageOps

img = Image.open('img.png').convert('RGB')
# crop the right half, invert it, and paste it back at the same position
right = img.crop((img.width // 2, 0, img.width, img.height))
img.paste(ImageOps.invert(right), box=(img.width // 2, 0))
We have cropped the right half, inverted it, and pasted the cropped-and-inverted image back.
Then you can check:
img.show()
For some reason this isn't working: the image is not displayed in grayscale. I may be making a silly mistake somewhere. Please help.
# importing modules
import urllib.request
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from PIL import Image
# download the Mona Lisa image
urllib.request.urlretrieve(
'https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg/1024px-Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg',
"Mona_Lisa.png")
#open the file
img = Image.open("/content/Mona_Lisa.png")
# convert from RGBA to RGB
rgb_image = img.convert('RGB')
rgb_image_rgb = np.array(rgb_image)
#show image
plt.imshow(rgb_image_rgb, cmap = cm.Greys_r)
Have you tried this answer?
How can I convert an RGB image into grayscale in Python?
from PIL import Image
img = Image.open('image.png').convert('L')
img.save('greyscale.png')
You can convert the image to grayscale using PIL.Image.convert:
img = Image.open("/content/Mona_Lisa.png").convert("L")
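Note that plt.imshow only applies a colormap to a 2-D (single-channel) array; an RGB array is displayed as-is, which is presumably why the cmap in the question had no effect. A small sketch of displaying the converted image, assuming the same file path:
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

# convert to single-channel grayscale so the colormap is actually applied
gray = np.array(Image.open("/content/Mona_Lisa.png").convert("L"))
plt.imshow(gray, cmap="gray")
plt.axis("off")
plt.show()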
I have a grayscale image created using Pillow – it's mode L – and I'd like to save it as shades of a single colour, so that instead of shades from black-to-white, it's shades from cyan-to-white.
So, say I was doing this:
from PIL import Image, ImageOps
i = Image.open("old_img.jpg")
g = ImageOps.grayscale(i)
g.save("new_img.jpg")
What could I do to save it as cyan-to-white, rather than black-to-white? I'm going to do similar with other grayscale images for magenta-to-white and yellow-to-white too.
Convert your image to the "L" mode (luminosity, grayscale), and then use the .colorize() method instead of the .grayscale() one:
from PIL import Image, ImageOps
i = Image.open("old_img.jpg").convert("L")
g = ImageOps.colorize(i, black="cyan", white="white")
g.save("new_img.jpg")
or just add the command
g = ImageOps.colorize(g, black="cyan", white="white")
after applying the .grayscale(i) method (because it converts the image to the "L" mode, too):
from PIL import Image, ImageOps
i = Image.open("old_img.jpg")
g = ImageOps.grayscale(i)
g = ImageOps.colorize(g, black="yellow", white="white")
g.save("new_img.jpg")
You may set any other desired color in the black= parameter of the .colorize() method (the second example above uses yellow instead of cyan).
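Since the question also mentions magenta-to-white and yellow-to-white versions, here is a small sketch that loops over the three tints (the output file names are just placeholders):
from PIL import Image, ImageOps

i = Image.open("old_img.jpg").convert("L")
for tint in ("cyan", "magenta", "yellow"):
    # black pixels map to the tint colour, white pixels stay white
    ImageOps.colorize(i, black=tint, white="white").save(f"new_img_{tint}.jpg")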
You might be able to do that with matplotlib's imshow:
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np
i = Image.open("frog.jpg")
g = ImageOps.grayscale(i)
fig, ax = plt.subplots(1, 1)
ax.imshow(np.array(g), cmap=plt.cm.Blues)
plt.show()
Result:
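If you want to write the colormapped result to disk rather than only display it, plt.imsave can apply the same colormap while saving; a minimal sketch with the same input and an assumed output name:
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np

g = ImageOps.grayscale(Image.open("frog.jpg"))
# imsave applies the colormap to the 2-D array and writes the file directly
plt.imsave("frog_blues.png", np.array(g), cmap=plt.cm.Blues)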
Here, I want to change the default sharpness of the images in a dataset. It works fine for a single image, but when I apply it to multiple images, it shows me an error like AttributeError: 'numpy.ndarray' object has no attribute 'filter'. What should I do to fix this? My code is given below:
from PIL import Image
from PIL import ImageEnhance
import cv2
import glob
dataset = glob.glob('input/*.png')
other_dir = 'output/'
for img_id, img_path in enumerate(dataset):
    img = cv2.imread(img_path, 0)
    enhancer = ImageEnhance.Sharpness(img)
    enhanced_im = enhancer.enhance(8.0)
    cl2 = cv2.resize(enhanced_im, (1024, 1024), interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(f'{other_dir}/enhanced_{img_id}.png', cl2)
You're trying to use PIL to enhance a NumPy array: cv2.imread loads the image from its path as a NumPy array, and PIL's image operations don't work on that.
You can load the image using PIL, do the PIL enhancements, then convert it to a NumPy array to pass into your cv2.resize() call.
Try:
from PIL import Image
from PIL import ImageEnhance
import cv2
import glob
import numpy as np
dataset = glob.glob('input/*.png')
other_dir = 'output/'
for img_id, img_path in enumerate(dataset):
    img = Image.open(img_path)  # this is a PIL image
    enhancer = ImageEnhance.Sharpness(img)  # PIL wants its own image format here
    enhanced_im = enhancer.enhance(8.0)  # and here
    enhanced_cv_im = np.array(enhanced_im)  # cv2 wants a numpy array
    cl2 = cv2.resize(enhanced_cv_im, (1024, 1024), interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(f'{other_dir}/enhanced_{img_id}.png', cl2)
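If you don't otherwise need OpenCV, a possible alternative is to keep the whole pipeline in Pillow; a sketch under that assumption (Pillow's BICUBIC filter is used in place of cv2.INTER_CUBIC, so results may differ slightly):
from PIL import Image, ImageEnhance
import glob

dataset = glob.glob('input/*.png')
other_dir = 'output/'
for img_id, img_path in enumerate(dataset):
    img = Image.open(img_path)
    enhanced_im = ImageEnhance.Sharpness(img).enhance(8.0)
    # resize and save entirely with Pillow
    enhanced_im.resize((1024, 1024), Image.BICUBIC).save(f'{other_dir}/enhanced_{img_id}.png')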
from PIL import Image, ImageDraw, ImageFilter
im_rgb = Image.open('x.JPG')
im_a = Image.open('blackandwhitex.png').convert('L').resize(im_rgb.size)
im_rgba = im_rgb.copy()
im_rgba.putalpha(im_a)
im_rgba.save('xtransparent.png')
Thanks to this code I made the black areas transparent and put another photo on it, so in the end I have a transparent background.
How would this look in OpenCV? I need OpenCV because PIL rotates my photos, but this is hard to write for someone new to OpenCV and Google Colab.
I've made the black transparent using this code:
import cv2
file_name = "x.png"
src = cv2.imread(file_name, 1)
tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
_,alpha = cv2.threshold(tmp,0,255,cv2.THRESH_BINARY)
b, g, r = cv2.split(src)
rgba = [b,g,r, alpha]
dst = cv2.merge(rgba,4)
cv2.imwrite("newx.png", dst)
It's not so good...
After putting together the second and third photo I want this:
This is the input when using PIL. (All images have the same size.)
Show color photo:
%pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img2=mpimg.imread('colorphoto.JPG')
imgplot = plt.imshow(img2)
plt.show()
Some photos are flipped after that.
Show black and white photo:
%pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img2=mpimg.imread('bawphoto.png')
imgplot = plt.imshow(img2)
plt.show()
For some of the photos I need to use:
from PIL import Image
def rotate(image_path, saved_location):
    image_obj = Image.open(image_path)
    transposed = image_obj.transpose(Image.ROTATE_90)
    transposed = transposed.transpose(Image.ROTATE_180)
    transposed.save(saved_location)
    transposed.show()

if __name__ == '__main__':
    image = 'colorphoto.JPG'
    rotate(image, 'rotated_colorphoto.JPG')
And after that I convert them into one photo:
from PIL import Image, ImageDraw, ImageFilter
im_rgb = Image.open('rotated_colorphoto.JPG')
im_a = Image.open('bawphoto.png').convert('L').resize(im_rgb.size)
im_rgba = im_rgb.copy()
im_rgba.putalpha(im_a)
im_rgba.save('imagewithtransparentbackground.png')
I don't want flipped photos. I need them at their original size and orientation.
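For reference, a rough OpenCV counterpart of the PIL putalpha step might look like the sketch below (file names taken from the code above; the output name is just a placeholder). It reads the mask as a single channel, resizes it to match the colour image, and merges it in as the alpha channel:
import cv2

im_bgr = cv2.imread('rotated_colorphoto.JPG')            # colour image, BGR order
im_a = cv2.imread('bawphoto.png', cv2.IMREAD_GRAYSCALE)  # mask as a single channel
# match the mask size to the colour image; cv2.resize takes (width, height)
im_a = cv2.resize(im_a, (im_bgr.shape[1], im_bgr.shape[0]))

b, g, r = cv2.split(im_bgr)
# merge the mask in as the alpha channel, like putalpha in PIL
bgra = cv2.merge((b, g, r, im_a))
cv2.imwrite('imagewithtransparentbackground_cv.png', bgra)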
I have read the images from the cifar-10-batches-py dataset:
import os
import numpy as np
from PIL import Image
from pylab import *
import matplotlib.pyplot as plt
from scipy.misc import imresize
# read data
data_dir = "F:\\dataSet\\cifar-10-batches-py"
testdata_dir="F:\\dataSet\\cifar-10-batches-py\\test_batch"
da=np.load(testdata_dir)
testdata=da['data']
testlabel=np.array(da['labels'])
train=np.empty((50000,3072))
label=np.empty((50000,))
for i in range(1, 2):
    str = 'data_batch_' + np.str(i)
    path1 = os.path.join(data_dir, str)
    data = np.load(path1)
    train[10000*(i-1):10000*i, :] = data['data']
    label[10000*(i-1):10000*i, ] = data['labels']

def intlabel(label):
    for i in range(label.shape[0]):
        label[i, ] = int(float(label[i, ]))
    return label

def intdata(data):
    n = data.shape[0]
    for i in range(n):
        for j in range(3072):
            data[i, j] = int(float(data[i, j]))
    return data
label,train=intlabel(label),intdata(train)
train,label=np.array(train),np.array(label)
train = train.reshape(train.shape[0], 3, 32,32)
train = train.astype('float32')
Then I don't know how to resize the data.
I used the imresize function to resize the images, but the result was not good.
You can use OpenCV to pre-process the images:
import cv2
img = cv2.imread('IMAGE_LOCATION')
img_fin = cv2.resize(img, (227, 227))
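Applied to the CIFAR-10 array from the question, each row first has to be brought into height x width x channels order before cv2.resize can handle it; a minimal sketch (the 227x227 target is just the example size from above):
import cv2
import numpy as np

def resize_cifar(train, size=(227, 227)):
    # train has shape (N, 3, 32, 32); cv2 expects (height, width, channels)
    out = np.empty((train.shape[0], size[1], size[0], 3), dtype=np.float32)
    for i in range(train.shape[0]):
        img = train[i].transpose(1, 2, 0)  # (3, 32, 32) -> (32, 32, 3)
        out[i] = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
    return out

resized = resize_cifar(train)  # train as built in the question, shape (50000, 3, 32, 32)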