Image copy does not show the same image in OpenCV - Python

I would like to display a copy of an image but it does not work.
import cv2
import numpy as np

def display_and_close(img):
    cv2.imshow("test", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

img = cv2.imread('assets/tests.jpeg', 0)
width, height = img.shape
new_img = np.zeros((width, height))
new_img[:width, :height] += img[:width, :height]
display_and_close(new_img)
display_and_close(img)
I also tried to iterate over the image like this:
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        new_img[i][j] = img[i][j]
but it still does not work.

You need to specify the dtype as uint8 for your black image in Python/OpenCV, or np.zeros will default to float64.
So replace
new_img = np.zeros((width, height))
with
new_img = np.zeros((width, height), dtype=np.uint8)
Also note that NumPy shapes use (height, width) ordering, while you named your variables (width, height). Since you use the swapped names consistently, the code still works, but you should swap them both for clarity.
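For completeness, here is a corrected sketch of the snippet above (variable names follow the question; img.copy() is the idiomatic shortcut and sidesteps the dtype issue entirely):
import cv2
import numpy as np

img = cv2.imread('assets/tests.jpeg', 0)
height, width = img.shape                            # NumPy shape is (rows, cols)
new_img = np.zeros((height, width), dtype=np.uint8)  # uint8, matching img
new_img[:height, :width] += img[:height, :width]
# or, more simply:
# new_img = img.copy()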

Related

Rotating an Image 90 degrees

from PIL import Image

def rotate_picture_90_left(img: Image) -> Image:
    """Return a NEW picture that is the given Image img rotated 90 degrees
    to the left.

    Hints:
    - create a new blank image that has reverse width and height
    - reverse the coordinates of each pixel in the original picture, img,
      and put it into the new picture
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    rotated_img = Image.new('RGB', (img_height, img_width))
    pixelz = rotated_img.load()
    for i in range(img_width):
        for j in range(img_height):
            pixelz[i, j] = pixels[i, j]
    return rotated_img
I believe my code does not work because of how I created the new image with reversed width and height, and because I am not actually reversing the coordinates of each pixel from the original picture. How can I fix my code to rotate the image correctly?
You need to apply the following logic when converting coordinates:
y becomes x
x becomes y, but counting from the end back to the start
Here is the code:
from PIL import Image

def rotate_picture_90_left(img: Image) -> Image:
    w, h = img.size
    pixels = img.load()
    img_new = Image.new('RGB', (h, w))
    pixels_new = img_new.load()
    for y in range(h):
        for x in range(w):
            pixels_new[y, w-x-1] = pixels[x, y]
    return img_new
Example: original image ⇒ image rotated 90° to the left
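As a sanity check, you can compare the result against Pillow's built-in rotation (a sketch with a hypothetical file name; Image.ROTATE_90 rotates counterclockwise, i.e. to the left, and lives under Image.Transpose in Pillow 9.1+):
from PIL import Image

img = Image.open('some_image.jpg')          # hypothetical file name
rotated = rotate_picture_90_left(img)
builtin = img.transpose(Image.ROTATE_90)    # counterclockwise / left rotation
assert list(rotated.getdata()) == list(builtin.getdata())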

Greyscale Image Python Implementation

I am trying to write a Python function that converts an RGB image to grayscale. I pass in an RGB image that has height, width, and channel dimensions, and after the conversion I should get an image with just height and width, but it gives me an image that still has height, width, and channel. Why?
def RGBtoGRAY(img):
    height, width, channels = img.shape
    grayimg = img
    for i in range(height):
        for j in range(width):
            grayimg[i,j] = 0.3 * image[i,j][0] + 0.59 * image[i,j][1] + 0.11 * image[i,j][2]
    return grayimg
the size of the input image is
image.shape
(533, 541, 3)
the size of the output image is
grayimage.shape
(533, 541, 3)
but I expect the size of the output image to be
(533, 541)
You should avoid using for loops when performing image processing, since they are very slow. Instead you can use NumPy, which is highly optimized for vector operations. Using this grayscale conversion formula:
gray = R * .299 + G * .587 + B * .114
Method #1: apply_along_axis:
import cv2
import numpy as np

def grayscale(colors):
    r, g, b = colors
    return 0.299 * r + 0.587 * g + 0.114 * b

# Create image of size 100x100 of random pixels
image = np.random.randint(255, size=(100,100,3), dtype=np.uint8)

# Convert to grayscale (cast back to uint8 so cv2.imshow displays it correctly)
gray = np.apply_along_axis(grayscale, 2, image).astype(np.uint8)

# Display
cv2.imshow('image', image)
cv2.imshow('gray', gray)
cv2.waitKey()
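Note that np.apply_along_axis still calls the Python function once per pixel, so it is not much faster than explicit loops. A fully vectorized sketch using a weighted sum over the channel axis (weights given in BGR order, since OpenCV loads real images as BGR; for the random array above the channel order is irrelevant):
gray = (image @ np.array([0.114, 0.587, 0.299])).astype(np.uint8)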
Method #2: cv2.cvtColor
You could use OpenCV directly and read the image in as grayscale by passing the cv2.IMREAD_GRAYSCALE or 0 flag to cv2.imread.
image = cv2.imread('img.png', cv2.IMREAD_GRAYSCALE) # OR
# image = cv2.imread('img.png', 0)
If you already have the image loaded, you can convert the RGB or BGR image to grayscale using cv2.cvtColor
image = cv2.imread('img.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
Assuming you are using a for loop because you intend to solve it "manually" (like C code), there are a number of issues with your implementation:
The assignment grayimg = img in Python does not create a copy of img (the result is that grayimg references img).
You meant to use: grayimg = img.copy().
img has 3 dimensions, so when using grayimg = img, grayimg also has 3 dimensions.
You need to create grayimg with two dimensions.
Example of creating grayimg and initializing it to zeros:
grayimg = np.zeros((height, width), img.dtype)
Inside the for loop, you are using image instead of img.
Here is a corrected version of RGBtoGRAY:
def RGBtoGRAY(img):
    height, width, channels = img.shape
    #grayimg = img
    # Create height x width array with same type as img, and initialize with zeros.
    grayimg = np.zeros((height, width), img.dtype)
    for i in range(height):
        for j in range(width):
            grayimg[i,j] = 0.3 * img[i,j][0] + 0.59 * img[i,j][1] + 0.11 * img[i,j][2]
    return grayimg
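A quick sanity check on the corrected function (hypothetical file name; note that cv2.imread returns BGR order, so channel 0 is actually blue, while the weights above assume RGB):
import cv2

image = cv2.imread('img.png')   # hypothetical file; shape (H, W, 3), BGR order
gray = RGBtoGRAY(image)
print(gray.shape)               # (H, W) - the channel axis is gone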

How to resize a square image to rectangle with white borders in Python

I have many 708x708 pictures which I need to resize to 500x250 px while keeping the aspect ratio the same. I imagined this could be done by resizing the actual image to 250x250 via Image.thumbnail and adding two white borders to fill the remainder of the space. However, I don't know how to do the latter. The following code gives me the thumbnail image of 250x250 px.
from PIL import Image

image = Image.open('image.jpg')
image.thumbnail((500, 250))
image.save('image_thumbnail.jpg')
print(image.size)
Question is similar to this one.
Any suggestions would be much appreciated!
Check this method in the skimage package. There's a parameter called mode where you can control the desired behaviour.
Try the following:
import cv2
import numpy as np

img = cv2.imread('myimage.jpg', cv2.IMREAD_UNCHANGED)
print('Original Dimensions : ', img.shape)

width = int(img.shape[1] * 35.31 / 100)   # 250/708 is about 35%
height = int(img.shape[0] * 35.31 / 100)
dim = (width, height)
resized_image = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
print('Resized_image Dimensions : ', resized_image.shape)

row, col = resized_image.shape[:2]
bottom = resized_image[row-2:row, 0:col]  # last two rows, only used for display below
bordersize = 125
# Pad the left and right sides with white to reach 500x250
border = cv2.copyMakeBorder(
    resized_image,
    top=0,
    bottom=0,
    left=bordersize,
    right=bordersize,
    borderType=cv2.BORDER_CONSTANT,
    value=[255, 255, 255]
)

cv2.imshow('image', resized_image)
cv2.imshow('bottom rows', bottom)
cv2.imshow('bordered', border)
cv2.waitKey(0)
cv2.destroyAllWindows()
I tried the following: first, I make a thumbnail of size (250, 250), then alter the image with ImageOps.expand to add two white borders, making the dimensions (500, 250).
from PIL import Image, ImageOps

img = Image.open('801595.jpg')
img.thumbnail((500, 250))
print(img.size)
img_with_border = ImageOps.expand(img, border=(125, 0), fill='white')
img_with_border.save('imaged-with-border2.jpg')
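To avoid hardcoding the 125 px border, you can compute the padding from the target size (a sketch, reusing the file names above):
from PIL import Image, ImageOps

target_w, target_h = 500, 250
img = Image.open('801595.jpg')              # square input, e.g. 708x708
img.thumbnail((target_w, target_h))         # 250x250 for a square source
pad_w = (target_w - img.width) // 2
pad_h = (target_h - img.height) // 2
img_with_border = ImageOps.expand(img, border=(pad_w, pad_h), fill='white')
img_with_border.save('imaged-with-border2.jpg')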

How do I make rectangular image squared using OpenCV and Python?

I have all sorts of images of rectangular shape. I need to modify them to uniform square shape (different size ok).
For that I have to layer it on top of larger squared shape.
Background is black.
I have gotten to the point where I need to layer the two images:
import cv2
import numpy as np

if 1:
    img = cv2.imread(in_img)
    # get size
    height, width, channels = img.shape
    print(in_img, height, width, channels)
    # Create a black image
    x = height if height > width else width
    y = height if height > width else width
    square = np.zeros((x, y, 3), np.uint8)
    cv2.imshow("original", img)
    cv2.imshow("black square", square)
    cv2.waitKey(0)
How do I stack them on top of each other so original image is centered vertically and horizontally on top of black shape?
I figured it out. You need to "broadcast into shape":
square[(y-height)/2:y-(y-height)/2, (x-width)/2:x-(x-width)/2] = img
Final draft:
import cv2
import numpy as np

if 1:
    img = cv2.imread(in_img)
    # get size
    height, width, channels = img.shape
    print(in_img, height, width, channels)
    # Create a black image
    x = height if height > width else width
    y = height if height > width else width
    square = np.zeros((x, y, 3), np.uint8)
    #
    # This does the job
    #
    square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = img
    cv2.imwrite(out_img, square)
    cv2.imshow("original", img)
    cv2.imshow("black square", square)
    cv2.waitKey(0)
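An alternative sketch using cv2.copyMakeBorder, which computes the padding directly and also handles odd size differences (in_img and out_img are the question's own path variables):
import cv2

img = cv2.imread(in_img)
height, width = img.shape[:2]
side = max(height, width)
top = (side - height) // 2
bottom = side - height - top
left = (side - width) // 2
right = side - width - left
square = cv2.copyMakeBorder(img, top, bottom, left, right,
                            cv2.BORDER_CONSTANT, value=[0, 0, 0])
cv2.imwrite(out_img, square)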
You can use numpy.vstack to stack your images vertically and numpy.hstack to stack your images horizontally.
Please mark as answered if this resolves your problem.

Python Image Library: image saved after pixel manipulation is always white

I have the following code. src_img is a 1250x1250 RGB image. I want to create another grayscale image with averaged intensity.
from PIL import Image

img = Image.open(src_img)
width, height = img.size
avg_img = Image.new('1', img.size, 'black')
avg_pixels = avg_img.load()
for x in range(width):
    for y in range(height):
        r, g, b = img.getpixel((x, y))
        avg_pixels[x, y] = int((r + g + b) / 3.0)
avg_img.save('avg.tiff')
But the resulting avg.tiff file is plain white. I can see that avg_pixels has the necessary values but the saved image doesn't correspond to those.
Mode '1' is a bilevel image - meaning either white or black. For grayscale, you want 'L'.
avg_img = Image.new('L', img.size, 'black')
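For reference, Pillow can also produce a grayscale copy directly; note that convert('L') uses the ITU-R 601 luma weights (0.299 R + 0.587 G + 0.114 B) rather than the plain average used above:
from PIL import Image

img = Image.open(src_img)
gray = img.convert('L')   # weighted luma, not a plain (r + g + b) / 3 average
gray.save('avg.tiff')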
