PIL ImageChops.screen in OpenCV - python

How can I write the equivalent of this PIL code in OpenCV?
img3 = ImageChops.screen(im1, im2)

You can implement it with the formula used by ImageChops.screen:
out = MAX - ((MAX - image1) * (MAX - image2) / MAX) (source)
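For example, screening pixel values 100 and 200 gives 255 - (255 - 100) * (255 - 200) / 255 = 255 - 155 * 55 / 255 ≈ 222, brighter than either input; screen can only lighten.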
The code:
import cv2
import numpy as np
im1 = cv2.imread('im1.png').astype(np.uint16)
im2 = cv2.imread('im2.png').astype(np.uint16)
im3 = (255 - ((255 - im1) * (255 - im2) / 255)).astype(np.uint8)
cv2.imwrite('im3.png', im3)
The promotion to uint16 is necessary because the product of two uint8 values can exceed 255; at the end I cast back to uint8 because the final values are guaranteed to be < 256 again.
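If you prefer to stay within OpenCV's own arithmetic, here is a sketch of the same screen blend (assuming the same im1.png and im2.png inputs) that leans on cv2.multiply's scale argument, which handles the intermediate precision internally:
import cv2
im1 = cv2.imread('im1.png')
im2 = cv2.imread('im2.png')
# screen(a, b) = NOT(NOT(a) * NOT(b) / 255); scale=1/255 performs the division
# with enough precision that no manual promotion to uint16 is needed
im3 = cv2.bitwise_not(cv2.multiply(cv2.bitwise_not(im1), cv2.bitwise_not(im2), scale=1/255))
cv2.imwrite('im3.png', im3)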

Screen superimposes two inverted images on top of each other (source)
You can also do this without NumPy:
import cv2
# read the input images, they can be color (RGB) images too
im1 = cv2.imread('im1.jpg')
im2 = cv2.imread('im2.jpg')
# images must be of same size, if not resize one of the images
if im1.shape != im2.shape:
    im2 = cv2.resize(im2, im1.shape[:2][::-1], interpolation=cv2.INTER_AREA)
# invert and normalize first image
im1 = cv2.normalize(cv2.bitwise_not(im1), None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# invert and normalize second image
im2 = cv2.normalize(cv2.bitwise_not(im2), None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# superimpose two images, re-normalize and invert
im = cv2.bitwise_not(cv2.normalize(cv2.multiply(im1, im2), None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U))
# write the output image
cv2.imwrite('im.jpg', im)

Related

how to extract individual pixel(color/rgb) value from one image and transfer to the second image

I'm trying to transfer pixel values from one image to another.
Basically, I have 2 images and my goal is to transfer the colors of img1 to img2 according to the regions.
link to img1 img2 and expected image
I know how to extract a color channel out of an image, but I am not able to achieve the required result. I'd highly appreciate any help.
My approach:
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from natsort import natsorted  # natsorted is used below but was never imported

os.chdir('/kaggle/input/maskedoutput')
stroke_list = natsorted(os.listdir())
for i, v in enumerate(stroke_list):
    image = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if image.shape[2] == 4:
        a1 = ~image[:, :, 3]
        image = cv2.add(cv2.merge([a1, a1, a1, a1]), image)
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    plt.imshow(image)
    plt.show()
    copy = image.copy()
    kernel = np.ones((15, 15), np.uint8)
    closing = cv2.morphologyEx(copy, cv2.MORPH_CLOSE, kernel)
    img_erode = cv2.erode(closing, kernel, iterations=1)  # to suppress black outline
    height, width, channel = img_erode.shape
    for x1 in range(0, width):
        for y1 in range(0, height):
            channels_x1y1 = img_erode[y1, x1]
    os.chdir('/kaggle/input/maskstrokes')
    output = natsorted(os.listdir())
    for idx, v1 in enumerate(output):
        if v == v1:
            print(v, v1)
            img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
            subtracted = cv2.subtract(img_out, img_erode)
        else:
            continue
    plt.imshow(cv2.cvtColor(subtracted, cv2.COLOR_BGR2RGB))
    plt.show()
Here I first erode the original colored image to suppress the black outline. Then, after reading image2, I try to subtract img_erode from it; the residual should be the colored outline. But this code is not working and gives me this error:
---------------------------------------------------------------------------
error Traceback (most recent call last)
/tmp/ipykernel_33/3647166721.py in <module>
43 print(v, v1)
44 img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
---> 45 subtracted = cv2.subtract(img_out, img_erode)
46 # if img_out.shape[2] == 4:
47 # a1 = ~img_out[:,:,3]
error: OpenCV(4.5.4) /tmp/pip-req-build-jpmv6t9_/opencv/modules/core/src/arithm.cpp:647: error: (-209:Sizes of input arguments do not match) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function 'arithm_op'
Another approach was to directly pick color pixels from image1 and transfer them to the second image, but as you can see the image has 3 parts with different colors, so that doesn't work either.
Code:
os.chdir('/kaggle/input/maskedoutput')
stroke_list = natsorted(os.listdir())
for i, v in enumerate(stroke_list):
    image = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if image.shape[2] == 4:
        a1 = ~image[:, :, 3]
        image = cv2.add(cv2.merge([a1, a1, a1, a1]), image)
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    plt.imshow(image)
    plt.show()
    copy = image.copy()
    kernel = np.ones((15, 15), np.uint8)
    closing = cv2.morphologyEx(copy, cv2.MORPH_CLOSE, kernel)
    img_erode = cv2.erode(closing, kernel, iterations=1)  # to suppress black outline
    height, width, channel = img_erode.shape
    for x1 in range(0, width):
        for y1 in range(0, height):
            channels_x1y1 = img_erode[y1, x1]
    os.chdir('/kaggle/input/maskstrokes')
    output = natsorted(os.listdir())
    for idx, v1 in enumerate(output):
        if v == v1:
            print(v, v1)
            img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
            height2, width2, channel2 = img_out.shape
            for x1 in range(0, width2):
                for y1 in range(0, height2):
                    channels_x1y1 = img_out[y1, x1]
        else:
            continue
    plt.imshow(cv2.cvtColor(img_out, cv2.COLOR_BGR2RGB))
    plt.show()
Blocker image
I prepared a quick fix solution based on the expected output.
I used the following image as input:
Code:
img = cv2.imread('colored_objects.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# binary mask
mask = cv2.threshold(gray,10,255,cv2.THRESH_BINARY)[1]
# inverted binary mask
th = cv2.threshold(gray,10,255,cv2.THRESH_BINARY_INV)[1]
# finding external contours based on inverted binary mask
contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# create copy of original image to draw contours
img2 = img.copy()
In the following, we iterate through each contour. For each contour:
1) get the centroid of the contour (centroid)
2) get the color at the centroid from the original image (color)
3) draw the contour with that color on the copy of the original image (img2)
Code:
for c in contours:
    M = cv2.moments(c)
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    centroid = (cx, cy)
    color = tuple(img[cy, cx])
    color = (int(color[0]), int(color[1]), int(color[2]))
    print(color)
    img2 = cv2.drawContours(img2, [c], 0, color, -1)
Now we subtract the original image from the newly drawn image to get r. Then, based on mask, wherever pixels are white we assign white in r:
r = img2 - img
r[mask == 255] = (255, 255, 255)
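One caveat: r = img2 - img is plain NumPy subtraction, which wraps around on uint8 underflow wherever img exceeds img2. If that produces speckle in your output, a possible alternative (not part of the original answer) is cv2.subtract, which saturates at 0 instead:
r = cv2.subtract(img2, img)
r[mask == 255] = (255, 255, 255)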
Update:
Expected result for the latest image. The green shade is present on the border as expected. This was obtained using the same code without any changes:

Increasing Intensity of Certain Image Areas in OpenCV

I currently have the following image and the saliency map below, which reflects the attention areas of the first image:
Both of them are the same size. What I am trying to do is amplify the regions that are very white in the saliency map. For example, the eyes, collar and hair would be a bit more highlighted. I have the following code, where I split the image into h, s, v and then multiply through, but the output is black and white. Any help is greatly appreciated:
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
dimensions = (384, 384)
saliencyMap = cv2.resize(saliencyMap, dimensions)
s1 = s * saliencyMap.astype(s.dtype)
hsv_image = cv2.merge([h, s1, v])
out = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
cv2.imshow('example', out)
cv2.waitKey()
Here is how to do that in Python/OpenCV. Add the two images (from your other post). Modify the mask to have values near a mean of mid-gray. Separate the image into H, S, V channels. Apply the mask to the saturation channel using hard-light compositing. Combine the new saturation with the old hue and value channels and convert back to BGR.
Input 1:
Input 2:
Mask:
import cv2
import numpy as np
# read image 1
img1 = cv2.imread('img1.png')
hh, ww = img1.shape[:2]
# read image 2 and resize to same size as img1
img2 = cv2.imread('img2.png')
img2 = cv2.resize(img2, (ww,hh))
# read saliency mask as grayscale and resize to same size as img1
mask = cv2.imread('mask.png')
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
mask = cv2.resize(mask, (ww,hh))
# add img1 and img2
img12 = cv2.add(img1, img2)
# get mean of mask and shift mean to mid-gray
# desirable for hard light compositing
# add bias as needed
mean = np.mean(mask)
bias = -32
shift = 128 - mean + bias
mask = cv2.add(mask, shift)
# threshold mask at mid gray and convert to 3 channels
# (needed to use as src < 0.5 "if" condition in hard light)
thresh = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)[1]
# convert img12 to hsv
hsv = cv2.cvtColor(img12, cv2.COLOR_BGR2HSV)
# separate channels
hue,sat,val = cv2.split(hsv)
# do hard light composite of saturation and mask
# see CSS specs at https://www.w3.org/TR/compositing-1/#blendinghardlight
satf = sat.astype(np.float64)/255
maskf = mask.astype(np.float64)/255
threshf = thresh.astype(np.float64)/255
threshf_inv = 1 - threshf
low = 2.0 * satf * maskf
high = 1 - 2.0 * (1-satf) * (1-maskf)
new_sat = ( 255 * (low * threshf_inv + high * threshf) ).clip(0, 255).astype(np.uint8)
# combine new_sat with old hue and val
result = cv2.merge([hue,new_sat,val])
# save results
cv2.imwrite('img12_sat_hardlight.png', result)
# show results
cv2.imshow('img12', img12)
cv2.imshow('mask', mask)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Remove vignette filter of colored image

I am new to image processing with Python and OpenCV. I want to remove the border/outline shadow of images as shown below. I checked 'how to remove shadow from scanned images', which does not work for me. Is this even possible?
Your problem of border/outline shadows reminded me of the vignette filter. You can have a look at this question if you want to know more about it. So essentially our task is to remove the effect of the vignette filter and then increase the brightness.
#####VIGNETTE
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
height, width = img.shape[:2]
original = img.copy()
# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(width, 150)
kernel_y = cv2.getGaussianKernel(height, 150)
kernel = kernel_y * kernel_x.T
mask = 255 * kernel / np.linalg.norm(kernel)
# applying the mask to each channel in the input image
for i in range(3):
    img[:, :, i] = img[:, :, i] * mask
cv2.imshow('Original', original)
cv2.imshow('Vignette', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
To counter the effect, change img[:, :, i] = img[:, :, i] * mask to img[:, :, i] = img[:, :, i] / mask.
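One caution about the inverse step: dividing by mask values below 1 can push pixels well above 255, which then wrap around when written back into a uint8 array. A safer sketch (same img and mask as above, with my clipping added) is:
for i in range(3):
    img[:, :, i] = np.clip(img[:, :, i] / mask, 0, 255)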
Now we need to increase the brightness of the image. For this, we will convert the image to HSV and increase the values of saturation and value matrices. To know about it in more detail you can refer to this article.
#THE FULL CODE
import cv2
import numpy as np
img = cv2.imread('shadow.jpg')
original = cv2.imread('bright.jpg')
height, width = img.shape[:2]
# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(width, 150)
kernel_y = cv2.getGaussianKernel(height, 150)
kernel = kernel_y * kernel_x.T
mask = 255 * kernel / np.linalg.norm(kernel)
test = img.copy()
for i in range(3):
    test[:, :, i] = test[:, :, i] / mask
hsv = cv2.cvtColor(test, cv2.COLOR_BGR2HSV)
hsv = np.array(hsv, dtype = np.float64)
hsv[:,:,1] = hsv[:,:,1]*1.3 ## scale up the saturation channel (channel 1)
hsv[:,:,1][hsv[:,:,1]>255] = 255 ## clip saturation to the valid range
hsv[:,:,2] = hsv[:,:,2]*1.3 ## scale up the value channel (channel 2)
hsv[:,:,2][hsv[:,:,2]>255] = 255 ## clip value to the valid range
hsv = np.array(hsv, dtype = np.uint8)
test = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('Original_bright', original)
cv2.imshow('Original_dark', img)
cv2.imshow('Result', test)
cv2.waitKey(0)
cv2.destroyAllWindows()
The result compared with the original bright image:
How the result would have looked without the inverse vignette filter:

remove foreground from segmented image

I have trained a model to provide segmentation of an image, and the output looks like this:
The original image is like this:
I have tried using OpenCV to subtract the two images:
image1 = cv2.imread("cristiano-ronaldo.jpg")
image2 = cv2.imread("cristiano-ronaldo_seg.png")
image3 = cv2.absdiff(image1, image2)
but the output is not what I need. I would like to have Cristiano on a white background. How can I achieve that?
Explanation:
As your files already have the right shape (BGR) and (A), it is very easy to accomplish what you are trying to do. Here are the steps:
1) Load the original image as BGR (in OpenCV it's reversed RGB)
2) Load the "mask" image as a single channel A
3) Merge the original image's BGR channels with your mask image as the A (alpha) channel
Code:
import numpy as np
import cv2
# Load the color image (BGR) and the mask as a single grayscale channel
img1 = cv2.imread('ronaldo.png',3) #READ BGR
img2 = cv2.imread('ronaldoMask.png',0) #READ AS ALPHA
kernel = np.ones((2,2), np.uint8) #Create Kernel for the depth
img2 = cv2.erode(img2, kernel, iterations=2) #Erode using Kernel
height, width, depth = img1.shape
combinedImage = cv2.merge((img1, img2))
cv2.imwrite('ronaldocombine.png',combinedImage)
Output:
After reading the segmented image, convert it to grayscale, then threshold it to get the fg-mask and bg-mask. Then use cv2.bitwise_and to "crop" the fg or bg as you want.
#!/usr/bin/python3
# 2017.11.26 09:56:40 CST
# 2017.11.26 10:11:40 CST
import cv2
import numpy as np
## read
img = cv2.imread("img.jpg")
seg = cv2.imread("seg.png")
## create fg/bg mask
seg_gray = cv2.cvtColor(seg, cv2.COLOR_BGR2GRAY)
_,fg_mask = cv2.threshold(seg_gray, 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
_,bg_mask = cv2.threshold(seg_gray, 0, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
## convert mask to 3-channels
fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR)
bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR)
## cv2.bitwise_and to extract the region
fg = cv2.bitwise_and(img, fg_mask)
bg = cv2.bitwise_and(img, bg_mask)
## save
cv2.imwrite("fg.png", fg)
cv2.imwrite("bg.png", bg)

Blend overlapping images in python

I am taking two images in python and overlapping the first image onto the second image. What I would like to do is blend the images where they overlap. Is there a way to do this in python other than a for loop?
PIL has a blend function which combines two RGB images with a fixed alpha:
out = image1 * (1.0 - alpha) + image2 * alpha
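With alpha = 0.5, for example, corresponding pixel values of 100 and 200 blend to 100 * 0.5 + 200 * 0.5 = 150, a plain average.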
However, to use blend, image1 and image2 must be the same size.
So to prepare your images you'll need to paste each of them into a new image of
the appropriate (combined) size.
Since blending with alpha=0.5 averages the RGB values from both images equally,
we need to make two versions of the panorama -- one with img1 on top and one with img2 on top. Then regions with no overlap have RGB values which agree (so their averages will remain unchanged) and regions of overlap will get blended as desired.
import operator
from PIL import Image
from PIL import ImageDraw
# suppose img1 and img2 are your two images
img1 = Image.new('RGB', size=(100, 100), color=(255, 0, 0))
img2 = Image.new('RGB', size=(120, 130), color=(0, 255, 0))
# suppose img2 is to be shifted by `shift` amount
shift = (50, 60)
# compute the size of the panorama
nw, nh = map(max, map(operator.add, img2.size, shift), img1.size)
# paste img1 on top of img2
newimg1 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg1.paste(img2, shift)
newimg1.paste(img1, (0, 0))
# paste img2 on top of img1
newimg2 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg2.paste(img1, (0, 0))
newimg2.paste(img2, shift)
# blend with alpha=0.5
result = Image.blend(newimg1, newimg2, alpha=0.5)
img1:
img2:
result:
If you have two RGBA images here is a way to perform alpha compositing.
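A minimal sketch with PIL's Image.alpha_composite (the two solid-color RGBA images here are made-up stand-ins for yours; both images must be the same size):
from PIL import Image
im_bottom = Image.new('RGBA', (100, 100), (255, 0, 0, 255))
im_top = Image.new('RGBA', (100, 100), (0, 255, 0, 128))
# composites im_top over im_bottom, honoring both alpha channels
result = Image.alpha_composite(im_bottom, im_top)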
If you'd like a soft edge when stitching two images together you could blend them with a sigmoid function.
Here is a simple grayscale example:
import numpy as np
import matplotlib.image
import math
def sigmoid(x):
    y = np.zeros(len(x))
    for i in range(len(x)):
        y[i] = 1 / (1 + math.exp(-x[i]))
    return y
sigmoid_ = sigmoid(np.arange(-1, 1, 1/50))
alpha = np.repeat(sigmoid_.reshape((len(sigmoid_), 1)), repeats=100, axis=1)
image1_connect = np.ones((100, 100))
image2_connect = np.zeros((100, 100))
out = image1_connect * (1.0 - alpha) + image2_connect * alpha
matplotlib.image.imsave('blend.png', out, cmap = 'gray')
If you blend white and black squares, the result will look something like this:
