Combining foreground png image with jpg background - python

I have the function below:
import os
import cv2 as cv

def alphaMerge(small_foreground, background, top, left):
    result = background.copy()
    fg_b, fg_g, fg_r, fg_a = cv.split(small_foreground)
    print(fg_b, fg_g, fg_r, fg_a)
    fg_a = fg_a / 255.0
    label_rgb = cv.merge([fg_b * fg_a, fg_g * fg_a, fg_r * fg_a])
    height, width = small_foreground.shape[0], small_foreground.shape[1]
    part_of_bg = result[top:top + height, left:left + width, :]
    bg_b, bg_g, bg_r = cv.split(part_of_bg)
    part_of_bg = cv.merge([bg_b * (1 - fg_a), bg_g * (1 - fg_a), bg_r * (1 - fg_a)])
    cv.add(label_rgb, part_of_bg, part_of_bg)
    result[top:top + height, left:left + width, :] = part_of_bg
    return result
if __name__ == '__main__':
    folder_dir = r"C:\photo_datasets\products_small"
    logo = cv.imread(r"C:\Users\PiotrSnella\photo_datasets\discount.png", cv.IMREAD_UNCHANGED)
    for images in os.listdir(folder_dir):
        input_path = os.path.join(folder_dir, images)
        image_size = os.stat(input_path).st_size
        if image_size < 8388608:
            img = cv.imread(input_path, cv.IMREAD_UNCHANGED)
            height, width, channels = img.shape
            if height > 500 and width > 500:
                result = alphaMerge(logo, img, 0, 0)
                cv.imwrite(r'C:\photo_datasets\products_small_output_cv\{}.png'.format(images), result)
I want to combine two pictures: a logo, which I would like to apply to the full dataset in the folder products_small. I'm getting an error on this line:
part_of_bg = cv.merge([bg_b * (1 - fg_a), bg_g * (1 - fg_a), bg_r * (1 - fg_a)])
ValueError: operands could not be broadcast together with shapes (720,540) (766,827)
I tried other combining options and still get an error about mismatched shapes. Could the photos be the problem, or is it something in the code?
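The two shapes in the error are the background region and the logo's alpha channel; a quick check (added here for illustration) shows the logo is larger than the photo, so it cannot fit at (0, 0):
print(logo.shape[:2])  # (766, 827) -- the logo
print(img.shape[:2])   # (720, 540) -- the photo it is pasted onto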
Thank you for your help guys :)

Here is one way to do that in Python/OpenCV. I will place a 20% resized logo onto the pants image at coordinates 660,660 on the right side pocket.
- Read the background image (pants)
- Read the foreground image (logo) unchanged to preserve the alpha channel
- Resize the foreground (logo) to 20%
- Create a transparent image the size of the background image
- Insert the resized foreground (logo) into the transparent image at the desired location
- Extract the alpha channel from the inserted, resized foreground image
- Extract the base BGR channels from the inserted, resized foreground image
- Blend the background image and the base BGR image using the alpha channel as a mask via np.where() (note: all images must have the same dimensions and 3 channels)
- Save the result
Background Image:
Foreground Image:
import cv2
import numpy as np
# read background image
bimg = cv2.imread('pants.jpg')
hh, ww = bimg.shape[:2]
# read foreground image
fimg = cv2.imread('flashsale.png', cv2.IMREAD_UNCHANGED)
# resize foreground image
fimg_small = cv2.resize(fimg, (0,0), fx=0.2, fy=0.2)
ht, wd = fimg_small.shape[:2]
# create transparent image
fimg_new = np.full((hh,ww,4), (0,0,0,0), dtype=np.uint8)
# insert resized image into transparent image at desired coordinates
fimg_new[660:660+ht, 660:660+wd] = fimg_small
# extract alpha channel from foreground image as mask and make 3 channels
alpha = fimg_new[:,:,3]
alpha = cv2.merge([alpha,alpha,alpha])
# extract bgr channels from foreground image
base = fimg_new[:,:,0:3]
# blend the two images using the alpha channel as controlling mask
result = np.where(alpha==(0,0,0), bimg, base)
# save result
cv2.imwrite("pants_flashsale.png", result)
# show result
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result:

This just requires some multiplication and subtraction.
Your overlay has an actual alpha channel, not just a boolean mask. You should use it. It makes edges look better than just a hard boolean mask.
I see one issue with your overlay: it doesn't have any "shadow" to give the white text contrast against a potentially white background.
Resizing RGBA data is not trivial. It's better to export the graphic from your vector graphics program at the desired resolution in the first place. Resizing after the fact requires care so that partially transparent pixels (neither 100% opaque nor 100% transparent) are computed properly, and so that undefined "background" color from the fully transparent areas of the overlay is not mixed into them.
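A minimal sketch of that premultiplied-alpha approach (the function name, the epsilon guard, and the INTER_AREA choice are assumptions, not from the answer):
import cv2 as cv
import numpy as np

def resize_rgba(overlay, fx, fy):
    """Resize an RGBA image without bleeding undefined color
    from fully transparent pixels into partially transparent ones."""
    rgba = overlay.astype(np.float32)
    alpha = rgba[:, :, 3:4] / 255.0
    rgba[:, :, :3] *= alpha                        # premultiply color by alpha
    small = cv.resize(rgba, (0, 0), fx=fx, fy=fy, interpolation=cv.INTER_AREA)
    a = small[:, :, 3:4] / 255.0
    small[:, :, :3] /= np.maximum(a, 1e-6)         # un-premultiply, avoid /0
    return np.clip(small, 0, 255).astype(np.uint8)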
import cv2 as cv
import numpy as np

base = cv.imread("U3hRd.jpg")
overlay = cv.imread("OBxGQ.png", cv.IMREAD_UNCHANGED)
(bheight, bwidth) = base.shape[:2]
(oheight, owidth) = overlay.shape[:2]
print("base:", bheight, bwidth)
print("overlay:", oheight, owidth)
# place overlay in center
#ox = (bwidth - owidth) // 2
#oy = (bheight - oheight) // 2
# place overlay in top left
ox = 0
oy = 0
overlay_color = overlay[:,:,:3]
overlay_alpha = overlay[:,:,3] * np.float32(1/255)
# "unsqueeze" (insert 1-sized color dimension) so numpy broadcasting works
overlay_alpha = np.expand_dims(overlay_alpha, axis=2)
composite = base.copy()
base_roi = base[oy:oy+oheight, ox:ox+owidth]
composite_roi = composite[oy:oy+oheight, ox:ox+owidth]
composite_roi[:,:] = overlay_color * overlay_alpha + base_roi * (1 - overlay_alpha)
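To inspect the result, save or display it (the output name is just an example):
cv.imwrite("composite.png", composite)
cv.imshow("composite", composite)
cv.waitKey(0)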

This gives what you wanted in the top left corner. Note that the logo's white foreground doesn't stand out against the background of pant.jpg.
Just 17 lines of code:
import cv2
import numpy as np

img1 = cv2.imread('pant.jpg')
# white canvas the size of the background, with the logo pasted into the top left
overlay_img1 = np.ones(img1.shape, np.uint8) * 255
img2 = cv2.imread('logo3.png')
rows, cols, channels = img2.shape
overlay_img1[0:rows, 0:cols] = img2
# threshold the grayscale overlay: mask is 255 on the logo, 0 on the white canvas
img2gray = cv2.cvtColor(overlay_img1, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 220, 255, cv2.THRESH_BINARY_INV)
mask_inv = cv2.bitwise_not(mask)
# keep the background outside the logo and the overlay inside it, then add
temp1 = cv2.bitwise_and(img1, img1, mask=mask_inv)
temp2 = cv2.bitwise_and(overlay_img1, overlay_img1, mask=mask)
result = cv2.add(temp1, temp2)
cv2.imshow("Result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
Logo resized (320x296):

Related

Resize image to use as a blurred background but with a zoom aspect in relation to the original image

The original image is this:
Using this website tool with these settings:
The final result that I am trying to reproduce (the only difference is that I'm going to add a readjustment to the image on top, which will also be 1080 wide) is this:
It is clear that there is a zoom effect in the blurred image. To keep the aspect ratio at the highest possible quality, I use im = im.resize((1080,1080), resample=Image.Resampling.LANCZOS). Since I don't want that, I removed resample=Image.Resampling.LANCZOS, imagining that the image would be generated without keeping the proportions, producing a zoom:
from PIL import Image, ImageFilter, ImageChops
import numpy
import cv2

def remove_border(file_img):
    im = Image.open(file_img)
    bg = Image.new("RGB", im.size, im.getpixel((0,0)))
    diff = ImageChops.difference(im.convert("RGB"), bg)
    diff = ImageChops.add(diff, diff, 2.0, -30)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)

def resize_blur(img_blur, sizers):
    img_blur = img_blur.resize(sizers, resample=Image.Resampling.LANCZOS)
    img_blur = img_blur.filter(ImageFilter.GaussianBlur(10))
    return img_blur

def resize_width_main(img_border, size_width):
    img_width = img_border
    basewidth = size_width
    wpercent = (basewidth / float(img_width.size[0]))
    hsize = int((float(img_width.size[1]) * float(wpercent)))
    img_width = img_width.resize((basewidth, hsize), Image.Resampling.LANCZOS)
    return img_width

def center_overlay(name_file, overlay, background):
    img = numpy.asarray(overlay)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    h, w = img.shape[:2]
    back = numpy.asarray(background)
    back = cv2.cvtColor(back, cv2.COLOR_RGB2BGR)
    hh, ww = back.shape[:2]
    yoff = round((hh - h) / 2)
    xoff = round((ww - w) / 2)
    result = back.copy()
    result[yoff:yoff + h, xoff:xoff + w] = img
    cv2.imwrite(name_file, result)

def main():
    img_border = remove_border('resized_download.png')
    img_blur = resize_blur(img_border, (1080, 1080))
    img_width = resize_width_main(img_border, 1080)
    center_overlay('resized.png', img_width, img_blur)

if __name__ == '__main__':
    main()
But the current result is this:
A few observations.
Your remove_border function isn't doing much; when I tried it with your test image, all it did was remove 9 pixels from the left side. Maybe it has a bug. At the risk of ignoring the lesson of Chesterton's Fence, I'd say you don't need it and could eliminate it entirely.
Blurring before you resize makes more sense than doing it the other way around.
When you resize a blurred image, you don't need to worry much about the quality of the resampling filter. I wouldn't use NEAREST, but BILINEAR should be perfectly adequate and reasonably fast.
When you resize an image, the old and new size should have the same aspect ratio or you will get distortion. The resample method makes no difference in this. You are resizing a rectangle into a square, resulting in bad distortion. You can crop your original to the desired aspect ratio before you resize, or crop it after you resize. It will be more efficient to crop first.
def resize_to_target(im, target_size, resample=Image.BILINEAR):
    ''' Resize an image to a target size. If the aspect ratio
        of the original image is different from the target, the
        image will be cropped to the destination aspect ratio
        before resizing.
    '''
    if (im.size[0] / im.size[1]) < (target_size[0] / target_size[1]):
        # Existing image is narrower, crop top and bottom
        crop_height = round(im.size[0] * target_size[1] / target_size[0])
        if crop_height < im.size[1]:
            top = (im.size[1] - crop_height) // 2
            bottom = top + crop_height
            im = im.crop((0, top, im.size[0], bottom))
    else:
        # Existing image is wider, crop left and right
        crop_width = round(im.size[1] * target_size[0] / target_size[1])
        if crop_width < im.size[0]:
            left = (im.size[0] - crop_width) // 2
            right = left + crop_width
            im = im.crop((left, 0, right, im.size[1]))
    return im.resize(target_size, resample=resample)
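A hypothetical usage for this question's workflow (the input filename and blur radius are taken from the question; blurring first, then resizing, per the observations above):
from PIL import Image, ImageFilter

im = Image.open('resized_download.png')
# blur at the original size, then crop-and-resize to the square target
blurred = im.filter(ImageFilter.GaussianBlur(10))
background = resize_to_target(blurred, (1080, 1080))
background.save('background_blurred.png')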

OpenCV: Highlights Adjustment Without Affecting Shadows

I originally attempted to use the script located here to darken the highlights of a captured 8mm film frame without affecting the shadows.
I never found parameters for it that returned an acceptable improvement in the image. I have since attempted this code:
import numpy as np
import cv2

def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)

def gammaCorrection(src, gamma):
    invGamma = 1 / gamma
    table = [((i / 255) ** invGamma) * 255 for i in range(256)]
    table = np.array(table, np.uint8)
    return cv2.LUT(src, table)

image = cv2.imread(r'X:\temp2\Test00001-02.jpg')
resize = ResizeWithAspectRatio(image, width=1280)  # Resize by width OR
# resize = ResizeWithAspectRatio(image, height=1280) # Resize by height
cv2.imshow('Image', resize)

#gammaImg = gammaCorrection(resize, 0.3)
#cv2.imshow('Gamma corrected image', gammaImg)

# Convert to HSV and take V channel
V = cv2.cvtColor(resize, cv2.COLOR_BGR2HSV)[..., 2]
# Threshold V channel at 150 to make alpha channel (A)
_, A = cv2.threshold(V, 150, 255, cv2.THRESH_BINARY)
# Threshold V channel at 0 to make a fully opaque alpha channel (A2)
_, A2 = cv2.threshold(V, 0, 255, cv2.THRESH_BINARY)
# Stack A onto the BGR channels
result = np.dstack((resize, A))
# Stack A2 onto the BGR channels
fullchannel = np.dstack((resize, A2))
gammaImg = gammaCorrection(result, 0.3)
final_im = cv2.addWeighted(fullchannel, 1, gammaImg, 0.5, 0)

# Save results
cv2.imwrite(r'X:\temp2\original_im.png', resize)
cv2.imwrite(r'X:\temp2\final_im.png', final_im)
cv2.imwrite(r'X:\temp2\gamma_adjusted.png', gammaImg)
cv2.imwrite(r'X:\temp2\fullchannel.png', fullchannel)
The following images show the output of this script.
Original Image:
Gamma Adjusted Image:
Full Channel Image:
Final Image:
The final image actually makes the highlights in the image even brighter, so there is something I am doing wrong when I merge the gamma adjusted image with the full channel image.
So, any thoughts on getting just the highlights above a set threshold to darken and not affect the darker regions of the image?
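One idea, sketched under the assumption that the goal is to interpolate between the original and a gamma-darkened copy using a soft mask built from the V channel (the ramp endpoints 150-200, the blur sigma, and the output filename are assumptions):
import cv2
import numpy as np

img = cv2.imread(r'X:\temp2\Test00001-02.jpg')

# gamma-darkened copy (same LUT approach as above, gamma = 0.3)
invGamma = 1 / 0.3
table = np.array([((i / 255) ** invGamma) * 255 for i in range(256)], np.uint8)
dark = cv2.LUT(img, table)

# soft mask: 0 below V=150, ramping to 1 at V=200, then feathered
V = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[..., 2].astype(np.float32)
mask = np.clip((V - 150) / 50.0, 0, 1)
mask = cv2.GaussianBlur(mask, (0, 0), 5)[..., None]

# darken only the highlights; the shadows keep their original pixels
result = (img * (1 - mask) + dark * mask).astype(np.uint8)
cv2.imwrite(r'X:\temp2\highlights_darkened.png', result)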

Image object reflection

Hello, I want to reflect an object in the image, as in this example: https://i.stack.imgur.com/N9J3I.jpg
How can I get this kind of result?
It is possible that OpenCV does not have good solutions for this; take a closer look at Pillow.
from PIL import Image, ImageFilter

def drop_shadow(image, iterations=3, border=8, offset=(5,5), background_colour=0xffffff, shadow_colour=0x444444):
    shadow_width = image.size[0] + abs(offset[0]) + 2 * border
    shadow_height = image.size[1] + abs(offset[1]) + 2 * border
    shadow = Image.new(image.mode, (shadow_width, shadow_height), background_colour)
    shadow_left = border + max(offset[0], 0)
    shadow_top = border + max(offset[1], 0)
    shadow.paste(shadow_colour, [shadow_left, shadow_top, shadow_left + image.size[0], shadow_top + image.size[1]])
    for i in range(iterations):
        shadow = shadow.filter(ImageFilter.BLUR)
    img_left = border - min(offset[0], 0)
    img_top = border - min(offset[1], 0)
    shadow.paste(image, (img_left, img_top))
    return shadow

drop_shadow(Image.open('boobs.jpg')).save('shadowed_boobs.png')
Here is one way to do the reflection in Python/OpenCV.
Flip the image vertically. Then make a vertical ramp (gradient) image and put it into the alpha channel of the flipped image. Finally, concatenate the original and the flipped images.
Input:
import cv2
import numpy as np
# set top and bottom opacity percentages
top = 85
btm = 15
# load image
img = cv2.imread('bear2.png')
hh, ww = img.shape[:2]
# flip the input
flip = np.flip(img, axis=0)
# add opaque alpha channel to input
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# make vertical gradient that is bright at top and dark at bottom as alpha channel for the flipped image
gtop = 255*top//100
gbtm = 255*btm//100
grady = np.linspace(gbtm, gtop, hh, dtype=np.uint8)
gradx = np.linspace(1, 1, ww, dtype=np.uint8)
grad = np.outer(grady, gradx)
grad = np.flip(grad, axis=0)
# alternate method
#grad = np.linspace(0, 255, hh, dtype=np.uint8)
#grad = np.tile(grad, (ww,1))
#grad = np.transpose(grad)
#grad = np.flip(grad, axis=0)
# put the gradient into the alpha channel of the flipped image
flip = cv2.cvtColor(flip, cv2.COLOR_BGR2BGRA)
flip[:,:,3] = grad
# concatenate the original and the flipped versions
result = np.vstack((img, flip))
# save output
cv2.imwrite('bear2_vertical_gradient.png', grad)
cv2.imwrite('bear2_reflection.png', result)
# Display various images to see the steps
cv2.imshow('flip',flip)
cv2.imshow('grad',grad)
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ramped (Gradient) Image:
Result:

how to add an alpha channel of particular value in an BGR image

I tried the code below. It doesn't show any error and runs properly, but changing the value of the alpha channel doesn't produce any change in the image:
img3 = cv2.cvtColor(img2, cv2.COLOR_BGR2BGRA)
img3[:,:,3] = 100
cv2.imshow('img1',img2)
cv2.imshow('img',img3)
cv2.waitKey(0)
It works OK, but the output of both images is the same; there is no visible change after applying the alpha channel. The code above is what I have already tried.
Your code is actually correct.
The simple answer is that OpenCV's imshow() ignores transparency, so if you want to see its effect, save your image as a PNG/TIFF (both of which support transparency) and view it with a different viewer - such as GIMP, Photoshop or feh.
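For example, a quick way to check the transparency is actually there (the output name is just an example):
cv2.imwrite('img3_with_alpha.png', img3)  # open in GIMP/Photoshop to see the alpha take effect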
As an alternative, I made a wrapper/decorator for OpenCV's imshow() that displays images with transparency overlaid on a chessboard like Photoshop does. So, starting with this RGBA Paddington image and this grey+alpha Paddington image:
#!/usr/bin/env python3
import cv2
import numpy as np

def imshow(title, im):
    """Decorator for OpenCV "imshow()" to handle images with transparency"""
    # Check we got np.uint8, 2-channel (grey + alpha) or 4-channel RGBA image
    if (im.dtype == np.uint8) and (len(im.shape) == 3) and (im.shape[2] in set([2, 4])):
        # Pick up the alpha channel and delete from original
        alpha = im[..., -1] / 255.0
        im = np.delete(im, -1, -1)
        # Promote greyscale image to RGB to make coding simpler
        if len(im.shape) == 2:
            im = np.stack((im, im, im), axis=2)
        h, w, _ = im.shape
        # Make a checkerboard background image same size, dark squares are grey(102), light squares are grey(152)
        f = lambda i, j: 102 + 50 * ((i + j) % 2)
        bg = np.fromfunction(np.vectorize(f), (16, 16)).astype(np.uint8)
        # Resize to square same length as longer side (so squares stay square), then trim
        if h > w:
            longer = h
        else:
            longer = w
        bg = cv2.resize(bg, (longer, longer), interpolation=cv2.INTER_NEAREST)
        # Trim to correct size
        bg = bg[:h, :w]
        # Blend, using result = alpha*overlay + (1-alpha)*background
        im = (alpha[..., None] * im + (1.0 - alpha[..., None]) * bg[..., None]).astype(np.uint8)
    cv2.imshow(title, im)

if __name__ == "__main__":
    # Open RGBA image
    im = cv2.imread('paddington.png', cv2.IMREAD_UNCHANGED)
    imshow("Paddington (RGBA)", im)
    key = cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Open Grey + alpha image
    im = cv2.imread('paddington-ga.png', cv2.IMREAD_UNCHANGED)
    imshow("Paddington (grey + alpha)", im)
    key = cv2.waitKey(0)
    cv2.destroyAllWindows()
And you will get this:
and this:
Keywords: Image, image processing, Python, alpha channel, transparency, overlay, checkerboard, chessboard, blend, blending. OpenCV, imshow, cv2.imshow.

python opencv panorama blacklines

I am working on a panorama with Python OpenCV. Can someone show me how to get rid of the black lines in my final images? I am thinking that maybe I should first check for the color, i.e. (0,0,0), before copying it to the atlas image, but I am not quite sure how to do that.
import cv2
import numpy as np

def warpTwoImages(img1, img2, H):
    '''warp img2 to img1 with homography H'''
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    pts1 = np.float32([[0,0],[0,h1],[w1,h1],[w1,0]]).reshape(-1,1,2)
    pts2 = np.float32([[0,0],[0,h2],[w2,h2],[w2,0]]).reshape(-1,1,2)
    pts2_ = cv2.perspectiveTransform(pts2, H)
    pts = np.concatenate((pts1, pts2_), axis=0)
    [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
    [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
    t = [-xmin, -ymin]
    Ht = np.array([[1,0,t[0]],[0,1,t[1]],[0,0,1]])  # translate
    result = cv2.warpPerspective(img2, Ht.dot(H), (xmax-xmin, ymax-ymin))
    result[t[1]:h1+t[1], t[0]:w1+t[0]] = img1
    return result
This answer depends on the warpPerspective function working with RGBA.
You can try to use the alpha channel of each image.
Before warping, convert each image to RGBA (see the code below), where the alpha channel will be 0 for the black lines and 255 for all other pixels.
import cv2
import numpy as np
# Read img
img = cv2.imread('i.jpg')
# Create mask from all the black lines
mask = np.zeros((img.shape[0],img.shape[1]),np.uint8)
cv2.inRange(img,(0,0,0),(1,1,1),mask)
mask[mask==0]=1
mask[mask==255]=0
mask = mask*255
b_channel, g_channel, r_channel = cv2.split(img)
# Create a new image with 4 channels; the fourth channel (alpha) gives the opacity of each pixel
newImage = cv2.merge((b_channel, g_channel, r_channel, mask))
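A hypothetical continuation (the function and variable names are assumptions): warp the 4-channel image, then copy only the pixels whose alpha is non-zero, so the black border never overwrites the panorama:
def paste_warped(panorama, newImage, H, out_w, out_h):
    # warp the RGBA image; the border introduced by warping has alpha == 0
    warped = cv2.warpPerspective(newImage, H, (out_w, out_h))
    visible = warped[:, :, 3] > 0
    # copy only visible (non-border) pixels into the 3-channel panorama
    panorama[visible] = warped[visible][:, :3]
    return panorama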
