How to convert JPEG to PNG in Python? - python

I need to write a script in Python that does this, with options:
Enable PNG transparency
Treat similar colors as transparent (5% tolerance)
How can I do this?
Potential solutions: ffmpeg, ImageMagick

Here is one way to do that in Python/OpenCV
Read the input
Define the desired color (blue) to be made transparent
Define the tolerance amount (5%)
Create upper and lower bounds on the color according to the tolerance
Threshold on color and invert
Put the threshold result into the alpha channel of the input
Save the results
Input:
import cv2
import numpy as np
# load image
img = cv2.imread('barn.jpg')
# specify blue color
color = (230,160,120)
b = color[0]
g = color[1]
r = color[2]
# specify tolerance as a percentage (5%)
tol = 5 / 100
# make lower and upper bounds as color minus/plus 5%
bl = int((1-tol) * b)
gl = int((1-tol) * g)
rl = int((1-tol) * r)
lower = (bl,gl,rl)
bu = int((1+tol) * b)
gu = int((1+tol) * g)
ru = int((1+tol) * r)
upper = (bu,gu,ru)
# threshold on color and invert
mask = cv2.inRange(img, lower, upper)
mask = 255 - mask
# put mask into alpha channel
result = img.copy()
result = cv2.cvtColor(result, cv2.COLOR_BGR2BGRA)
result[:, :, 3] = mask
# save resulting masked image
cv2.imwrite('barn_transp_blue.png', result)
# display result, though it won't show transparency
cv2.imshow("MASK", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Transparent result:

from PIL import Image

# images_dir, create_path and filename come from the surrounding script
img = Image.open(images_dir + '/' + filename)
img.save(create_path + '/' + filename.split('.')[0] + '.png', 'png')
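The snippet above only converts the format. For the question's two options (enable PNG transparency, treat colors within 5% of a chosen color as transparent), a minimal Pillow-only sketch could look like this; the file names, the target color, and the interpretation of the 5% tolerance (5% of the 0-255 range per channel) are assumptions:
from PIL import Image

# assumed file names and target color
src = 'input.jpg'
dst = 'output.png'
target = (230, 160, 120)   # color to make transparent (RGB)
tolerance = 0.05           # "treat similar colors as transparent: 5%"

img = Image.open(src).convert('RGBA')
pixels = img.getdata()

# a pixel becomes transparent if every channel is within the tolerance of the target
thresh = int(255 * tolerance)
new_pixels = [
    (r, g, b, 0) if all(abs(c - t) <= thresh for c, t in zip((r, g, b), target))
    else (r, g, b, a)
    for (r, g, b, a) in pixels
]

img.putdata(new_pixels)
img.save(dst, 'PNG')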

Related

adjust the V value of an HSV image to control the brightness on a data set [duplicate]

I have a sequence of images, and I need to adjust the average brightness of these images.
First example (very slow):
img = cv2.imread('test.jpg')  # load image (BGR)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert it to HSV
for x in range(0, len(hsv)):
    for y in range(0, len(hsv[0])):
        hsv[x, y][2] += value
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite("image_processed.jpg", img)
Second example (fast):
hsv += value
This example is very fast, but it changes all the HSV values (I need to change only V, the brightness).
I know this question is a bit old, but I thought I might post the complete solution that worked for me (takes care of the overflow situation by saturating at 255):
def increase_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value

    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
This can be used as follows:
frame = increase_brightness(frame, value=20)
The other answers suggest doing the saturation "by hand" using all kinds of numpy magic, but you can also use cv2.add() and let OpenCV handle that for you:
import cv2
import numpy as np
image = cv2.imread('image.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
value = 42 #whatever value you want to add
cv2.add(hsv[:,:,2], value, hsv[:,:,2])
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite('out.png', image)
Slice to select just the third channel and then modify those elements (note that a plain += on a uint8 array wraps around on overflow, which the other answers here address) -
hsv[:,:,2] += value
This was my solution for both increasing and decreasing brightness. I was having some issues with a couple of the other answers. The function takes a positive or negative value and alters the brightness.
Example usage:
img = cv2.imread(path_to_image)
img = change_brightness(img, value=30) #increases
img = change_brightness(img, value=-30) #decreases
The function being called:
def change_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = cv2.add(v, value)
    v[v > 255] = 255
    v[v < 0] = 0
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
Iterating over the whole image to make changes is not a very scalable approach in OpenCV; OpenCV provides many functions for performing arithmetic operations on an image.
You can simply split the converted HSV image into its individual channels and then process the V channel accordingly:
value = 50  # amount of brightness to add (example value)
img = cv2.imread('test.jpg')  # load image (BGR)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert it to HSV
h, s, v = cv2.split(hsv)
v = cv2.add(v, value)  # saturating add avoids uint8 wrap-around
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite("image_processed.jpg", img)
def change_brightness(img, alpha, beta):
    return cv2.addWeighted(img, alpha, np.zeros(img.shape, img.dtype), 0, beta)
Here alpha and beta are input parameters. Each pixel of the input image changes according to this formula:
alpha * pixel_value + beta
A lower alpha value such as 2 or 3 works well.
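A short usage sketch for this addWeighted version; the file names and parameter values below are assumptions:
import cv2
import numpy as np

img = cv2.imread('test.jpg')

# alpha scales each pixel and beta is then added: alpha * pixel_value + beta
brighter = change_brightness(img, alpha=1.0, beta=50)    # brighten only
contrasty = change_brightness(img, alpha=2.0, beta=-60)  # more contrast, slightly darker

cv2.imwrite('brighter.jpg', brighter)
cv2.imwrite('contrasty.jpg', contrasty)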
Hope this is useful for someone
Based on @Divakar's answer to "Python, OpenCV: Increasing image brightness without overflowing UINT8 array":
import matplotlib.pyplot as plt

mImage = cv2.imread('image1.jpg')
hsvImg = cv2.cvtColor(mImage, cv2.COLOR_BGR2HSV)
value = 40  # amount to add to the V channel (0 would leave the image unchanged)
vValue = hsvImg[..., 2]
hsvImg[..., 2] = np.where((255 - vValue) < value, 255, vValue + value)
plt.subplot(111), plt.imshow(cv2.cvtColor(hsvImg,cv2.COLOR_HSV2RGB))
plt.title('brightened image'), plt.xticks([]), plt.yticks([])
plt.show()
To decrease the brightness
mImage = cv2.imread('image1.jpg')
hsvImg = cv2.cvtColor(mImage,cv2.COLOR_BGR2HSV)
# decreasing the V channel by a factor from the original
hsvImg[...,2] = hsvImg[...,2]*0.6
plt.subplot(111), plt.imshow(cv2.cvtColor(hsvImg,cv2.COLOR_HSV2RGB))
plt.title('darkened image'), plt.xticks([]), plt.yticks([])
plt.show()
import cv2
import numpy as np
image = cv2.imread('image.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
increase = 100
v = image[:, :, 2]
v = np.where(v <= 255 - increase, v + increase, 255)
image[:, :, 2] = v
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
cv2.imshow('Brightness', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Fastest?
For raw speed, simply adding a positive or negative integer to the original BGR image is probably the fastest approach. However, you'll want to use an OpenCV function to avoid overflow; convertScaleAbs is a good choice. We use the reference "mandrill" image from USC SIPI:
import cv2
def fast_brightness(input_image, brightness):
    ''' input_image: color or grayscale image
        brightness: -255 (all black) to +255 (all white)
        returns image of same type as input_image but with
        brightness adjusted '''
    img = input_image.copy()
    cv2.convertScaleAbs(img, img, 1, brightness)
    return img

img = cv2.imread('mandrill.tiff', cv2.IMREAD_COLOR)
cv2.imwrite('output.jpg', fast_brightness(img, 100))
which gives (for brightness value of 100)
Like Photoshop
For a brightness function more like Photoshop, The Gimp, or other image processing programs, you can use a function similar to @md-hanif-ali-sohag's or the one in this answer:
def photoshop_brightness(input_img, brightness=0):
    ''' input_image: color or grayscale image
        brightness: -127 (all black) to +127 (all white)
        returns image of same type as input_image but with
        brightness adjusted
    '''
    img = input_img.copy()
    if brightness != 0:
        if brightness > 0:
            shadow = brightness
            highlight = 255
        else:
            shadow = 0
            highlight = 255 + brightness
        alpha_b = (highlight - shadow) / 255
        gamma_b = shadow
        cv2.convertScaleAbs(input_img, img, alpha_b, gamma_b)
    return img
Timing
I timed 1,000 iterations of each function and, surprisingly, the times are nearly identical:
elapsed fast_brightness [sec]: 0.8595983982086182
elapsed photoshop_brightness [sec]: 0.8565976619720459
This might be too old, but I use convertTo, which works for me:
Mat resultBrightImage;
origImage.convertTo(resultBrightImage, -1, 1, percent); // Where percent = (int)(percent_val/100)*255, e.g., percent = 50 to increase brightness by 50%
convertTo uses saturate_cast at the end to avoid any overflow. I don't use Python and the above is C++, but I hope it is easily convertible to Python and that it helps.
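For reference, a rough Python equivalent of that C++ call, using cv2.convertScaleAbs (which also saturates); the file name and percentage are assumptions:
import cv2

img = cv2.imread('image1.jpg')           # assumed input file

percent_val = 50                          # e.g. increase brightness by 50%
offset = int(percent_val / 100 * 255)     # the scaling the C++ comment appears to intend

# convertScaleAbs saturates at 255, much like convertTo's saturate_cast
bright = cv2.convertScaleAbs(img, alpha=1, beta=offset)

cv2.imwrite('image1_bright.jpg', bright)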
You can use this function to change the brightness or contrast, much like you would in Photoshop or other similar photo editing software.
def apply_brightness_contrast(input_img, brightness=255, contrast=127):
    brightness = map(brightness, 0, 510, -255, 255)
    contrast = map(contrast, 0, 254, -127, 127)

    if brightness != 0:
        if brightness > 0:
            shadow = brightness
            highlight = 255
        else:
            shadow = 0
            highlight = 255 + brightness
        alpha_b = (highlight - shadow) / 255
        gamma_b = shadow
        buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)
    else:
        buf = input_img.copy()

    if contrast != 0:
        f = float(131 * (contrast + 127)) / (127 * (131 - contrast))
        alpha_c = f
        gamma_c = 127 * (1 - f)
        buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)

    cv2.putText(buf, 'B:{},C:{}'.format(brightness, contrast), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    return buf

def map(x, in_min, in_max, out_min, out_max):
    return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)
After that, create a trackbar with cv2.createTrackbar() and call the function above with the proper parameters, as sketched below. The map() function maps the trackbar positions to brightness values in the range -255 to +255 and contrast values in the range -127 to +127. You can check the full details of the Python implementation here.
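A minimal sketch of that trackbar wiring; the window name and input file are assumptions:
import cv2

original = cv2.imread('photo.jpg')   # assumed input file
win = 'Brightness/Contrast'
cv2.namedWindow(win)

def on_change(_):
    # trackbar positions: 0..510 for brightness, 0..254 for contrast
    b = cv2.getTrackbarPos('Brightness', win)
    c = cv2.getTrackbarPos('Contrast', win)
    cv2.imshow(win, apply_brightness_contrast(original, b, c))

# 255 and 127 are the neutral (no-change) positions
cv2.createTrackbar('Brightness', win, 255, 510, on_change)
cv2.createTrackbar('Contrast', win, 127, 254, on_change)

on_change(0)
cv2.waitKey(0)
cv2.destroyAllWindows()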
An OpenCV image is a numpy array of data type numpy.uint8. The problem with adding an arbitrary value to any of the channels is that an overflow can easily occur. For example, numpy.uint8(255) + numpy.uint8(1) = 0. To avoid this problem, we first convert our BGR image to HLS. Then we convert our HLS image (a numpy.uint8 array) to numpy.int16, add a lightness value to the second channel, push any entries above 255 in the lightness channel down to 255, and push any entries below 0 up to 0. Now all the values v in the lightness channel satisfy 0 <= v <= 255. At this point we can convert back to numpy.uint8, and then convert to BGR.
import cv2 as cv
import numpy as np

# Negative values for the percentage parameter are allowed
def increase_brightness(bgr_img, percentage):
    hls_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2HLS)
    value = np.int16(255 * percentage / 100)
    hls_arr_16bit = np.int16(hls_img)
    hls_arr_16bit[:, :, 1] += value

    if percentage > 0:
        hls_arr_16bit[:, :, 1] = np.where(hls_arr_16bit[:, :, 1] <= 255, hls_arr_16bit[:, :, 1], np.int16(255))
    elif percentage < 0:
        hls_arr_16bit[:, :, 1] = np.where(hls_arr_16bit[:, :, 1] >= 0, hls_arr_16bit[:, :, 1], np.int16(0))

    hls_img = np.uint8(hls_arr_16bit)
    brightened_bgr_img = cv.cvtColor(hls_img, cv.COLOR_HLS2BGR)
    return brightened_bgr_img

img = cv.imread('path\\to\\image.jpg')
mod_img = increase_brightness(img, 20)  # e.g. brighten by 20 percent
cv.imwrite('path\\to\\modified_image.jpg', mod_img)
I know it shouldn't be so hard to adjust the brightness of an image, and there are already plenty of great answers. I would like to enhance the answer of @BillGrates so that it works on grayscale images and can also decrease the brightness: value = -255 creates a black image, whereas value = 255 creates a white one.
def adjust_brightness(img, value):
    num_channels = 1 if len(img.shape) < 3 else 1 if img.shape[-1] == 1 else 3
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) if num_channels == 1 else img

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    if value >= 0:
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
    else:
        value = int(-value)
        lim = 0 + value
        v[v < lim] = 0
        v[v >= lim] -= value

    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if num_channels == 1 else img
    return img
HSV channels are of type uint8, and the hue range is [0, 179]. Therefore, adding a large or negative number produces a garbage result, so for the hue channel we need to convert to int16 and then back to uint8. The same problem occurs on the saturation (S) and value (V) channels, so we need to check the value before adding or subtracting.
Here is my solution for random hue, saturation, and value shifting. It is based on the code samples from @alkasm and @bill-grates.
import random
import cv2

def shift_channel(c, amount):
    if amount > 0:
        lim = 255 - amount
        c[c >= lim] = 255
        c[c < lim] += amount
    elif amount < 0:
        amount = -amount
        lim = amount
        c[c <= lim] = 0
        c[c > lim] -= amount
    return c

rand_h, rand_s, rand_v = 50, 50, 50
img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)  # img_bgr is your input BGR image
h, s, v = cv2.split(img_hsv)

# Random shift hue
shift_h = random.randint(-rand_h, rand_h)
h = ((h.astype('int16') + shift_h) % 180).astype('uint8')

# Random shift saturation
shift_s = random.randint(-rand_s, rand_s)
s = shift_channel(s, shift_s)

# Random shift value
shift_v = random.randint(-rand_v, rand_v)
v = shift_channel(v, shift_v)

shift_hsv = cv2.merge([h, s, v])
print(shift_h, shift_s, shift_v)
img_rgb = cv2.cvtColor(shift_hsv, cv2.COLOR_HSV2RGB)

Increasing Intensity of Certain Image Areas in OpenCV

I currently have the following image and the saliency map below, which reflects the attention areas of the first image:
Both of them are the same size. What I am trying to do is amplify the areas that are very white in the saliency map. For example, the eyes, collar, and hair would be a bit more highlighted. I have the following code, in which I tried to split the image into h, s, v and then multiply through, but the output is black and white. Any help is greatly appreciated:
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
dimensions = (384, 384)
saliencyMap = cv2.resize(saliencyMap, dimensions)
s1 = s * saliencyMap.astype(s.dtype)
hsv_image = cv2.merge([h, s1, v])
out = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
cv2.imshow('example', out)
cv2.waitKey()
Here is how to do that in Python/OpenCV. Add the two images (from your other post). Modify the mask to have values near a mean of mid-gray. Separate the image into H, S, V channels. Apply the mask to the saturation channel using hard-light compositing. Combine the new saturation with the old hue and value channels and convert back to BGR.
Input 1:
Input 2:
Mask:
import cv2
import numpy as np
# read image 1
img1 = cv2.imread('img1.png')
hh, ww = img1.shape[:2]
# read image 2 and resize to same size as img1
img2 = cv2.imread('img2.png')
img2 = cv2.resize(img2, (ww,hh))
# read saliency mask as grayscale and resize to same size as img1
mask = cv2.imread('mask.png')
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
mask = cv2.resize(mask, (ww,hh))
# add img1 and img2
img12 = cv2.add(img1, img2)
# get mean of mask and shift mean to mid-gray
# desirable for hard light compositing
# add bias as needed
mean = np.mean(mask)
bias = -32
shift = 128 - mean + bias
mask = cv2.add(mask, shift)
# threshold mask at mid gray and convert to 3 channels
# (needed to use as src < 0.5 "if" condition in hard light)
thresh = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)[1]
# convert img12 to hsv
hsv = cv2.cvtColor(img12, cv2.COLOR_BGR2HSV)
# separate channels
hue,sat,val = cv2.split(hsv)
# do hard light composite of saturation and mask
# see CSS specs at https://www.w3.org/TR/compositing-1/#blendinghardlight
satf = sat.astype(np.float64) / 255
maskf = mask.astype(np.float64) / 255
threshf = thresh.astype(np.float64) / 255
threshf_inv = 1 - threshf
low = 2.0 * satf * maskf
high = 1 - 2.0 * (1-satf) * (1-maskf)
new_sat = ( 255 * (low * threshf_inv + high * threshf) ).clip(0, 255).astype(np.uint8)
# combine new_sat with old hue and val
result = cv2.merge([hue,new_sat,val])
# save results
cv2.imwrite('img12_sat_hardlight.png', result)
# show results
cv2.imshow('img12', img12)
cv2.imshow('mask', mask)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

How to find the padding size of an image?

I have a DICOM image, but the image is padded. I have code to remove the padding from the image so that only the scan is left, but I have to open the image in ImageJ and manually find the min and max values for the x and y axes where the image starts and ends. The scan has a gray value range of -3000 to 2000. The padded area has a value of 0. Is there a way to find these min and max values without having to do it manually?
Original Image:
Desired Image:
Below is a Python script using SimpleITK that crops out the background.
The basic idea is that it creates a mask image of pixels that are not the background value. Then it uses SimpleITK's LabelShapeStatisticsImageFilter to find the bounding box for the non-zero pixels in that mask image.
import SimpleITK as sitk
img = sitk.ReadImage("padded-image.png")
# Grey background in this example
bg_value = 161
# Create a mask image that is just non-background pixels
fg_mask = (img != bg_value)
# Compute shape statistics on the mask
lsif = sitk.LabelShapeStatisticsImageFilter()
lsif.Execute(fg_mask)
# Get the bounds of the mask.
# Bounds are given as [Xstart, Ystart, Xwidth, Ywidth]
bounds = lsif.GetBoundingBox(1)
print(bounds)
Xmin_crop = bounds[0]
Ymin_crop = bounds[1]
Xmax_crop = img.GetWidth() - (bounds[0]+bounds[2])
Ymax_crop = img.GetHeight() - (bounds[1]+bounds[3])
# Crop parameters are how much to crop off each side
cropped_img = sitk.Crop(img, [Xmin_crop, Ymin_crop], [Xmax_crop, Ymax_crop])
sitk.Show(cropped_img)
sitk.WriteImage(cropped_img, "cropped-image.png")
Because I used your 8-bit PNG image, the background value is set to 161. If you use your original 16-bit DICOM CT, you'd use a background value of 0. SimpleITK can read DICOM, along with a number of other image formats.
For more info about the LabelShapeStatisticsImageFilter class, here's the documentation: https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1LabelShapeStatisticsImageFilter.html#details
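If you start from the original DICOM rather than the PNG, only the background value changes; a hedged sketch (the file names are assumptions, and GetBoundingBox returns the starts followed by the sizes for each dimension):
import SimpleITK as sitk

img = sitk.ReadImage('scan.dcm')   # assumed file name; works for 2D or 3D images
bg_value = 0                       # padded background value in the original CT

# Mask of non-background pixels
fg_mask = (img != bg_value)

lsif = sitk.LabelShapeStatisticsImageFilter()
lsif.Execute(fg_mask)
bounds = lsif.GetBoundingBox(1)    # [start0, start1, ..., size0, size1, ...]

dim = img.GetDimension()
start, size = bounds[:dim], bounds[dim:]
lower_crop = list(start)
upper_crop = [img.GetSize()[d] - (start[d] + size[d]) for d in range(dim)]

cropped = sitk.Crop(img, lower_crop, upper_crop)
sitk.WriteImage(cropped, 'scan_cropped.nrrd')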
Here is an alternate way in Python/OpenCV using color thresholding and contours to find the bounding box.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('scan.png')
# threshold on gray color (161,161,161)
lower = (161,161,161)
upper = (161,161,161)
thresh = cv2.inRange(img, lower, upper)
# invert threshold image so border is black and center box is white
thresh = 255 - thresh
# get external contours (presumably just one)
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
cntr = contours[0]
x,y,w,h = cv2.boundingRect(cntr)
# crop to bounding rectangle
crop = img[y:y+h, x:x+w]
# save cropped image
cv2.imwrite('scan_thresh.png',thresh)
cv2.imwrite('scan_crop.png',crop)
cv2.imshow("THRESH", thresh)
cv2.imshow("CROP", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Cropped Result:
Without having to resort to something as complex (and large/slow to import) as SimpleITK or OpenCV with complicated image analysis, you can do it easily using just numpy.
That's going to be a lot faster and more reliable, IMHO:
# if a is your image:
same_cols = np.all(a == a[0, :], axis=0)
same_cols_index = np.where(same_cols==False)[0]
C0,C1 = same_cols_index[0], same_cols_index[-1] + 1
same_rows = np.all(a == a[:, 0], axis=1)
same_rows_index = np.where(same_rows==False)[0]
R0,R1 = same_rows_index[0], same_rows_index[-1] + 1
print('rows', R0, R1)
print('cols', C0, C1)
a_snipped = a[R0:R1, C0:C1]
The logic here is:
1. Find all rows and columns that have all values the same as the first row or column. You could change this to all rows/cols with value == 0 if you want.
2. Get the indexes of the rows/columns from (1) where they are not all the same (i.e. == False).
3. Get the first and last index where they aren't all the same.
4. Use the first and last row/column indices to get the corresponding slice of your array (note you need to add 1 to the last index to include it in the slice).
Example
import numpy as np
import matplotlib.pyplot as plt

# make a sample image
a = np.zeros((512, 512), dtype=np.int32)
r0, r1 = 53, 421
c0, c1 = 43, 470
rnd = np.random.randint(-3000, 2000, (r1-r0, c1-c0))
a[r0:r1, c0:c1] = rnd
plt.imshow(a, cmap='gray', vmin=-50, vmax=50)
same_cols = np.all(a == a[0, :], axis=0)
same_cols_index = np.where(same_cols==False)[0]
C0,C1 = same_cols_index[0], same_cols_index[-1] + 1
same_rows = np.all(a == a[:, 0], axis=1)
same_rows_index = np.where(same_rows==False)[0]
R0,R1 = same_rows_index[0], same_rows_index[-1] + 1
print('rows', R0, R1)
print('cols', C0, C1)
a_snipped = a[R0:R1, C0:C1]
plt.imshow(a_snipped, cmap='gray', vmin=-3000, vmax=2000)
rows 53 421
cols 43 470

Remove vignette filter of colored image

I am new to Python OpenCV image processing. I want to remove the border/outline shadow of images as shown below. I checked 'how to remove shadow from scanned images' which does not work for me. Is this even possible?
Your problem of border/outline shadows reminded me of the vignette filter. You can have a look at this question if you want to know more about it. So essentially our task is to remove the effect of the vignette filter and then increase the brightness.
#####VIGNETTE
import cv2
import numpy as np
img = cv2.imread('Paris.jpg')
height, width = img.shape[:2]
original = img.copy()
# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(width, 150)
kernel_y = cv2.getGaussianKernel(height, 150)
kernel = kernel_y * kernel_x.T
mask = 255 * kernel / np.linalg.norm(kernel)
# applying the mask to each channel in the input image
for i in range(3):
    img[:, :, i] = img[:, :, i] * mask
cv2.imshow('Original', original)
cv2.imshow('Vignette', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
To counter the effect, change img[:, :, i] = img[:, :, i] * mask to img[:, :, i] = img[:, :, i] / mask.
Now we need to increase the brightness of the image. For this, we will convert the image to HSV and increase the values of the saturation and value matrices. To learn more about this, you can refer to this article.
#THE FULL CODE
import cv2
import numpy as np
img = cv2.imread('shadow.jpg')
original = cv2.imread('bright.jpg')
height, width = img.shape[:2]
# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(width, 150)
kernel_y = cv2.getGaussianKernel(height, 150)
kernel = kernel_y * kernel_x.T
mask = 255 * kernel / np.linalg.norm(kernel)
test = img.copy()
for i in range(3):
    test[:, :, i] = test[:, :, i] / mask
hsv = cv2.cvtColor(test, cv2.COLOR_BGR2HSV)
hsv = np.array(hsv, dtype = np.float64)
hsv[:,:,1] = hsv[:,:,1]*1.3  ## scale up the saturation channel
hsv[:,:,1][hsv[:,:,1]>255] = 255
hsv[:,:,2] = hsv[:,:,2]*1.3  ## scale up the value (brightness) channel
hsv[:,:,2][hsv[:,:,2]>255] = 255
hsv = np.array(hsv, dtype = np.uint8)
test = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('Original_bright', original)
cv2.imshow('Original_dark', img)
cv2.imshow('Result', test)
cv2.waitKey(0)
cv2.destroyAllWindows()
The result compared with the original bright image.
How the result would have looked without the inverse vignette filter.

Imitating Photoshop's layer mask

I'm trying to remove the background of an image using a mask where the alpha value of a pixel is proportional to the black intensity. For instance, given the following input image and mask, the result contains "faded" areas:
Result:
Note the faded areas. Basically I'm trying to imitate the layer mask function in Photoshop.
I'm able to turn the mask into alpha using binary threshold, but I wonder how to make the alpha proportional. The code for binary threshold is as follows:
mask = cv2.imread(mask_path, 0)
mask2 = np.where(mask<50, 0, 1).astype('uint8')
img = img * mask2[:, :, np.newaxis]
_, alpha = cv2.threshold(mask2, 0, 255, cv2.THRESH_BINARY)
png = np.dstack((img, alpha))
cv2.imwrite(dest_path, png)
I suppose it may be irrelevant, as thresholds are probably not needed for layer masking.
I'm not sure if this is what you want, but you can get the proportional effect by subtracting the values of the mask from the image. That means you have to invert the mask, so the amount of alpha you want to remove is white. For subtract(), the input arrays need to have the same size, so convert the inverted mask to 3 color channels. If the size of the mask is not equal to the background image, you'll first have to create a subimage.
import cv2
import numpy as np
# load background image
img = cv2.imread('grass.jpg')
# load alpha mask as grayscale
mask = cv2.imread('a_mask.jpg',0)
# invert mask and convert to 3 color channels
mask = cv2.bitwise_not(mask)
fullmask = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
# create a subimage with the size of the mask
xOffset = 30
yOffset = 30
height, width = mask.shape[:2]
subimg = img[yOffset:yOffset+height,xOffset:xOffset+width]
#subtract mask values from subimage
res = cv2.subtract(subimg,fullmask)
# put subimage back in background
img[yOffset:yOffset+height,xOffset:xOffset+width] = res
#display result
cv2.imshow('Result',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
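If you want the proportional ("layer mask") effect rather than subtraction, another option is to write the grayscale mask straight into an alpha channel; a minimal sketch, assuming the mask and image are the same size and using hypothetical file names:
import cv2
import numpy as np

# assumed file names; mask is grayscale and the same size as the image
img = cv2.imread('image.jpg')
mask = cv2.imread('mask.jpg', 0)

# use the mask directly as the alpha channel:
# white in the mask -> opaque, black -> fully transparent,
# grey values give the proportional "faded" effect
# (use cv2.bitwise_not(mask) if your mask uses the opposite convention)
bgra = np.dstack((img, mask))

cv2.imwrite('result.png', bgra)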
