I need to join two images: the base image has a transparent background, and I need to place a photo into the blank space of the second image. I already tried to do it using the code below.
image 01
second image
expected result
And this is the code I tried:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
dim = (425, 425)
apple = mpimg.imread('image.png')
apple = cv2.resize(apple, dim)
banana = mpimg.imread('image2.png')
banana = cv2.resize(banana, dim)
_ = plt.imshow(apple)
_ = plt.show()
_ = plt.imshow(banana)
_ = plt.show()
list_images = [apple, banana]
def blend(list_images):  # Blend images equally.
    equal_fraction = 1.0 / (len(list_images))
    output = np.zeros_like(list_images[0])
    for img in list_images:
        output = output + img * equal_fraction
    output = output.astype(np.uint8)
    _ = plt.imshow(output)
    return output
output = blend(list_images)
_ = plt.imshow(output)
You can easily make use of your alpha (transparency) channel for this purpose. Unfortunately, when I tried reading your frame image using cv2.IMREAD_UNCHANGED, it didn't have the appropriate alpha channel. Judging by your result, the region outside the rounded corners is white.
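(For reference, a quick way to check whether an image actually carries an alpha channel — a minimal sketch, file name illustrative:)
frame = cv2.imread('frame.png', cv2.IMREAD_UNCHANGED)
print(frame.shape)  # (H, W, 4) means an alpha channel is present; (H, W, 3) means it is not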
So I created the alpha channel from your frame image and used it as follows.
# Reading images
tr = cv2.imread('frame.png')
la = cv2.imread('sunset.jpg')
# resizing
dim = (425, 425)
tr = cv2.resize(tr, dim)
la = cv2.resize(la, dim)
# Finding the largest contour (white region) from the first channel in frame
th = cv2.threshold(tr[:, :, 0], 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
contours, hierarchy = cv2.findContours(th, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
c = max(contours, key=cv2.contourArea)
# Draw the largest contour onto a mask
black = np.zeros((tr.shape[0], tr.shape[1]), np.uint8)
mask = cv2.drawContours(black, [c], 0, 255, -1)
Mask image: we want the sunset image to be present in the white region
# Create 3-channel mask of float datatype
alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)/255.0
# Perform blending and limit pixel values to 0-255
blended = cv2.convertScaleAbs(tr*(1-alpha) + la*alpha)
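(To preview or save the composite — a trivial follow-up, output file name illustrative:)
cv2.imshow('blended', blended)
cv2.waitKey(0)
cv2.imwrite('framed_sunset.png', blended)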
Here is the minimalistic code needed to apply the mask:
import cv2
img1 = cv2.imread("img1.jpg")
img2 = cv2.imread("img2.png", cv2.IMREAD_GRAYSCALE)
img1 = cv2.resize(img1, img2.shape[1::-1])
img1[img2 < 128] = 0
cv2.imshow("Image", img1)
cv2.waitKey(0)
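The one-liner img1[img2 < 128] = 0 works because the boolean mask from img2 < 128 indexes the first two dimensions of img1 and the assignment broadcasts across all three color channels, so every pixel where the mask image is dark is set to black.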
Input images img1.jpg and img2.png:
Output:
I'm trying to transfer pixel values from one image to another.
So, basically, I have 2 images, and my goal is to transfer the colors of img1 to img2 according to the regions.
link to img1, img2 and expected image
I know how to extract a color channel from an image, but I am not able to achieve the required result. I'll highly appreciate any help.
My approach:
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from natsort import natsorted  # natsorted is used below but was not imported
os.chdir('/kaggle/input/maskedoutput')
stroke_list = natsorted(os.listdir())
for i, v in enumerate(stroke_list):
    image = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if image.shape[2] == 4:
        # flatten the alpha channel onto a white background
        a1 = ~image[:, :, 3]
        image = cv2.add(cv2.merge([a1, a1, a1, a1]), image)
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    plt.imshow(image)
    plt.show()
    copy = image.copy()
    kernel = np.ones((15, 15), np.uint8)
    closing = cv2.morphologyEx(copy, cv2.MORPH_CLOSE, kernel)
    img_erode = cv2.erode(closing, kernel, iterations=1)  # to suppress the black outline
    height, width, channel = img_erode.shape
    for x1 in range(0, width):
        for y1 in range(0, height):
            channels_x1y1 = img_erode[y1, x1]
    os.chdir('/kaggle/input/maskstrokes')
    output = natsorted(os.listdir())
    for idx, v1 in enumerate(output):
        if v == v1:
            print(v, v1)
            img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
            subtracted = cv2.subtract(img_out, img_erode)
        else:
            continue
    plt.imshow(cv2.cvtColor(subtracted, cv2.COLOR_BGR2RGB))
    plt.show()
Here I mean to first erode the original colored image in order to suppress the black outline. Next, I extract the color pixels; after reading image2, I try to subtract img1 from it, so that the residual would be the colored outline. But this code is not working and gives me this error:
---------------------------------------------------------------------------
error Traceback (most recent call last)
/tmp/ipykernel_33/3647166721.py in <module>
43 print(v, v1)
44 img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
---> 45 subtracted = cv2.subtract(img_out, img_erode)
46 # if img_out.shape[2] == 4:
47 # a1 = ~img_out[:,:,3]
error: OpenCV(4.5.4) /tmp/pip-req-build-jpmv6t9_/opencv/modules/core/src/arithm.cpp:647: error: (-209:Sizes of input arguments do not match) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function 'arithm_op'
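(For context: cv2.subtract requires both arrays to have the same size and the same number of channels. A minimal diagnostic sketch, reusing the names above:)
print(img_out.shape, img_erode.shape)  # e.g. (h, w, 4) vs (h, w, 3) would explain the failure
if img_out.shape[2] == 4:
    img_out = cv2.cvtColor(img_out, cv2.COLOR_BGRA2BGR)  # drop the alpha channel
img_out = cv2.resize(img_out, (img_erode.shape[1], img_erode.shape[0]))
subtracted = cv2.subtract(img_out, img_erode)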
Another approach was to directly pick the color pixels from image1 and transfer them to the second image, but as you can see, the image has 3 parts with different colors, so that is not working either.
Code:
os.chdir('/kaggle/input/maskedoutput')
stroke_list = natsorted(os.listdir())
for i, v in enumerate(stroke_list):
    image = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if image.shape[2] == 4:
        a1 = ~image[:, :, 3]
        image = cv2.add(cv2.merge([a1, a1, a1, a1]), image)
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    plt.imshow(image)
    plt.show()
    copy = image.copy()
    kernel = np.ones((15, 15), np.uint8)
    closing = cv2.morphologyEx(copy, cv2.MORPH_CLOSE, kernel)
    img_erode = cv2.erode(closing, kernel, iterations=1)  # to suppress the black outline
    height, width, channel = img_erode.shape
    for x1 in range(0, width):
        for y1 in range(0, height):
            channels_x1y1 = img_erode[y1, x1]
    os.chdir('/kaggle/input/maskstrokes')
    output = natsorted(os.listdir())
    for idx, v1 in enumerate(output):
        if v == v1:
            print(v, v1)
            img_out = cv2.imread(v1, cv2.IMREAD_UNCHANGED)
            height2, width2, channel2 = img_out.shape
            for x1 in range(0, width2):
                for y1 in range(0, height2):
                    channels_x1y1 = img_out[y1, x1]
        else:
            continue
    plt.imshow(cv2.cvtColor(img_out, cv2.COLOR_BGR2RGB))
    plt.show()
Blocker image
I prepared a quick-fix solution based on the expected output.
I used the following image as input:
Code:
img = cv2.imread('colored_objects.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# binary mask
mask = cv2.threshold(gray,10,255,cv2.THRESH_BINARY)[1]
# inverted binary mask
th = cv2.threshold(gray,10,255,cv2.THRESH_BINARY_INV)[1]
# finding external contours based on inverted binary mask
contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# create copy of original image to draw contours
img2 = img.copy()
In the following, we iterate through each contour. For each contour:
- get the centroid of the contour (centroid)
- get the color at the centroid from the original image (color)
- draw the contour with that color on the copy of the original image (img2)
code:
for c in contours:
    M = cv2.moments(c)
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    centroid = (cx, cy)
    color = tuple(img[cy, cx])
    color = (int(color[0]), int(color[1]), int(color[2]))
    print(color)
    img2 = cv2.drawContours(img2, [c], 0, color, -1)
Now we subtract the original image from the newly drawn one to get r. Then, based on mask, wherever the pixels are white we assign white in r:
r = img2 - img
r[mask == 255] = (255, 255, 255)
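Note that img2 - img is plain NumPy subtraction, which wraps around on uint8 underflow; cv2.subtract(img2, img) would saturate at 0 instead, which is usually closer to what you want.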
Update:
Expected result for the latest image. The green shade is present on the border as expected. This was obtained using the same code without any changes:
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('jelly.jpg')  # reading the image
cv2.imshow('img', img)  # initial image
cv2.waitKey(0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # converting image to RGB
pixel_vals = img.reshape((-1, 3))  # reshaping the 3D color image to a 2D array of pixels
pixel_vals = np.float32(pixel_vals)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.85)  # setting criteria for kmeans
k = 5  # number of clusters
retval, labels, centers = cv2.kmeans(pixel_vals, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
segmented_data = centers[labels.flatten()]
segmented_img = segmented_data.reshape(img.shape)  # final image
cv2.imshow('K-means segmented img', segmented_img)  # showing the final image after k-means segmentation
cv2.waitKey(0)
cv2.destroyAllWindows()  # closing all image windows
I want to get only, say, the violet part or the brown part according to intensity. I have tried looking, but I am not able to find any function for this, as there is a chance that blue or any other color is present in different shades. Is there a way to get a particular shade of a color while masking out the other areas?
Original Image
K means segmented image
I am not sure what you want, but if you want to save each color as its own image from kmeans in Python/OpenCV, then this should do that.
Input:
import cv2
import numpy as np
# read the input image
image = cv2.imread('jellyfish.png')
h, w, c = image.shape
# reshape to 1D array
image_2d = image.reshape(h*w, c).astype(np.float32)
# set number of colors
numcolors = 5
numiters = 10
epsilon = 1
attempts = 10
# do kmeans processing
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, numiters, epsilon)
ret, labels, centers = cv2.kmeans(image_2d, numcolors, None, criteria, attempts, cv2.KMEANS_RANDOM_CENTERS)
# reconstitute 2D image of results
centers = np.uint8(centers)
newimage = centers[labels.flatten()]
newimage = newimage.reshape(image.shape)
cv2.imwrite("jellyfish_kmeans.png", newimage)
cv2.imshow('new image', newimage)
cv2.waitKey(0)
k = 0
for center in centers:
    # select one cluster color and create an exact-match mask for it
    #print(center)
    layer = newimage.copy()
    mask = cv2.inRange(layer, center, center)
    # apply mask to layer
    layer[mask == 0] = [0, 0, 0]
    cv2.imshow('layer', layer)
    cv2.waitKey(0)
    # save the layer for this cluster color
    cv2.imwrite("jellyfish_layer{0}.png".format(k), layer)
    k = k + 1
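Note that cv2.inRange(layer, center, center) works here because every pixel in newimage is exactly one of the k cluster-center colors, so an exact-match range isolates a single cluster.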
Kmeans Result:
Individual Colors:
I am not sure what you want to do, because your description seems to ask for one thing and your title for a completely different one. But I have segmented the parts you wanted.
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('jelly.png')
plt.imshow(img)
plt.show()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
pixel_vals =img.reshape((-1,3))
pixel_vals = np.float32(pixel_vals)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,100,0.85)
k= 5
retval, labels, centers = cv2.kmeans(pixel_vals,k,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
clustered_img = labels.reshape((img.shape[0], img.shape[1]))
clusters_to_0 = [1, 2, 4]
for c in clusters_to_0:
    clustered_img[clustered_img == c] = -1
clustered_img[clustered_img != -1] = 1
clustered_img[clustered_img == -1] = 0
plt.imshow(clustered_img)
plt.show()
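Keep in mind that the cluster indices (here 1, 2 and 4) depend on the random initialization of k-means, so they can change between runs; inspect clustered_img and adjust clusters_to_0 accordingly, or call cv2.setRNGSeed() beforehand to make the runs repeatable.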
I suggest another approach: transform the image to the HSV color space and then threshold the Hue channel, since it carries the information about the tonality of the colors:
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('jelly.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)  # img is already RGB at this point
h, s, v = cv2.split(hsv)
(_, th) = cv2.threshold(h, 0, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
plt.subplot(131)
plt.imshow(img)
plt.title('Original image')
plt.subplot(132)
plt.imshow(h)
plt.title('Hue channels of the HSV color-space')
plt.subplot(133)
plt.imshow(th)
plt.title('Thresholded image')
plt.show()
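(If you want the pixels themselves rather than the plot, the thresholded image can serve directly as a mask — a small sketch reusing the variables above:)
selected = cv2.bitwise_and(img, img, mask=th)  # th is 0/1; any nonzero value counts as "keep"
plt.imshow(selected)
plt.show()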
I have detected two lines in an image using cv2. Now I want to get the RGB values of both lines in separate variables, like left_line_variable = ['rgb values'] and right_line_rgb_values = ['rgb values'].
Here is my code:
import cv2
import numpy as np
image = cv2.imread('tape.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply adaptive threshold
image_thr = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 81, 2)
# Apply morphological opening with vertical line kernel
kernel = np.ones((image.shape[0], 1), dtype=np.uint8) * 255
image_mop = cv2.morphologyEx(image_thr, cv2.MORPH_OPEN, kernel)
color_detected_img = cv2.bitwise_and(image, image, mask=image_mop)
cv2.imshow('image', color_detected_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is the image from which I want to get both lines' RGB values in two variables as described above:
Maybe it is not the most optimal way, but it is not hard to do. As I said in my comments, you can label the image to segment the lines, then get the mean of the RGB values within each label and its average position to know which one is left and which is right. Here is a small script to demonstrate what I mean. The last part is just there to show the results.
import cv2
import numpy as np
# load img and get the greyscale
img = cv2.imread("x.png")
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# label the image
ret, thres = cv2.threshold(grey, 1, 255, cv2.THRESH_BINARY)
labelAmount, labels = cv2.connectedComponents(thres)
# get the mean of the color and position
values = []
# first label (0) is background
for i in range(1, labelAmount):
mask = np.zeros(labels.shape, dtype=np.uint8)
mask[labels == i] = 255
mean = cv2.mean(img, mask)[:-1]
meanPos = np.mean(cv2.findNonZero(mask), axis=0)[0]
values.append((mean, meanPos))
# sort them by x value (left to right)
values = sorted(values, key = lambda v : v[1][0])
left_line_color = values[0][0]
right_line_color = values[1][0]
# just to show the results
left_only = np.zeros(img.shape, dtype=np.uint8)
right_only = np.zeros(img.shape, dtype=np.uint8)
left_only = cv2.line(left_only, (int(values[0][1][0]), 0), (int(values[0][1][0]), img.shape[0]), left_line_color, 5)
right_only = cv2.line(right_only, (int(values[1][1][0]), 0), (int(values[1][1][0]), img.shape[0]), right_line_color, 5)
cv2.imshow("left_line", left_only)
cv2.imshow("right_line", right_only)
cv2.imshow("original", img)
cv2.waitKey(0)
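Note that the means come from cv2.mean on an image loaded with cv2.imread, so the tuples are in BGR order; reverse them (e.g. left_line_color[::-1]) if you need literal RGB values.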
I have been able to remove about 75% of the background from my original image, but I am struggling to fine-tune my Python code to remove the last bit.
Original Image
Output Image
As you can see there is one section of the background on the lower half of the image that isn't being removed along with the rest.
import os, time
import numpy as np
import cv2
import matplotlib.pyplot as plt
org_file_name = 'IMG_3237_reduced.jpg'
#Read Image File
img = cv2.imread(org_file_name)
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (1,1,1008,756)
rect2 = (11,222,975, 517)
# Perform the GrabCut on the Image File
t1 = time.perf_counter()  # time.clock() was removed in Python 3.8
cv2.grabCut(img,mask,rect2,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
t2 = time.perf_counter()
print(t2-t1)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
gc_img = img*mask2[:,:,np.newaxis]
# convert to grayscale
gc_img_gray = cv2.cvtColor(gc_img, cv2.COLOR_BGR2GRAY)
_,alpha = cv2.threshold(gc_img_gray,0,255,cv2.THRESH_BINARY)
b, g, r = cv2.split(gc_img)
rgba = [b,g,r, alpha]
gc_split_img = cv2.merge(rgba,4)
# display results
#ax1 = plt.subplot(131); plt.imshow(img)
#ax1.set_title('Original')
#ax2 = plt.subplot(132); plt.imshow(gc_img)
#ax2.set_title('GrabCut')
ax3 = plt.subplot(111); plt.imshow(gc_split_img)
ax3.set_title('GrabCut Split')
plt.show()
I've attached my working code above. I appreciate any help someone can offer. My plan is that once the background is removed, I can do some analysis/statistical modeling on the region of interest for further comparison.
If this is a one-time process, I don't think you need to use GrabCut. I'd suggest something like this, which combines spatial thresholding with simple code-value thresholding:
import cv2
import numpy
# Read Image File
img = cv2.imread('NY3Ne.jpg')
# convert RGB to grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# note: cv2.threshold returns (threshold_value, binary_image)
mask, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
rect_x = [15, 990]
rect_y = [220, 530]
y, x = numpy.indices(img.shape[:2])
# zero out pixels above the Otsu threshold (i.e. where the binary image is 255;
# 'mask' here is the scalar threshold value returned by cv2.threshold)
img[mask < thresh] = 0
# set everything outside the rectangle to 0
img[(x < rect_x[0])] = 0
img[(x > rect_x[1])] = 0
img[(y < rect_y[0])] = 0
img[(y > rect_y[1])] = 0
cv2.imshow('masked', img)
cv2.waitKey(0)
But if grabcut is necessary for some other reason, I could combine it with simple thresholding to get the desired result. 150 is kind of arbitrary based on your image, but you could substitute Otsu or any other adaptive binary threshold calculation method.
alpha = np.where(gc_img_gray < 150, 255, 0).astype(np.uint8)
gc_img[alpha==0] = 0
More info: https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
I have trained a model to produce a segmentation of an image, and the output image looks like this:
The original image is like this:
I have tried OpenCV to subtract the two images with:
image1 = cv2.imread("cristiano-ronaldo.jpg")
image2 = cv2.imread("cristiano-ronaldo_seg.png")
image3 = cv2.absdiff(image1, image2)
But the output is not what I need. I would like to have Cristiano on a white background. How can I achieve that?
Explanation:
As your files already have the right shapes, (BGR) and (A), it is very easy to accomplish what you are trying to do. Here are the steps:
1) Load the original image as BGR (in OpenCV the RGB channels are stored in reverse order)
2) Load the "mask" image as a single channel, A
3) Merge the original image's BGR channels with your mask image as the A (alpha) channel
Code:
import numpy as np
import cv2
# Load the original image as BGR and the mask as a single channel
img1 = cv2.imread('ronaldo.png', 3)        # read as BGR
img2 = cv2.imread('ronaldoMask.png', 0)    # read as grayscale, to be used as alpha
kernel = np.ones((2, 2), np.uint8)         # kernel for the erosion
img2 = cv2.erode(img2, kernel, iterations=2)  # shrink the mask slightly
height, width, depth = img1.shape
combinedImage = cv2.merge((img1, img2))
cv2.imwrite('ronaldocombine.png',combinedImage)
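The erosion shrinks the white mask by a couple of pixels, so the alpha edge sits just inside the segmentation boundary and the dark outline around the subject is less likely to show in the composite.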
Output:
After reading the segmented image, convert it to grayscale, then threshold it to get the foreground mask and the background mask. Then use cv2.bitwise_and to "crop" the foreground or background as you want.
#!/usr/bin/python3
# 2017.11.26 09:56:40 CST
# 2017.11.26 10:11:40 CST
import cv2
import numpy as np
## read
img = cv2.imread("img.jpg")
seg = cv2.imread("seg.png")
## create fg/bg mask
seg_gray = cv2.cvtColor(seg, cv2.COLOR_BGR2GRAY)
_,fg_mask = cv2.threshold(seg_gray, 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
_,bg_mask = cv2.threshold(seg_gray, 0, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
## convert mask to 3-channels
fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR)
bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR)
## cv2.bitwise_and to extract the region
fg = cv2.bitwise_and(img, fg_mask)
bg = cv2.bitwise_and(img, bg_mask)
## save
cv2.imwrite("fg.png", fg)
cv2.imwrite("bg.png", bg)