How to flip an image with OpenCV and Python (without cv2.flip)

I just want to flip an image vertically without cv2.flip(), but the output is a completely black image. Where is my mistake?
import cv2
import numpy as np

def flipv(imgg):
    for i in range(480):
        img2 = np.zeros([480, 640, 3], np.uint8)
        img2[i, :] = imgg[480 - i - 1, :]
    return img2

img = cv2.imread("foto\\test.jpg", 1)
ads = flipv(img)
cv2.imshow("qw", ads)
cv2.waitKey(0)
cv2.destroyAllWindows()

A simple function to flip images using OpenCV:
import cv2
import matplotlib.pyplot as plt

def flip(img, axes):
    if axes == 0:
        # vertical flip (around the x-axis)
        return cv2.flip(img, 0)
    elif axes == 1:
        # horizontal flip (around the y-axis)
        return cv2.flip(img, 1)
    elif axes == -1:
        # flip in both directions
        return cv2.flip(img, -1)

bflp = flip(img, -1)
plt.imshow(bflp)
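One caveat (an aside, not from the original answer): cv2.imread loads images in BGR channel order while matplotlib expects RGB, so the colors will look swapped unless you convert first:
rgb = cv2.cvtColor(bflp, cv2.COLOR_BGR2RGB)  # swap BGR -> RGB for matplotlib
plt.imshow(rgb)
plt.show()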

You can use NumPy slicing with a negative step, as shown below. It is a lot faster:
cv2.imshow("flipped image", im[::-1])

Your flipping logic is correct (up to which direction you call a "vertical" flip), but you should create img2 outside of your for loop, only once. As written, the zero array is re-created on every iteration, so only the final row assignment survives and the rest of the image stays black.
def flipv(imgg):
    img2 = np.zeros([480, 640, 3], np.uint8)
    for i in range(480):
        img2[i, :] = imgg[480 - i - 1, :]
    return img2
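As a side note, the hard-coded 480x640 shape can be read from the input instead; a minimal sketch of the same loop:
def flipv(imgg):
    # match the input's shape and dtype so any image size works
    img2 = np.zeros(imgg.shape, imgg.dtype)
    rows = imgg.shape[0]
    for i in range(rows):
        img2[i, :] = imgg[rows - i - 1, :]
    return img2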

Using numpy.flip:
flip_v = np.flip(img, 0)  # flip along axis 0 (rows): vertical flip
flip_h = np.flip(img, 1)  # flip along axis 1 (columns): horizontal flip
See also: numpy.flip
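np.flip also accepts a tuple of axes (NumPy 1.15+), so both flips can be done in one call:
flip_vh = np.flip(img, (0, 1))  # flip vertically and horizontally at once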

Related

How to get the pixel coordinates of the region of interest using OpenCV?

I'm trying to get the pixel coordinates of a specific ROI in an image. I created the ROI using a mask. The code and the result are shown below.
import cv2
import numpy as np
img = cv2.imread("Inxee.jpg")
img = cv2.resize(img, (640, 480))
mask = np.zeros(img.shape, np.uint8)
points = np.array([[273,167], [363, 167], [573, 353], [63, 353]]) ##taking random points for ROI.
cv2.fillPoly(mask, [points], (100, 0, 100))
img = cv2.addWeighted(img, 0.7, mask, 0.5, 0)
values = img[np.where((mask == (100, 0, 100)).all(axis=1))]
print(values)
##cv2.imshow("mask", mask)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
result image
So in the image we can see the ROI.
I tried using
values = img[np.where((mask == (100, 0, 100)).all(axis=1))]
but that gives me only the pixel values, not their coordinates.
So is there any way to get those coordinates?
Thanks for the solutions and possibilities, friends. I just did:
val = np.where(mask > 0)  # note: mask < 0 can never be true for a uint8 array
coordinate = list(zip(val[0], val[1]))
print(coordinate)
With this I got the coordinates!
Thanks!
To get the coordinates, you could do it like this:
pixels_in_roi = [(i_x, i_y) for i_x, col in enumerate(mask) for i_y, val in enumerate(col) if (val == (100, 0, 100)).all()]
There are faster ways to do it.
I'm not sure what your goal is in the end, but it sounds like undistorting this area as if it were a top-down view could be the next step. In that case this might help you: https://pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
Edit: Here is a better solution using np.where: https://stackoverflow.com/a/27175491/7438122
The input 'mask' then has to be a NumPy array (which it already is in your case), not a list.
Christoph is right though: If you tell us what you want to achieve, there might be a way without extracting the indices at all.
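For reference, a minimal sketch of the np.where route from the linked answer, applied to the 3-channel mask from the question:
ys, xs = np.where((mask == (100, 0, 100)).all(axis=2))  # row and column indices of ROI pixels
coordinates = list(zip(xs, ys))  # (x, y) pairs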

How to resize and translate a masked image over a background OpenCV and Python

By doing a bit of my own googling and following this tutorial, I have created the Python script below. It finds the most dominant (common) color in an image and replaces it with another "background" image. It basically creates a mask and places it on top of the background image. My question is: how would I resize and translate the mask? I am a complete beginner to OpenCV with Python, so some code examples with explanation would go a long way :).
Here is the script:
import os
#from colorthief import ColorThief
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
imgDirec = "/Users/.../images"
def find_dominant_color(filename):
    # Resizing parameters
    width, height = 150, 150
    image = Image.open(filename)
    image = image.resize((width, height), resample=0)
    # Get colors from the image object
    pixels = image.getcolors(width * height)
    # Sort them by count (the first element of each tuple)
    sorted_pixels = sorted(pixels, key=lambda t: t[0])
    # The most frequent color comes last after sorting
    dominant_color = sorted_pixels[-1][1]
    return dominant_color
filepath = "/Users/.../image.jpg" #Foreground Image
dominant_color = find_dominant_color(filepath)
#dominant_color = color_thief.get_color(quality=1)
print(dominant_color)
image = cv2.imread(filepath)
image_copy = np.copy(image)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
lower_blue = np.array([dominant_color[0]-20, dominant_color[1]-20, dominant_color[2]-20]) ##[R value, G value, B value]
upper_blue = np.array([dominant_color[0]+20, dominant_color[1]+20, dominant_color[2]+20])
#plt.imshow(image_copy)
mask = cv2.inRange(image_copy, lower_blue, upper_blue)
#plt.imshow(mask, cmap='gray')
masked_image = np.copy(image_copy)
masked_image[mask != 0] = [0, 0, 0]
#plt.imshow(masked_image)
background_image = cv2.imread('/Users/.../background1.jpg')
background_image = cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB)
crop_background = background_image[0:image_copy.shape[0], 0:image_copy.shape[1]]
crop_background[mask == 0] = [0, 0, 0]
#plt.imshow(crop_background)
#These Transformations do not work as intended.
newImg = cv2.resize(crop_background, (0,0), fx=2, fy=2)
height, width = masked_image.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
img_translation = cv2.warpAffine(masked_image, T, (width, height))
final_image = crop_background + masked_image
plt.imshow(final_image)
plt.show()
This is image.jpg
This is background1.jpg
And running the script right now I get:
I want to be able to make the person smaller and translate him around the background. How would I do this? Also, is there any way to keep the background image at its original size while putting the smaller picture of the person on top? Again, I am a beginner (primarily an iOS dev), so there may be a pretty obvious solution. Please enlighten me!
Thanks in advance!
To answer this, you need to find two things in the code. First: on which line is the background cropped? It happens in the line below:
crop_background = background_image[0:image_copy.shape[0], 0:image_copy.shape[1]]
So to translate the person within the background, you must define two offsets. I will do it like this:
x_offset=100 # translate in x-axis
y_offset=200 # translate in y-axis
crop_background = background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]
So far we have added translation, but how can we see the whole background instead of only the cropped part? To do that, write final_image back into the exact region we cropped from:
background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]=final_image
By adding this line, the new picture will look like this:
So what about resizing the image? OpenCV has a function named cv2.resize that can resize an image to any size. I resize your image to (100, 200) in the line below and re-run the code:
image = cv2.resize(image, (100, 200))
And the result will be:
The whole code looks like this:
import os
#from colorthief import ColorThief
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
imgDirec = "/home/isv/Desktop/"
def find_dominant_color(filename):
    # Resizing parameters
    width, height = 150, 150
    image = Image.open(filename)
    image = image.resize((width, height), resample=0)
    # Get colors from the image object
    pixels = image.getcolors(width * height)
    # Sort them by count (the first element of each tuple)
    sorted_pixels = sorted(pixels, key=lambda t: t[0])
    # The most frequent color comes last after sorting
    dominant_color = sorted_pixels[-1][1]
    return dominant_color
filepath = "/home/isv/Desktop/image.jpg" #Foreground Image
dominant_color = find_dominant_color(filepath)
#dominant_color = color_thief.get_color(quality=1)
print(dominant_color)
image = cv2.imread(filepath)
image = cv2.resize(image,(100,200)) #added line
image_copy = np.copy(image)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
lower_blue = np.array([dominant_color[0]-20, dominant_color[1]-20, dominant_color[2]-20]) ##[R value, G value, B value]
upper_blue = np.array([dominant_color[0]+20, dominant_color[1]+20, dominant_color[2]+20])
#plt.imshow(image_copy)
mask = cv2.inRange(image_copy, lower_blue, upper_blue)
#plt.imshow(mask, cmap='gray')
masked_image = np.copy(image_copy)
masked_image[mask != 0] = [0, 0, 0]
#plt.imshow(masked_image)
background_image = cv2.imread('/home/isv/Desktop/background1.jpg')
background_image = cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB)
x_offset=100 #added line
y_offset=200 #added line
crop_background = background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset] #change line
crop_background[mask == 0] = [0, 0, 0]
#plt.imshow(crop_background)
#These Transformations do not work as intended.
newImg = cv2.resize(crop_background, (0,0), fx=2, fy=2)
height, width = masked_image.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
img_translation = cv2.warpAffine(masked_image, T, (width, height))
final_image = crop_background + masked_image
background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]=final_image #added line
plt.imshow(final_image)
plt.show()
plt.figure() # added line
plt.imshow(background_image) # added line
plt.show() # added line
I hope that this code will help you.
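If you also want to preserve the person's aspect ratio while shrinking, one option is to scale the foreground and its mask by the same single factor before the crop-and-paste step. A minimal sketch (the overlay_person helper and its parameter names are illustrative, not from the answer above):
def overlay_person(foreground, background, mask, scale, x_offset, y_offset):
    # resize the foreground and its mask together so they stay aligned
    fg = cv2.resize(foreground, (0, 0), fx=scale, fy=scale)
    m = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
    h, w = fg.shape[:2]
    roi = background[y_offset:y_offset + h, x_offset:x_offset + w]
    roi[m == 0] = fg[m == 0]  # mask == 0 marks the person (non-dominant color)
    return background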

Python - Remove black outline & overlay PNG image on JPEG image

I have two images:
Fragments from painting
Whole painting
I need to solve two issues:
1st. On the first image, I need to remove the black outline from each fragment. I've tried threshold and erosion, but neither of them worked. How can I do that?
2nd. I can't overlay the first image on the second, and I really don't know why. It always results in the first image covering the second completely, with black pixels where the second image should show through.
I'm using Python3 and OpenCV 3.2, on Ubuntu 18.04.
My program:
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import cv2
import sys
plano_f = cv2.imread("Domenichino_Virgin-and-unicorn.jpg")
sobrepor = cv2.imread("Domenichino_Virgin-and-unicorn_img.png")
plano_f = cv2.cvtColor(plano_f, cv2.COLOR_BGR2GRAY, -1)
#sobrepor_BGRA = cv2.cvtColor(sobrepor, cv2.COLOR_BGR2BGRA)
sobrepor_BGRA = cv2.imread("nova_png.png", -1)
plt.imshow(sobrepor_BGRA),plt.show()
rows, cols, han = sobrepor_BGRA.shape
total = rows*cols
#printProgressBar(0, total, prefix="Executando...", suffix="completo", length=50)
'''for i in range(rows):
    for j in range(cols):
        if(sobrepor_BGRA[i, j][0] <= 5 and sobrepor_BGRA[i, j][1] <= 5 and sobrepor_BGRA[i, j][2] <= 5 and sobrepor_BGRA[i, j][3] != 0):
            sobrepor_BGRA[i, j] = (0, 0, 0, 0)
            #printProgressBar(i*j, total, prefix='Executando...', suffix='completo', length=50)
    sys.stdout.write("\rExecutando linha " + str(i) + " de " + str(rows) + "...")
    sys.stdout.flush()
cv2.imwrite("nova_png.png", sobrepor_BGRA)'''
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
#sobrepor_BGRA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_BGRA2GRAY, -1)
sobrepor_BGRA = cv2.erode(sobrepor_BGRA, kernel, iterations=3)
#sobrepor_BGRA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_GRAY2BGRA)
cv2.imwrite("nova_png2.png", sobrepor_BGRA)
#sobrepor_RGBA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_BGRA2RGBA)
#plt.imshow(sobrepor_RGBA),plt.show()
sys.stdout.write("\nPronto!")
nova_img = cv2.addWeighted(sobrepor_BGRA, 1, plano_f, 0, 0)
cv2.imwrite("combined.png", nova_img)
plt.imshow(nova_img),plt.show()
You can use bitwise operations to do this. The idea is to obtain a mask of the missing sections of the fragments, then bitwise-AND each half against the whole painting and add the results together. Here are two halves of the image: one is the fragments you already have and the other is the missing sections.
We combine both halves to get the whole painting
import cv2
import numpy as np
fragment = cv2.imread('1.jpg')
whole = cv2.imread('2.jpg')
fragment[np.where((fragment <= [250,250,250]).all(axis=2))] = [0]
result1 = cv2.bitwise_and(whole, fragment)
result2 = cv2.bitwise_and(whole, 255 - fragment)
final = result1 + result2
cv2.imshow('result1', result1)
cv2.imshow('result2', result2)
cv2.imshow('final', final)
cv2.waitKey()
1st: your image is a JPEG, which means the black lines around the pieces are imperfect due to compression artifacts, so a simple threshold or dilation isn't going to remove them completely. You can try saving in a lossless format and cleaning it up by hand in an image editor; you may even want to do that after an erosion has removed most of it.
2nd: why don't you just copy with a mask using the copyTo function? Here is an example:
import cv2
img1 = cv2.imread('x2djw.jpg')
img2 = cv2.imread('5RnNh.jpg')
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
thr, img1_mask = cv2.threshold(img1, 250, 255, cv2.THRESH_BINARY_INV)
img1_mask = img1_mask[:, :, 0] & img1_mask[:, :, 1] & img1_mask[:, :, 2]
el = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
img1_mask = cv2.erode(img1_mask, el)
img2 = cv2.merge((img2, img2, img2))
img2 = cv2.copyTo(img1, img1_mask, img2)
cv2.imwrite('test_result.png', img2)
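As an aside, if the PNG really carries an alpha channel, you can composite with it directly instead of rebuilding a mask from thresholds. A sketch (the filenames are placeholders, and the overlay is assumed to match the background's size):
overlay = cv2.imread('overlay.png', cv2.IMREAD_UNCHANGED)  # 4-channel BGRA
background = cv2.imread('background.jpg')                  # 3-channel BGR
alpha = overlay[:, :, 3:4].astype(float) / 255.0           # shape (H, W, 1), broadcasts over B, G, R
blended = (alpha * overlay[:, :, :3] + (1 - alpha) * background).astype('uint8')
cv2.imwrite('combined.png', blended)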

OpenCV Python: Normalize image

I'm new to OpenCV. I want to do some preprocessing related to normalization. I want to normalize my image to a certain size. The result of the following code gives me a black image. Can someone point out what exactly I am doing wrong? The image I am inputting is a black/white image.
import cv2 as cv
import numpy as np
img = cv.imread(path)
normalizedImg = np.zeros((800, 800))
cv.normalize(img, normalizedImg, 0, 255, cv.NORM_MINMAX)
cv.imshow('dst_rt', self.normalizedImg)
cv.waitKey(0)
cv.destroyAllWindows()
As one can see at http://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#cv2.normalize, the signature contains a → dst, which says that the result of the normalize function is returned as output. The function does not modify your dst in place here, so you must use the return value.
(The self. in the cv.imshow('dst_rt', self.normalizedImg) line is a typo.)
import cv2 as cv
import numpy as np
path = r"C:\Users\Public\Pictures\Sample Pictures\Hydrangeas.jpg"
img = cv.imread(path)
normalizedImg = np.zeros((800, 800))
normalizedImg = cv.normalize(img, normalizedImg, 0, 255, cv.NORM_MINMAX)
cv.imshow('dst_rt', normalizedImg)
cv.waitKey(0)
cv.destroyAllWindows()
It's giving you a black image because you are probably using different sizes in img and normalizedImg.
import cv2 as cv
img = cv.imread(path)
img = cv.resize(img, (800, 800))
cv.normalize(img, img, 0, 255, cv.NORM_MINMAX)
cv.imshow('dst_rt', img)
cv.waitKey(0)
cv.destroyAllWindows()
Update: In NumPy there are more intuitive ways to do this ref:
a = np.random.rand(3,2)
# Normalised [0,1]
b = (a - np.min(a))/np.ptp(a)
# Normalised [0,255] as integer: don't forget the parenthesis before astype(int)
c = (255*(a - np.min(a))/np.ptp(a)).astype(int)
# Normalised [-1,1]
d = 2.*(a - np.min(a))/np.ptp(a)-1
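Applied to an image, the same min-max idea looks like this (a sketch; cast to float first so the arithmetic doesn't wrap around in uint8):
img = cv.imread(path).astype(float)
normalized = (img - img.min()) / (img.max() - img.min())  # floats in [0, 1]
img_u8 = (255 * normalized).astype('uint8')               # back to 8-bit if needed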
When you call cv.imshow() you use self.normalizedImg instead of normalizedImg.
The self. prefix identifies class members, and its use here is not appropriate; the code shouldn't even run as written. I assume it was extracted from a class definition, but you must be consistent when naming variables: self.normalizedImg is different from normalizedImg.

How to Mask an image using Numpy/OpenCV?

I have an image I load with:
im = cv2.imread(filename)
I want to keep data that is in the center of the image. I created a circle as a mask of the area I want to keep.
I created the circle with:
height,width,depth = im.shape
circle = np.zeros((height,width))
cv2.circle(circle,(width/2,height/2),280,1,thickness=-1)
How can I mask out the data outside of the circle from the original image?
masked_data = im * circle
does not work.
Use cv2.bitwise_and and pass the circle as mask.
im = cv2.imread(filename)
height,width,depth = im.shape
circle_img = np.zeros((height,width), np.uint8)
cv2.circle(circle_img, (width // 2, height // 2), 280, 1, thickness=-1)  # integer division: the center must be ints in Python 3
masked_data = cv2.bitwise_and(im, im, mask=circle_img)
cv2.imshow("masked", masked_data)
cv2.waitKey(0)
circle is just a 2D array with 1.0s and 0.0s. Numpy needs help to understand what you want to do with the third dimension of your im so you must give it an extra axis and then your line would work.
masked_data = im * circle[..., np.newaxis]
But note that the masking is simply setting the color to (0, 0, 0) for things outside the circle according to your code if the image lacks an alpha-channel.
However, you have another potential problem: circle will be of the default data type (probably float64). That's not good for your image, so you should change the line where you create circle to
circle = np.zeros((height, width), dtype=im.dtype)
Using NumPy assignment to an indexed array:
im[circle == 0] = [0, 0, 0]
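If you'd rather make the outside of the circle transparent than black, a sketch building on circle_img from the accepted answer (requires saving to a format with alpha, such as PNG):
bgra = cv2.cvtColor(im, cv2.COLOR_BGR2BGRA)  # add an alpha channel
bgra[:, :, 3] = circle_img * 255             # opaque inside the circle, transparent outside
cv2.imwrite("masked.png", bgra)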
If you want a circular image, another approach is to work with the pixel coordinates directly: test whether each pixel lies inside the circle, and replace the ones that don't with some value (or a transparent value, if your image format allows it).
Here is an example:
import cv2
import numpy as np

im = cv2.imread('sss.png')

def facechop(im):
    height, width, depth = im.shape
    x = width // 2
    y = height // 2
    # draw the filled circle on its own mask instead of on the image
    circle = np.zeros((height, width), np.uint8)
    cv2.circle(circle, (x, y), 180, 1, thickness=-1)
    cv2.rectangle(im, (x - 180, y - 180), (x + 180, y + 180), (0, 0, 255), 2)
    crop_img = im[y - 180:y + 180, x - 180:x + 180]
    crop_circle = circle[y - 180:y + 180, x - 180:x + 180]
    # black out the cropped pixels that fall outside the circle
    crop_img[crop_circle == 0] = [0, 0, 0]
    cv2.imshow('im', crop_img)

if __name__ == '__main__':
    facechop(im)
    while True:
        key = cv2.waitKey(20)
        if key in [27, ord('Q'), ord('q')]:
            break
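For reference, the per-pixel coordinate test can also be vectorized with NumPy instead of looping over rows; a minimal sketch, without the crop (im is the BGR image from above):
h, w = im.shape[:2]
ys, xs = np.ogrid[:h, :w]                                      # coordinate grids that broadcast to (h, w)
outside = (xs - w // 2) ** 2 + (ys - h // 2) ** 2 > 180 ** 2   # radius 180, as in the answer
im[outside] = [0, 0, 0]                                        # black out everything outside the circle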
