Let's say I have an image of a book cover that I want to "flatten". To do so it seems like I would need to perform 2 perspective transforms: one just for the front cover and one just for the back cover:
What would be the most efficient way to do this?
Using a 600x600 pixel image homography-test.jpg:
import cv2
import numpy as np
#load image
img = cv2.imread('homography-test.jpg', cv2.IMREAD_COLOR)
#corners of book covers (before)
frontCoverPtsBefore = np.array([[32, 48], [279, 136], [247, 430], [39, 281]], dtype="float32")
backCoverPtsBefore = np.array([[279, 136], [474, 36], [463, 316], [247, 430]], dtype="float32")
#corners of book covers (after)
frontCoverPtsAfter = np.array([[0, 0], [299, 0], [299, 599], [0, 599]], dtype="float32")
backCoverPtsAfter = np.array([[300, 0], [599, 0], [599, 599], [300, 599]], dtype="float32")
#get the transformation matrices for both covers
M_front = cv2.getPerspectiveTransform(frontCoverPtsBefore, frontCoverPtsAfter)
M_back = cv2.getPerspectiveTransform(backCoverPtsBefore, backCoverPtsAfter)
#warpPerspective both images
img_front = cv2.warpPerspective(img, M_front, (600, 600))
img_back = cv2.warpPerspective(img, M_back, (600, 600))
#copy half of the warped back cover into the warped front cover
np.copyto(img_front[:, 300:, :], img_back[:, 300:, :])
#display before and after
cv2.imshow('img', img)
cv2.imshow('img_front', img_front)
cv2.waitKey(0)
cv2.destroyAllWindows()
Before and After:
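If efficiency is the concern, one small variation (a sketch, not benchmarked) is to warp each cover directly into its own 300x600 half and stitch the halves, so neither warpPerspective call renders pixels that get overwritten later:
#both covers map onto the same 300x600 half-canvas
halfAfter = np.array([[0, 0], [299, 0], [299, 599], [0, 599]], dtype="float32")
M_front2 = cv2.getPerspectiveTransform(frontCoverPtsBefore, halfAfter)
M_back2 = cv2.getPerspectiveTransform(backCoverPtsBefore, halfAfter)
#warp each cover into its half and stitch side by side
flattened = np.hstack([cv2.warpPerspective(img, M_front2, (300, 600)),
                       cv2.warpPerspective(img, M_back2, (300, 600))])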
Original Image
Expected Output.
I am using this code to translate a specific part within the same image, but the output is not changing:
import numpy as np
import cv2 as cv
img = cv.imread('eye0.jpg', 0)
rows, cols = img.shape
roi = img[200: 300, 360: 450]
M = np.float32([[1, 0, 100], [0, 1, 50]])
dst = cv.warpAffine(roi, M, roi.shape)
cv.imshow('img', img)
cv.imshow('img', dst)
cv.waitKey(0)
cv.destroyAllWindows()
I see no change from the original image. How can I do this? Moreover, as an OpenCV newbie, which function should I use or explore here to get what I want?
The copy() function can help you here instead of warpAffine(). Here is the output and code:
import numpy as np
import cv2 as cv
img = cv.imread('eye.jpg', 1)
#rows, cols = img.shape  (only valid for single-channel images)
#copy the region of interest, then paste it at a shifted location
roi = img[80: 100, 140: 160]
img2 = img.copy()
img2[95: 115, 140: 160] = roi
cv.imshow('img', img)
cv.imshow('img2', img2)
cv.waitKey(0)
cv.destroyAllWindows()
Image after warpAffine transformation... but circling the part seems difficult:
import numpy as np
import cv2 as cv
img = cv.imread('eye.jpg')
#take a 22x30 (rows x cols) patch to move
roi = img[78: 100, 130: 160]
#shift right by 6 px and down by 4 px
M = np.float32([[1, 0, 6], [0, 1, 4]])
dst = cv.warpAffine(roi, M, (30, 22))
#paste the shifted patch back into the image
img[80: 102, 132: 162] = dst
cv.imwrite("newimage.jpg", img)
cv.imshow('img', img)
cv.imshow('img1', dst)
cv.waitKey(0)
cv.destroyAllWindows()
I want to detect lines in an image that are at 45 degrees only, with respect to the origin, and I have to do it with a 3x3 convolution only. I have solved it such that all lines at 45 degrees are removed and everything else stays (the inverse of what I want). Any help in getting from here to my final goal will be highly appreciated. Thanks.
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('Lines.png')
plt.imshow(img, cmap='gray')
plt.show()
kernel = np.array([[0, -1, 0],
                   [1, 0, 1],
                   [0, -1, 0]])
dst = cv2.filter2D(img, -1, kernel)
cv2.imwrite("filtered.png", dst)
This is the image before convolution:
This is the image after convolution:
Well, the code you provided in the question gives us all the lines except the ones we want. So we can take that result and dilate it to fill in the lines.
img = cv2.imread('Lines.png')
kernel = np.array([[0, -1, 0],
                   [1, 0, 1],
                   [0, -1, 0]])
dst = cv2.filter2D(img, -1, kernel)
kernel = np.ones((5, 5), np.uint8)
dilated = cv2.dilate(dst, kernel, iterations=1)
Then we need to remove the dots above the 45-degree lines, so we use a morphological opening for that and threshold the image to set all the line pixels to value 255.
kernel = np.ones((7, 7), np.uint8)
opening = cv2.morphologyEx(dilated, cv2.MORPH_OPEN, kernel)
_, thresh = cv2.threshold(opening, 10, 255, cv2.THRESH_BINARY)
Then, taking cv2.bitwise_and of the original image with cv2.bitwise_not of the threshold we obtained, we get our lines.
res = cv2.bitwise_and(img, cv2.bitwise_not(thresh))
We obtain the lines, but we need to remove the circle in the middle. For that, we use cv2.erode on the original image to obtain only the middle circle, threshold it, and then again use cv2.bitwise_and and cv2.bitwise_not to remove it from res.
kernel = np.ones((7, 7), np.uint8)
other = cv2.erode(img, kernel, iterations=1)
_, thresh = cv2.threshold(other, 10, 255, cv2.THRESH_BINARY)
result = cv2.bitwise_and(res, cv2.bitwise_not(thresh))
cv2.imshow("Image", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
The filter I used is:
kernel = np.array([[0, -25, 1],
                   [-25, 5, -25],
                   [1, -25, 0]])
and the result was:
It wasn't perfect, but I hope it helps.
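For comparison, the classical 3x3 line-detection kernel from the image-processing literature (e.g. Gonzalez & Woods) responds most strongly along one diagonal; swapping the two diagonals selects the other 45-degree orientation. A minimal sketch, assuming the same Lines.png input:
import cv2
import numpy as np
img = cv2.imread('Lines.png', cv2.IMREAD_GRAYSCALE)
#2s along one diagonal, -1 elsewhere: peaks where a thin
#diagonal line passes through the 3x3 neighbourhood
kernel = np.array([[-1, -1, 2],
                   [-1, 2, -1],
                   [2, -1, -1]], dtype=np.float32)
dst = cv2.filter2D(img, -1, kernel)
cv2.imwrite('filtered45.png', dst)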
import numpy as np
import cv2
im = cv2.imread("goldstandard.png")
nemo = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
hsv_nemo = cv2.cvtColor(nemo, cv2.COLOR_RGB2HSV)
dictionaryHSV = {
    'greenCombo': [[30, 126, 87], [70, 255, 250]],
    'red': [[0, 92, 212], [10, 255, 255]],
    'blue': [[110, 7, 214], [130, 255, 255]],
    'black': [[0, 0, 0], [10, 10, 40]],
    'another1': [[20, 245, 151], [40, 255, 231]],
    'pink': [[140, 126, 215], [160, 146, 255]]
}
for r1, r2 in dictionaryHSV.values():
    lower = np.array(r1)
    upper = np.array(r2)
    mask = cv2.inRange(hsv_nemo, lower, upper)
    # cv2.imshow("masked", mask)
    # cv2.waitKey(0)
    #white canvas the same size as the image
    nm = np.full(nemo.shape, 255, dtype=np.uint8)
    result = cv2.bitwise_and(nm, nm, mask=mask)
    cv2.imshow("mapped", result)
    cv2.waitKey(0)
I have curve plot images, and I want to separate all the curves based on color. The problem: when I come across a black curve, I get the black curve along with the black text in the plot, but I want only the curve, not the text. I used color ranges in the HSV color space to recognize the colors. Thanks in advance.
1. Extract the region inside the square.
2. Remove all non-black pixels.
3. Find all contours.
4. Select the biggest contour; it will be your curve. (A minimal sketch of these steps follows below.)
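A minimal sketch of those four steps, assuming a hypothetical plot.png, placeholder coordinates for the square, and a black threshold of 50; note that cv2.findContours returns three values in OpenCV 3.x but two in 2.x/4.x:
import cv2
import numpy as np
img = cv2.imread('plot.png')  #hypothetical filename
roi = img[50:450, 60:560]     #1. region inside the square (placeholder coords)
#2. keep only near-black pixels
black = cv2.inRange(roi, np.array([0, 0, 0]), np.array([50, 50, 50]))
#3. find all contours in the black mask (OpenCV 2.x/4.x return order)
contours, _ = cv2.findContours(black, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#4. the biggest contour should be the curve; text glyphs are much smaller
curve = max(contours, key=cv2.contourArea)
mask = np.zeros(black.shape, np.uint8)
cv2.drawContours(mask, [curve], -1, 255, -1)
result = cv2.bitwise_and(roi, roi, mask=mask)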
The documentation is https://docs.opencv.org/3.0-beta/modules/shape/doc/shape_transformers.html
void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector<DMatch>& matches)
So I ran my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
tps = cv2.createThinPlateSplineShapeTransformer()
sshape = np.array([[67, 90], [206, 90], [67, 228], [206, 227]], np.float32)
tshape = np.array([[64, 63], [205, 122], [67, 263], [207, 192]], np.float32)
sshape = sshape.reshape(1, -1, 2)
tshape = tshape.reshape(1, -1, 2)
matches = list()
matches.append(cv2.DMatch(0, 0, 0))
matches.append(cv2.DMatch(1, 1, 0))
matches.append(cv2.DMatch(2, 2, 0))
matches.append(cv2.DMatch(3, 3, 0))
tps.estimateTransformation(sshape, tshape, matches)
ret, tshape = tps.applyTransformation(sshape)
img = cv2.imread('tiger.jpg', 1)
out_img = tps.warpImage(img)
plt.imshow(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
plt.show()
cv2.waitKey(0)
The result looks like the reverse of what I expected.
So I changed my code to tps.estimateTransformation(tshape, sshape, matches), and I got the expected result.
Is this wrong in the documentation, or is my code at fault?
I've been trying to do a 4 point perspective transform in order to start doing some OCR.
Starting with the following image, I can detect the number plate
and crop it out, with the green box being the bounding box and the red dots being the corners of the rectangle I want to square up.
This is the output of the transform.
At first look, it seems to have done the transform inside out (taking the parts on either side rather than between the points).
I'm using the imutils package to do the transform and working from this and this as a guide. I'm sure it's something relatively simple I'm missing.
#!/usr/bin/python
import numpy as np
import cv2
import imutils
from imutils import contours
from imutils.perspective import four_point_transform
img = cv2.imread("sample7-smaller.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 15, 75, 75)
v = np.median(blurred)
lower = int(max(0, (1.0 - 0.33) * v))
upper = int(min(255, (1.0 + 0.33) * v))
edged = cv2.Canny(blurred, lower, upper, 255)
conts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                         cv2.CHAIN_APPROX_SIMPLE)
conts = conts[0] if imutils.is_cv2() else conts[1]
conts = sorted(conts, key=cv2.contourArea, reverse=True)
for cnt in conts:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    if len(approx) == 4:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        for i in approx:
            cv2.circle(img, (i[0][0], i[0][1]), 2, (0, 0, 255), thickness=4)
        warped = four_point_transform(img, approx.reshape(4, 2))
cv2.imshow("crop", img[y:y + h, x:x + w])
cv2.imshow("warped", warped)
cv2.waitKey(0)
I would recommend using OpenCV's perspective transform method to get the desired results for the given image.
First, mark the positions of the source points:
src_pts = np.array([[8, 136], [415, 52], [420, 152], [14, 244]], dtype=np.float32)
Suppose you want to fit this number plate into a matrix of shape 50x200, so the destination points would be:
dst_pts = np.array([[0, 0], [200, 0], [200, 50], [0, 50]], dtype=np.float32)
Find the perspective transform matrix:
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warp = cv2.warpPerspective(img, M, (200, 50))
EDIT: Since you didn't want to hard-code the final width and height of the plate, you can make the calculation more flexible by computing the width and height from the 4 marker points:
def get_euler_distance(pt1, pt2):
    #Euclidean distance between two points
    return ((pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2)**0.5
src_pts = np.array([[8, 136], [415, 52], [420, 152], [14, 244]], dtype=np.float32)
#pass whole points, not single coordinates
width = get_euler_distance(src_pts[0], src_pts[1])
height = get_euler_distance(src_pts[0], src_pts[3])
dst_pts = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
#warpPerspective needs an integer output size
warp = cv2.warpPerspective(img, M, (int(width), int(height)))