Align / rotate / scale / transform two images with OpenCV (Python)

I have a master/golden picture, and I want to align another captured picture to it.
The difference can be in the angle (not that much) and in the scale.
Of course there can also be differences in brightness and in the camera's point of view (again, not that much).
When I take the master/golden picture I record known positions for the objects that interest me, in this case the LEDs. I have to do this because if an LED does not turn on I need to report which one is missing, so the known positions are mainly for identifying which LEDs are missing. That is why I align the captured image: to look at the known positions for the LEDs I want to analyze.
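To make the goal concrete, the downstream check after alignment is roughly this (just a sketch; the ROI coordinates, LED names and brightness threshold are made-up placeholders, and found is the aligned image produced by the code further down):

aligned_gray = cv2.cvtColor(found, cv2.COLOR_BGR2GRAY)
led_rois = {"led_1": (120, 80, 14, 14), "led_2": (140, 80, 14, 14)}  # (x, y, w, h) measured on the golden picture
for name, (x, y, w, h) in led_rois.items():
    roi = aligned_gray[y:y + h, x:x + w]
    if roi.mean() < 100:  # made-up brightness threshold
        print(name, "looks off/missing")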
For example, this is the master/golden picture.
Note that the black rectangle only demonstrates what I am searching for; it is not blended into the picture (only the rectangle around the LED is).
The image I capture and want to align:
What I have tried:
Perspective transform using 2/4 anchor points from the golden picture.
Feature-based matching with algorithms like SURF/SIFT.
SURF/SIFT gave me better results than the first option, but I still have some issues (the LEDs fall outside their rectangles).
See below: after the alignment, some of the LEDs are outside their rectangles (not all of them).
Increasing the rectangle size is not an option, because the LEDs are too close to each other.
I think the issue with SIFT is that it picks up wrong matches, and I have tried to filter them out without success.
How I tried to avoid wrong matches:
I know for a fact that the angle won't change significantly, so I tried to discard matched point pairs whose connecting line has a large slope, using this code: good = [m for m in good if abs((kpts1[m.queryIdx].pt[1] - kpts2[m.trainIdx].pt[1])/(kpts1[m.queryIdx].pt[0] - kpts2[m.trainIdx].pt[0] ))<0.5] but it didn't work, as I could still see matches with a large slope.
I think the solution may be to eliminate matches with a large slope, or matches that are not in roughly the same area, by putting a threshold on the matched points, but I am not sure how to do it.
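For example, this is roughly the kind of filter I have in mind but have not gotten working yet (a sketch only; kpts1, kpts2 and good are the variables from my code below, and the pixel threshold is a made-up value):

import numpy as np

# displacement vector (dx, dy) of every surviving match
disp = np.array([(kpts2[m.trainIdx].pt[0] - kpts1[m.queryIdx].pt[0],
                  kpts2[m.trainIdx].pt[1] - kpts1[m.queryIdx].pt[1]) for m in good])
median = np.median(disp, axis=0)
# keep only matches whose displacement is close to the median displacement,
# since the camera barely moves between the golden and the captured picture
good = [m for m, d in zip(good, disp) if np.linalg.norm(d - median) < 30.0]  # 30 px is a guess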
Does anyone have a solution for me? Maybe something different from SIFT/perspective transform?
All the code I am using:
# options (the command-line paths) and MIN_MATCH_COUNT are defined elsewhere in the script
import re
import pickle
import cv2
import numpy as np

imgname1 = options.source_path
imgname2 = options.output_path
img1 = cv2.imread(imgname1)
img2 = cv2.imread(imgname2)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

surf = cv2.xfeatures2d.SURF_create()
# orb = cv2.ORB_create(nfeatures=1500)
matcher = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), {})
# bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

kpts1, descs1 = surf.detectAndCompute(gray1, None)
kpts2, descs2 = surf.detectAndCompute(gray2, None)
# kpts1, descs1 = orb.detectAndCompute(gray1, None)
# kpts2, descs2 = orb.detectAndCompute(gray2, None)

matches = matcher.knnMatch(descs1, descs2, 2)
# matches = bf.match(descs1, descs2)
matches = sorted(matches, key=lambda x: x[0].distance)

# Lowe's ratio test
good = [m1 for (m1, m2) in matches if m1.distance < 0.7 * m2.distance]
# good = matches[:50]
# slope filter on the match displacement (see question text)
good = [m for m in good if abs((kpts1[m.queryIdx].pt[1] - kpts2[m.trainIdx].pt[1]) /
                               (kpts1[m.queryIdx].pt[0] - kpts2[m.trainIdx].pt[0])) > 1]

canvas = img2.copy()
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kpts1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kpts2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RHO, 5.0)  # tried RANSAC, but RHO gave me the best results
    h, w = img1.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    cv2.polylines(canvas, [np.int32(dst)], True, (0, 255, 0), 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))

matched = cv2.drawMatches(img1, kpts1, canvas, kpts2, good, None, flags=2)

# warp the captured image back into the golden picture's frame
h, w = img1.shape[:2]
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
perspectiveM = cv2.getPerspectiveTransform(np.float32(dst), pts)
found = cv2.warpPerspective(img2, perspectiveM, (w, h))

# save the transform and the result images next to the output path
orig_path = options.output_path
path = re.split("/", orig_path)
orig_path = orig_path.replace(path[-1], "")
with open(orig_path + '/matrix_transform.pkl', 'wb') as outp:
    pickle.dump(perspectiveM, outp, pickle.HIGHEST_PROTOCOL)
    pickle.dump(w, outp, pickle.HIGHEST_PROTOCOL)
    pickle.dump(h, outp, pickle.HIGHEST_PROTOCOL)
cv2.imwrite(orig_path + "/matched.jpg", matched)

# write the warped image as <output>_transform.jpg
my_str = imgname2
substr = ".jpg"
inserttxt = "_transform"
idx = my_str.index(substr)
my_str = my_str[:idx] + inserttxt + my_str[idx:]
print(my_str)
cv2.imwrite(my_str, found)
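Another thing I am considering instead of a full homography (just a sketch, not tested on my setup): since only rotation, scale and translation should change between the golden and the captured picture, a partial affine (similarity) model fitted with RANSAC might reject bad matches more aggressively than findHomography:

# fit a rotation + scale + translation model from the captured image to the golden one
src_pts = np.float32([kpts1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)   # golden
dst_pts = np.float32([kpts2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)   # captured
A, inliers = cv2.estimateAffinePartial2D(dst_pts, src_pts, method=cv2.RANSAC,
                                         ransacReprojThreshold=3.0)
if A is not None:
    h, w = img1.shape[:2]
    aligned = cv2.warpAffine(img2, A, (w, h))  # captured image warped into the golden frame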

Related

Stitching computer images by feature without warping (no camera images)

I've done quite a bit of searching about image stitching in Python, and most of it is about panoramic images, where the images are warped and rotated to combine them into one.
What I'm trying to do uses computer images, so they are digital and can be template-matched without a problem; it will always be 2D, with no need for warping.
Basically I have pieces of a zoomed-in map, and I want to build one massive image from these small pictures. Here are all the images used: https://imgur.com/a/HZIeT3z
import os
import numpy as np
import cv2

def stitchImagesWithoutWarp(img1, img2):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    good_matches = matches[:10]
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    start = (abs(int(dst_pts[0][0][0] - src_pts[0][0][0])), abs(int(dst_pts[0][0][1] - src_pts[0][0][1])))
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((start[1] + h1, start[0] + w1, 3), np.uint8)
    vis[start[1]:start[1] + h1, start[0]:start[0] + w1, :3] = img1
    vis[:h2, :w2, :3] = img2
    return vis

imgList = []
for it in os.scandir("images"):
    imgList.append(cv2.imread(it.path))

vis = stitchImagesWithoutWarp(imgList[0], imgList[1])
for index in range(2, len(imgList)):
    cv2.imshow("result", vis)
    cv2.waitKey()
    vis = stitchImagesWithoutWarp(vis, imgList[index])
By running this code I can successfully stitch the first four images together, like this:
But once I stitch the fifth image it seems to pick a wrong match and stitch incorrectly, even though I always take the best match by distance with NORM_HAMMING. This is the result:
The thing is, this is the first image (in this order) for which the best match point (the start variable) is negative on the x axis. Here are the match offsets, in the imgur order:
(7, 422)
(786, 54)
(394, 462)
(-350, 383)
I attempted switching the top image and writing special-case code for negative matches, but I believe I was only degrading the result.
Also, noting from the docs that the first image should be the query and the second the train/target, I couldn't get it to work by swapping the vis variable in the function parameters.
The main issue was that when matched points fell outside the canvas (negative values), the placement needs offsets to compensate. I also extended the code a little and verified that the matches are legitimate, by checking that the displacement of the first brute-force pick is close to the average displacement of all the matches.
With an average of 2 MB per image, and without preprocessing (no downscaling or compressing), stitching 9 images together took about 1050 ms on average on my PC, whereas the other algorithms I tested (which warp the images) took around 2-3 seconds to stitch just 2 of those images.
Here is the final code:
import os
import numpy as np
import cv2

def averageTuple(tupleList):
    avgX, avgY = 0, 0
    for tuple in tupleList:
        avgX += tuple[0]
        avgY += tuple[1]
    return (int(avgX / len(tupleList)), int(avgY / len(tupleList)))

def tupleInRange(t1, t2, dif=3):
    if t1[0] + dif > t2[0] and t1[0] - dif < t2[0]:
        if t1[1] + dif > t2[1] and t1[1] - dif < t2[1]:
            return True
    return False

def rgbToRGBA(img):
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

def cropAlpha(img, extraRange=0.05):
    y, x = img[:, :, 3].nonzero()  # get the nonzero alpha coordinates
    minx = int(np.min(x) * (1 - extraRange))
    miny = int(np.min(y) * (1 - extraRange))
    maxx = int(np.max(x) * (1 + extraRange))
    maxy = int(np.max(y) * (1 + extraRange))
    return img[miny:maxy, minx:maxx]

def stitchImagesWithoutWarp(img1, img2):
    if len(cv2.split(img1)) != 4:
        img1 = rgbToRGBA(img1)
    if len(cv2.split(img2)) != 4:
        img2 = rgbToRGBA(img2)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    good_matches = matches[:10]
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    pointsList = []
    for index in range(0, len(src_pts)):
        curPoint = (int(dst_pts[index][0][0] - src_pts[index][0][0])), (int(dst_pts[index][0][1] - src_pts[index][0][1]))
        pointsList.append(curPoint)
    start = pointsList[0]
    avgTuple = averageTuple(pointsList)
    if not tupleInRange(start, avgTuple): return img1
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    ax = abs(start[0])
    ay = abs(start[1])
    vis = np.zeros((ay + h1, ax + w1, 4), np.uint8)
    ofst2 = (ax if start[0] < 0 else 0, ay if start[1] < 0 else 0)
    ofst1 = (0 if start[0] < 0 else ax, 0 if start[1] < 0 else ay)
    vis[ofst1[1]:ofst1[1] + h1, ofst1[0]:ofst1[0] + w1, :4] = img1
    vis[ofst2[1]:ofst2[1] + h2, ofst2[0]:ofst2[0] + w2, :4] = img2
    return cropAlpha(vis)

imgList = []
for it in os.scandir("images"):
    imgList.append(cv2.imread(it.path))

vis = stitchImagesWithoutWarp(imgList[0], imgList[1])
for index in range(2, len(imgList)):
    vis = stitchImagesWithoutWarp(vis, imgList[index])

cv2.imwrite("output.png", cropAlpha(vis, 0))
Here is the output image (compressed to JPEG for Stack Overflow):

How to find orientation of a particular SIFT feature/description in OpenCV?

So I have a template and an image. I want to find the location and orientation of the template inside the image. I am using SIFT to find features and descriptors.
The problem is that only one feature is consistently matched correctly. Homography requires at least 4 matches to work, so I get: error: (-28:Unknown error code -28) The input arrays should have at least 4 corresponding point sets to calculate Homography in function 'cv::findHomography'
Since I am working with 2D images at the same scale, the position and rotation of even one correct feature should be enough to give the location and rotation of the template in the image.
From the OpenCV docs: https://docs.opencv.org/3.4/da/df5/tutorial_py_sift_intro.html
OpenCV also provides cv.drawKeyPoints() function which draws the small circles on the locations of keypoints. If you pass a flag, cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS to it, it will draw a circle with size of keypoint and it will even show its orientation.
However, the image I am working with is too low-resolution to actually see the circles, and I need numbers that can be compared.
All the other examples of finding orientation that I can find online use edge detection. However, there is no straight edge in my template whose slope could easily be measured.
This solution could help; however, my images could potentially contain other unwanted objects that would interfere with minAreaRect. If there is any other solution, please let me know.
I have looked for tutorials, books, and documentation on how to crunch the numbers in 'keypoints' and 'descriptors', but I could not find any.
Perhaps I should use SURF, which is faster for 2D, same-color images, but it is not available in the latest OpenCV version.
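For what it's worth, the per-keypoint numbers I am after do seem to be exposed on cv2.KeyPoint (pt, size, angle), so in principle a single good match could give the relative rotation like this (a sketch only, using kp1, kp2 and good_match from my code below; the angle difference is only an approximation of the template's rotation):

m = good_match[0]                               # the single reliable match
k_t, k_i = kp1[m.queryIdx], kp2[m.trainIdx]     # template keypoint, image keypoint
rotation_deg = (k_i.angle - k_t.angle) % 360.0  # relative orientation in degrees
scale = k_i.size / k_t.size                     # relative scale (should be ~1.0 here)
print("template point", k_t.pt, "found at", k_i.pt,
      "rotated by", rotation_deg, "deg, scale", scale)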
Template to be searched
Image to be searched in
Matched
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# img1 = template (grayscale), img2 = image to search in (grayscale), loaded elsewhere

sift = cv.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
print(des1)
# BFMatcher with default params
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
# Apply ratio test
good = []
good_match = []
for m, n in matches:
    if m.distance < .5 * n.distance:
        good.append([m])
        good_match.append(m)
print('good matches are')
print(good)
print(good_match)
# cv.drawMatchesKnn expects list of lists as matches.
img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(img3), plt.show()
src_pts = np.float32([kp1[m.queryIdx].pt for m in good_match]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_match]).reshape(-1, 1, 2)
M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
h, w = img1.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv.perspectiveTransform(pts, M)
img2 = cv.polylines(img2, [np.int32(dst)], True, 255, 3, cv.LINE_AA)
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)
img3 = cv.drawMatches(img1, kp1, img2, kp2, good_match, None, **draw_params)  # drawMatches needs the flat list (good_match)
plt.imshow(img3, 'gray'), plt.show()

OpenCV Key point matcher matches to the wrong areas in Python for certificate template and image

I was using the code given on this site to align a photograph of my certificate to a 'template' I created by converting my PDF certificate to PNG and deleting my name, candidate ID and certification date. Sadly, it matches keypoints to the wrong areas (click here for image), specifically matching my name, candidate ID and the date to areas of the certificate where they shouldn't go. Oddly enough, it works fine when I keypoint-match the photograph with itself (i.e. without deleting my name, date, etc.).
I have tried different matchers, including SIFT and the brute-force matcher, but they all result in the same problem. Does anyone know why this may be happening, and is there anything I can try to overcome it?
Thanks :)
Here is the code that I am using:
# import the necessary packages
import numpy as np
import imutils
import cv2

def align_images(image, template, maxFeatures=500, keepPercent=0.2,
                 debug=True):
    # convert both the input image and template to grayscale
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    templateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    # use ORB to detect keypoints and extract (binary) local
    # invariant features
    orb = cv2.ORB_create(maxFeatures)
    (kpsA, descsA) = orb.detectAndCompute(imageGray, None)
    (kpsB, descsB) = orb.detectAndCompute(templateGray, None)
    # match the features
    method = cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING
    matcher = cv2.DescriptorMatcher_create(method)
    matches = matcher.match(descsA, descsB, None)
    # sort the matches by their distance (the smaller the distance,
    # the "more similar" the features are)
    matches = sorted(matches, key=lambda x: x.distance)
    # keep only the top matches
    keep = int(len(matches) * keepPercent)
    matches = matches[:keep]
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    # draw the matches for debugging (kept in a separate variable so the
    # matches list itself is not overwritten by the visualization image)
    matchedVis = cv2.drawMatches(image, kpsA, template, kpsB, matches, None)
    matchedVis = imutils.resize(matchedVis, width=1000)
    cv2.imshow("Matched Keypoints", matchedVis)
    cv2.waitKey(0)
    for i, match in enumerate(matches):
        points1[i, :] = kpsA[match.queryIdx].pt
        points2[i, :] = kpsB[match.trainIdx].pt
    # Find homography
    H, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # use the homography matrix to align the images
    (h, w) = template.shape[:2]
    aligned = cv2.warpPerspective(image, H, (w, h))
    # return the aligned image
    return aligned

image = cv2.imread(r"C:\Users\Soffie\Documents\GrayceAssignments\certificate_scanner\certif_image2.jpg")
template = cv2.imread(r"C:\Users\Soffie\Documents\GrayceAssignments\certificate_scanner\certificate_template.png")
aligned = align_images(image, template)
aligned = imutils.resize(aligned, width=700)
template = imutils.resize(template, width=700)
stacked = np.hstack([aligned, template])
overlay = template.copy()
output = aligned.copy()
cv2.addWeighted(overlay, 0.5, output, 0.5, 0, output)
cv2.imshow("Image Alignment Stacked", stacked)
cv2.imshow("Image Alignment Overlay", output)
cv2.waitKey(0)
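One idea I have not tried yet (just a sketch): since I know where the name, candidate ID and date sit on the template, I could pass a mask to detectAndCompute so ORB never picks keypoints from those regions, i.e. replacing the template detectAndCompute line inside align_images with something like the following (the rectangle coordinates are hypothetical placeholders):

# build a mask that is 255 everywhere except the variable regions of the template
maskB = np.full(templateGray.shape, 255, dtype=np.uint8)
for (x, y, w, h) in [(350, 400, 600, 60), (350, 480, 400, 40)]:  # hypothetical name/ID/date boxes
    maskB[y:y + h, x:x + w] = 0
(kpsB, descsB) = orb.detectAndCompute(templateGray, maskB)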

Removing a grid in scanned/photographed medical documents

I'm a dental student and currently trying to write a script for analyzing and extracting handwritten digits from dental records. I already have a rough version of the script finished but my recognition rate is pretty low. A big problem with analyzing the data is a grid that proves difficult to remove.
Scanned form that I want to analyse (white fields are for anonymity):
Empty form:
I've tried different solutions to this problem (erosion/dilation, the Hough line transform, and subtraction of the lines).
Using feature matching and subtracting an empty template currently gives me the best results.
Results:
Eroding and dilating this image gives even better results.
Results:
But this needs new calibration nearly every time I try it.
Do you know of a more elegant solution to my problem?
Could SURF matching give better results?
Thank you very much!
Here's my code so far:
import numpy as np
import cv2

GOOD_MATCH_PERCENT = 0.15

def match_img_to_template(input_img, template_img, MAX_FEATURES, GOOD_MATCH_PERCENT):
    # blurring of the template image
    template_img = cv2.GaussianBlur(template_img, (3, 3), cv2.BORDER_DEFAULT)
    # equalizing the histogram of the input image
    img_preprocessed = cv2.equalizeHist(input_img)
    # ORB Detector
    orb = cv2.ORB_create(MAX_FEATURES)
    kp1, des1 = orb.detectAndCompute(img_preprocessed, None)
    kp2, des2 = orb.detectAndCompute(template_img, None)
    # Brute Force Matching
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(des1, des2, None)
    matches.sort(key=lambda x: x.distance, reverse=False)
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Remove not so good matches (note: this trims by GOOD_MATCH_PERCENT a second time)
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography
    height, width = template_img.shape
    input_warped = cv2.warpPerspective(input_img, h, (width, height))
    ret1, input_warped_thresh = cv2.threshold(input_warped, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    diff = cv2.absdiff(template_img, input_warped_thresh)
    ret, diff = cv2.threshold(diff, 20, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C + cv2.THRESH_BINARY)
    diff = cv2.equalizeHist(diff)
    # Create kernels
    kernel1 = np.ones((3, 3), np.uint8)
    kernel2 = np.ones((6, 6), np.uint8)
    # erode and dilate to remove the grid
    diff_erode = cv2.erode(diff, kernel1)
    diff_dilated = cv2.dilate(diff_erode, kernel2)
    # invert diff_dilated
    diff_dilated_inv = cv2.bitwise_not(diff_dilated)
    return diff_dilated_inv
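For reference, another grid-removal idea I have been playing with but have not calibrated for my forms (a sketch only; it assumes a binarized image img_bin where the grid lines are white on black, and the kernel lengths are guesses):

# extract long horizontal and vertical strokes (the grid) with elongated structuring elements
horiz_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1))
vert_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 40))
horiz_lines = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, horiz_kernel)
vert_lines = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, vert_kernel)
grid = cv2.bitwise_or(horiz_lines, vert_lines)
# remove the grid from the binarized form, keeping the handwriting
no_grid = cv2.bitwise_and(img_bin, cv2.bitwise_not(grid))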

OpenCV Python - Similarity score from feature matching + homography

I have several fish images in my database. My goal is to find a similarity score between a user-input fish image and the images in the database. For that I am using OpenCV feature matching + homography from this link:
http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.html#feature-homography
My current code is as follows:

import os
import cv2
import numpy as np

query_image = '/home/zealous/Pictures/train_images/AbudefdufWhitleyiJER.jpg'
trained_image_folder = '/home/zealous/Pictures/train_images'

def feature_matcher(query_image, image_folder):
    min_match_count = 10
    img1 = cv2.imread(query_image, 0)
    surf = cv2.xfeatures2d.SURF_create(800)
    kp1, des1 = surf.detectAndCompute(img1, None)
    bf = cv2.BFMatcher(cv2.NORM_L2)
    all_files = next(os.walk(image_folder))[2]
    for file_name_temp in all_files:
        try:
            train_image = image_folder + '/' + file_name_temp
            img2 = cv2.imread(train_image, 0)
            surf = cv2.xfeatures2d.SURF_create(800)
            kp2, des2 = surf.detectAndCompute(img2, None)
            matches = bf.knnMatch(des1, des2, k=2)
            good = []
            for m, n in matches:
                if m.distance < 0.7 * n.distance:
                    good.append(m)
            if len(good) > min_match_count:
                src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
                dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
                M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
                matchesMask = mask.ravel().tolist()
                h, w = img1.shape
                pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, M)
                if M is not None:
                    print("\n")
                    print("-" * 2, file_name_temp)
                    print("number of good matches", len(good))
                    print("*" * 10, matchesMask)
        except Exception:
            # the original snippet appears truncated here; skipping files that
            # fail to load or match is assumed
            continue
I am getting pretty good output, judging by the number of good matches and the matchesMask variable (which contains some 0s and 1s). If the database contains the same image as the input image, there are many good matches and all the matchesMask elements are 1.
My question is how to calculate a similarity score based on this. Should I assume that the more 1s (inliers) there are in matchesMask, the more similar the two images are, or should I take the ratio between the number of 1s (inliers) and 0s (outliers) and calculate the similarity from that?
I know this has been discussed in many questions, but all the suggestions and answers are in C++, so I can't figure out the solution.
In a similarity score you don't want to include outliers - they are outliers because they don't help with your data. Just take the number of 1s (inliers) as the similarity score - you should get decent results.
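A minimal sketch of that, using the variables from the question's code (the normalized variant is just an optional idea for comparing scores across database images of different sizes):

inlier_count = int(mask.sum())                 # number of 1s in the RANSAC mask
score = inlier_count                           # similarity score: inliers only
normalized = inlier_count / float(len(good))   # optional: fraction of good matches that are inliers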
