Unable to find enough matches between two images to stitch them both - python

I'm trying to stitch two images together, but I'm failing because the program doesn't detect enough matches between them.
Here's the code:
import numpy as np
import imutils
import cv2

class Stitcher:
    def __init__(self):
        self.isv3 = imutils.is_cv3()

    def stitch(self, images, ratio=0.75, reprojThresh=5.0,
               showMatches=False):
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        M = self.matchKeypoints(kpsA, kpsB,
                                featuresA, featuresB, ratio, reprojThresh)
        if M is None:
            return None

        (matches, H, status) = M
        result = cv2.warpPerspective(imageA, H,
                                     (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                                   status)
            return (result, vis)

        return result

    def detectAndDescribe(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        if self.isv3:
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)
        else:
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # only process the match if the keypoint was successfully matched
            if s == 1:
                # draw the match
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        # return the visualization
        return vis
And here are the original images:
Image A:
Image B:
matched points:
The result of stitching:
The result is nowhere near what I want, and if I'm correct, it's because there aren't enough matching points between the two images.
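If the two photos genuinely overlap, one quick sanity check is OpenCV's built-in high-level stitcher, which handles detection, matching and blending internally; if it also fails, the overlap or texture between the photos is probably the real problem. This is only a sketch, and the file names are hypothetical:
import cv2

imageA = cv2.imread("imageA.jpg")  # hypothetical file names
imageB = cv2.imread("imageB.jpg")

# cv2.createStitcher() on OpenCV 3.x, cv2.Stitcher_create() on 4.x
stitcher = cv2.createStitcher() if hasattr(cv2, "createStitcher") else cv2.Stitcher_create()
status, stitched = stitcher.stitch([imageA, imageB])
if status == 0:  # cv2.Stitcher_OK
    cv2.imwrite("stitched.png", stitched)
else:
    print("Stitching failed, status code:", status)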

Related

Stitching computer images by feature without warping (no camera images)

I've done quite a bit of searching on image stitching in Python, and most of what I found is for panoramic images, warping and rotating the images to combine them into one.
What I'm trying to do uses computer-generated images, so they are digital and can be template matched without a problem; everything is 2D, with no need for warping.
Basically, I have pieces of a zoomed-in map and I want to build one massive image out of these small pictures. Here are all the images used: https://imgur.com/a/HZIeT3z
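Since the tiles are digital and pixel-exact, the translation between two overlapping tiles could in principle also be recovered with phase correlation instead of feature matching; a minimal sketch, assuming two overlapping tiles img1 and img2 of identical size:
import cv2
import numpy as np

# cv2.phaseCorrelate expects single-channel float32 (or float64) images of equal size
g1 = np.float32(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
g2 = np.float32(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
(dx, dy), response = cv2.phaseCorrelate(g1, g2)
print("estimated shift:", dx, dy)
The code I have so far uses ORB feature matching instead: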
import os
import numpy as np
import cv2

def stitchImagesWithoutWarp(img1, img2):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    good_matches = matches[:10]

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

    start = (abs(int(dst_pts[0][0][0] - src_pts[0][0][0])), abs(int(dst_pts[0][0][1] - src_pts[0][0][1])))

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((start[1] + h1, start[0] + w1, 3), np.uint8)
    vis[start[1]:start[1] + h1, start[0]:start[0] + w1, :3] = img1
    vis[:h2, :w2, :3] = img2
    return vis

imgList = []
for it in os.scandir("images"):
    imgList.append(cv2.imread(it.path))

vis = stitchImagesWithoutWarp(imgList[0], imgList[1])
for index in range(2, len(imgList)):
    cv2.imshow("result", vis)
    cv2.waitKey()
    vis = stitchImagesWithoutWarp(vis, imgList[index])
By running this code I can successfully stitch the first four images together, like this:
But once I stitch the fifth image, the match seems to be wrong and the image is placed incorrectly, even though I always take the best match by distance with NORM_HAMMING. This is the result:
The thing is, this is the first image (in this order) for which the best match point (the start variable) is negative on the x axis. Here are the match points, in the imgur order:
(7, 422)
(786, 54)
(394, 462)
(-350, 383)
I attempted switching the top image and writing special-case code for negative matches, but I believe I was only hurting the result.
Also, the docs note that the first image should be the query and the second the train (target) image, but I couldn't get it to work by swapping the order of the vis argument in the function call.
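For reference, with bf.match(des1, des2) the first descriptor set is the query and the second is the train set, so each match's displacement can be read off like this (a small sketch reusing the kp1, kp2 and good_matches names from the code above):
# queryIdx indexes into kp1/des1 (the first argument), trainIdx into kp2/des2 (the second)
for m in good_matches[:5]:
    (x1, y1) = kp1[m.queryIdx].pt
    (x2, y2) = kp2[m.trainIdx].pt
    dx, dy = x2 - x1, y2 - y1  # offset of the matched feature between the two tiles
    print(dx, dy)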
The main issue was that when matched points fell off the canvas (negative values), offsets were needed to compensate. I also extended the code a bit and verified that the matches were legitimate, by checking that the calculated displacements were on average close to the first (best) pick from the brute-force match.
With an average of 2 MB per image and no preprocessing (no downscaling or compressing), stitching 9 images together took about 1050 ms on my PC, whereas the other algorithms I tested (which warp the images) took around 2-3 seconds to stitch just 2 of those images.
Here is the final result:
import os
import numpy as np
import cv2

def averageTuple(tupleList):
    avgX, avgY = 0, 0
    for tuple in tupleList:
        avgX += tuple[0]
        avgY += tuple[1]
    return (int(avgX / len(tupleList)), int(avgY / len(tupleList)))

def tupleInRange(t1, t2, dif=3):
    if t1[0] + dif > t2[0] and t1[0] - dif < t2[0]:
        if t1[1] + dif > t2[1] and t1[1] - dif < t2[1]:
            return True
    return False

def rgbToRGBA(img):
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

def cropAlpha(img, extraRange=0.05):
    y, x = img[:, :, 3].nonzero()  # get the nonzero alpha coordinates
    minx = int(np.min(x) * (1 - extraRange))
    miny = int(np.min(y) * (1 - extraRange))
    maxx = int(np.max(x) * (1 + extraRange))
    maxy = int(np.max(y) * (1 + extraRange))
    return img[miny:maxy, minx:maxx]

def stitchImagesWithoutWarp(img1, img2):
    if len(cv2.split(img1)) != 4:
        img1 = rgbToRGBA(img1)
    if len(cv2.split(img2)) != 4:
        img2 = rgbToRGBA(img2)

    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    good_matches = matches[:10]

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

    pointsList = []
    for index in range(0, len(src_pts)):
        curPoint = (int(dst_pts[index][0][0] - src_pts[index][0][0])), (int(dst_pts[index][0][1] - src_pts[index][0][1]))
        pointsList.append(curPoint)
    start = pointsList[0]
    avgTuple = averageTuple(pointsList)

    if not tupleInRange(start, avgTuple):
        return img1

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]

    ax = abs(start[0])
    ay = abs(start[1])
    vis = np.zeros((ay + h1, ax + w1, 4), np.uint8)

    ofst2 = (ax if start[0] < 0 else 0, ay if start[1] < 0 else 0)
    ofst1 = (0 if start[0] < 0 else ax, 0 if start[1] < 0 else ay)

    vis[ofst1[1]:ofst1[1] + h1, ofst1[0]:ofst1[0] + w1, :4] = img1
    vis[ofst2[1]:ofst2[1] + h2, ofst2[0]:ofst2[0] + w2, :4] = img2

    return cropAlpha(vis)

imgList = []
for it in os.scandir("images"):
    imgList.append(cv2.imread(it.path))

vis = stitchImagesWithoutWarp(imgList[0], imgList[1])
for index in range(2, len(imgList)):
    vis = stitchImagesWithoutWarp(vis, imgList[index])

cv2.imwrite("output.png", cropAlpha(vis, 0))
Here is the output image (compressed as JPEG for Stack Overflow):

OpenCV, Python: Perspective warping problem in aerial image stitching

Currently, I'm working on image stitching of aerial footage. I'm using the dataset from OrchardDataset. First of all, thanks to some great answers on Stack Overflow, especially the answers from @alkasm (Here and Here). But I'm having an issue, as you can see below in the Gap within the stitched image section.
I used H21, H31, H41, etc. to warp the images. The stitched image using H21 is excellent, but when I warp img3 onto the current stitched image using H31, the result shows terrible alignment between img3 and the current stitched image. The more images I warp, the bigger the gap gets and the worse the alignment becomes.
Does the brilliant Stack Overflow community have any ideas on how I can solve this problem?
These are the steps I use to stitch the images:
Extract a frame every second from the footage and undistort it to get rid of the fish-eye effect using the provided camera calibration matrix.
Compute the SIFT feature descriptors. Set up a matcher using a FLANN kd-tree and find matches between the images. Find the homographies (H21, H32, H43 and so on, where H21 refers to the homography which warps img2 into the coordinates of img1).
Compose each homography with the previous ones to get the net homography, using the method suggested Here (compute H31, H41, H51, etc.; see the sketch after these steps).
Warp the images using the answer provided Here.
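As a concrete sketch of step 3, the net homography is just the (normalised) product of the pairwise ones; for example, with pairwise matrices H21 (img2 to img1) and H32 (img3 to img2):
import numpy as np

# x1 = H21 . x2 and x2 = H32 . x3, so x1 = (H21 . H32) . x3
H31 = H21 @ H32       # warps img3 into img1's coordinate frame
H31 /= H31[2, 2]      # keep the matrix normalised to limit numerical drift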
Gap within the stitched image:
I'm using the first 10 images from OrchardDataSet.
Stitched Image with Gaps
Here's portion of my script:
main.py
ref_img is the first frame (img1). AdjHomoSet contains the images to be warped (img2, img3, img4, etc.). AccHomoSet contains the net homographies (H31, H41, H51, etc.).
temp_mosaic = ref_img
h, w = temp_mosaic.shape[:2]

# Warp the images
for x in range(1, (len(AccHomoSet) + 1)):
    query_img = AdjHomoSet['H%d%d' % (x + 1, (x))][1]
    M_homo = AccHomoSet['H%d1' % (x + 1)]
    M_homo_inv = np.linalg.inv(M_homo)

    (shifted_transf, dst_padded) = warpPerspectivePadded(query_img,
                                                         temp_mosaic,
                                                         M_homo_inv)
    dst_pad_h, dst_pad_w = dst_padded.shape[:2]
    next_img_warp = cv2.warpPerspective(query_img, shifted_transf,
                                        (dst_pad_w, dst_pad_h),
                                        flags=cv2.INTER_NEAREST)

    # Put the base image on an enlarged palette
    enlarged_base_img = np.zeros((dst_pad_h, dst_pad_w, 3),
                                 np.uint8)

    # Create masked composite
    (ret, data_map) = cv2.threshold(cv2.cvtColor(next_img_warp,
                                                 cv2.COLOR_BGR2GRAY),
                                    0, 255, cv2.THRESH_BINARY)

    # add base image
    enlarged_base_img = cv2.add(enlarged_base_img, dst_padded,
                                mask=np.bitwise_not(data_map),
                                dtype=cv2.CV_8U)
    final_img = cv2.add(enlarged_base_img, next_img_warp,
                        dtype=cv2.CV_8U)

    temp_mosaic = final_img
warpPerspectivePadded.py
def warpPerspectivePadded(image, temp_mosaic, homography):
    src_h, src_w = image.shape[:2]
    lin_homg_pts = np.array([[0, src_w, src_w, 0],
                             [0, 0, src_h, src_h],
                             [1, 1, 1, 1]])

    trans_lin_homg_pts = homography.dot(lin_homg_pts)
    trans_lin_homg_pts /= trans_lin_homg_pts[2, :]

    minX = np.floor(np.min(trans_lin_homg_pts[0])).astype(int)
    minY = np.floor(np.min(trans_lin_homg_pts[1])).astype(int)
    maxX = np.ceil(np.max(trans_lin_homg_pts[0])).astype(int)
    maxY = np.ceil(np.max(trans_lin_homg_pts[1])).astype(int)

    # add translation to the transformation matrix to shift to positive values
    anchorX, anchorY = 0, 0
    transl_transf = np.eye(3, 3)
    if minX < 0:
        anchorX = -minX
        transl_transf[0, 2] += anchorX
    if minY < 0:
        anchorY = -minY
        transl_transf[1, 2] += anchorY
    shifted_transf = transl_transf.dot(homography)
    shifted_transf /= shifted_transf[2, 2]

    # create padded destination image
    temp_mosaic_h, temp_mosaic_w = temp_mosaic.shape[:2]

    pad_widths = [anchorY, max(maxY, temp_mosaic_h) - temp_mosaic_h,
                  anchorX, max(maxX, temp_mosaic_w) - temp_mosaic_w]

    dst_padded = cv2.copyMakeBorder(temp_mosaic, pad_widths[0],
                                    pad_widths[1], pad_widths[2],
                                    pad_widths[3],
                                    cv2.BORDER_CONSTANT)
    return (shifted_transf, dst_padded)
Updates:
Well, here's my code for image stitching. This solution is not perfect, but I hope it will be helpful to someone else. It is good enough for generating a panorama view; SIFT+FLANN did the best on the dataset (Stitched image of the dataset with straight-line flight pattern). Inter-frame alignment is badly shifted and visible skew appears when stitching the dataset with a lawnmower flight pattern (Stitched image of the dataset with lawnmower flight pattern), so this solution is definitely not an ideal solution for an orthomosaic.
imageStitcher.py
import cv2
import numpy as np
import glob
import os
import time
#import math
from colorama import Style, Back
import xlsxwriter as xls
"""
Important Parameter
-------------------
detector_type (string): type of detector, "sift" or "orb"
Defaults to "sift".
matcher_type (string): type of matcher, "flann" or "bf"
Defaults to "flann".
resize_ratio (int) = number needed to decrease the input images size
output_height_times (int): determines the output height based on input image height.
Defaults to 2.
output_width_times (int): determines the output width based on input image width.
Defaults to 4.
"""
detector_type = "sift"
matcher_type = "flann"
resize_ratio = 3
output_height_times = 20
output_width_times = 15
gms = False
visualize = True
image_dir = "image/Input"
key_frame = "image/Input/frame1.jpg"
output_dir = "image/Input"
class ImageStitching:
def __init__(self, first_image,
output_height_times = output_height_times,
output_width_times = output_width_times,
detector_type = detector_type,
matcher_type = matcher_type):
"""This class processes every frame and generates the panorama
Args:
first_image (image for the first frame): first image to initialize the output size
output_height_times (int, optional): determines the output height based on input image height. Defaults to 2.
output_width_times (int, optional): determines the output width based on input image width. Defaults to 4.
detector_type (str, optional): the detector for feature detection. It can be "sift" or "orb". Defaults to "sift".
"""
self.detector_type = detector_type
self.matcher_type = matcher_type
if detector_type == "sift":
# SIFT feature detector
self.detector = cv2.xfeatures2d.SIFT_create(nOctaveLayers = 3,
contrastThreshold = 0.04,
edgeThreshold = 10,
sigma = 1.6)
if matcher_type == "flann":
# FLANN: the randomized kd trees algorithm
FLANN_INDEX_KDTREE = 1
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict (checks=200)
self.matcher = cv2.FlannBasedMatcher(flann_params,search_params)
else:
# Brute-Force matcher
self.matcher = cv2.BFMatcher()
elif detector_type == "orb":
# ORB feature detector
self.detector = cv2.ORB_create()
self.detector.setFastThreshold(0)
if matcher_type == "flann":
FLANN_INDEX_LSH = 6
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
search_params = dict (checks=200)
self.matcher = cv2.FlannBasedMatcher(flann_params,search_params)
else:
# Brute-Force-Hamming matcher
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
self.record = []
self.visualize = visualize
self.output_img = np.zeros(shape=(int(output_height_times * first_image.shape[0]),
int(output_width_times*first_image.shape[1]),
first_image.shape[2]))
self.process_first_frame(first_image)
# output image offset
self.w_offset = int(self.output_img.shape[0]/2 - first_image.shape[0]/2)
self.h_offset = int(self.output_img.shape[1]/2 - first_image.shape[1]/2)
self.output_img[self.w_offset:self.w_offset+first_image.shape[0],
self.h_offset:self.h_offset+first_image.shape[1], :] = first_image
a = self.output_img
heightM, widthM = a.shape[:2]
a = cv2.resize(a, (int(widthM / 4),
int(heightM / 4)),
interpolation=cv2.INTER_AREA)
# cv2.imshow('output', a)
self.H_old = np.eye(3)
self.H_old[0, 2] = self.h_offset
self.H_old[1, 2] = self.w_offset
def process_first_frame(self, first_image):
"""processes the first frame for feature detection and description
Args:
first_image (cv2 image/np array): first image for feature detection
"""
self.base_frame_rgb = first_image
base_frame_gray = cv2.cvtColor(first_image, cv2.COLOR_BGR2GRAY)
base_frame = cv2.GaussianBlur(base_frame_gray, (5,5), 0)
self.base_features, self.base_desc = self.detector.detectAndCompute(base_frame, None)
def process_adj_frame(self, next_frame_rgb):
"""gets an image and processes that image for mosaicing
Args:
next_frame_rgb (np array): input of current frame for the mosaicing
"""
self.next_frame_rgb = next_frame_rgb
next_frame_gray = cv2.cvtColor(next_frame_rgb, cv2.COLOR_BGR2GRAY)
next_frame = cv2.GaussianBlur(next_frame_gray, (5,5), 0)
self.next_features, self.next_desc = self.detector.detectAndCompute(next_frame, None)
self.matchingNhomography(self.next_desc, self.base_desc)
if len(self.matches) < 4:
return
print ("\n")
self.warp(self.next_frame_rgb, self.H)
# For record purpose: save into csv file later
self.record.append([len(self.base_features), len(self.next_features),
self.no_match_lr, self.no_GMSmatches, self.inlier, self.inlierRatio, self.reproError])
# loop preparation
self.H_old = self.H
self.base_features = self.next_features
self.base_desc = self.next_desc
self.base_frame_rgb = self.next_frame_rgb
def matchingNhomography(self, next_desc, base_desc):
"""matches the descriptors
Args:
next_desc (np array): current frame descriptor
base_desc (np array): previous frame descriptor
Returns:
array: an array of matches between descriptors
"""
# matching
if self.detector_type == "sift":
pair_matches = self.matcher.knnMatch(next_desc, trainDescriptors = base_desc,
k = 2)
"""
Store all the good matches as per Lowe's ratio test.
Lowe's ratio test refers to the paper "Distinctive
Image Features from Scale-Invariant Keypoints" by
David G. Lowe.
"""
lowe_ratio = 0.8
matches = []
for m, n in pair_matches:
if m.distance < n.distance * lowe_ratio:
matches.append(m)
self.no_match_lr = len(matches)
# Rate of matches (Lowe's ratio test)
rate = float(len(matches) / ((len(self.base_features) + len(self.next_features))/2))
print (f"Rate of matches (Lowe's ratio test): {Back.RED}%f{Style.RESET_ALL}" % rate)
elif self.detector_type == "orb":
if self.matcher_type == "flann":
matches = self.matcher.match(next_desc, base_desc)
'''
lowe_ratio = 0.8
matches = []
for m, n in pair_matches:
if m.distance < n.distance * lowe_ratio:
matches.append(m)
'''
self.no_match_lr = len(matches)
# Rate of matches (Lowe's ratio test)
rate = float(len(matches) / (len(base_desc) + len(next_desc)))
print (f"Rate of matches (Lowe's ratio test): {Back.RED}%f{Style.RESET_ALL}" % rate)
else:
pair_matches = self.matcher.match(next_desc, base_desc)
# Rate of matches (before Lowe's ratio test)
self.no_match_lr = len(pair_matches)
rate = float(len(pair_matches) / (len(base_desc) + len(next_desc)))
print (f"Rate of matches: {Back.RED}%f{Style.RESET_ALL}" % rate)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)
# OPTIONAL: used to remove the unmatch pair match
matches = cv2.xfeatures2d.matchGMS(self.next_frame_rgb.shape[:2],
self.base_frame_rgb.shape[:2],
self.next_features,
self.base_features, matches,
withScale = False, withRotation = False,
thresholdFactor = 6.0) if gms else matches
self.no_GMSmatches = len(matches) if gms else 0
# Rate of matches (GMS)
rate = float(self.no_GMSmatches / (len(base_desc) + len(next_desc)))
print (f"Rate of matches (GMS): {Back.CYAN}%f{Style.RESET_ALL}" % rate)
# OPTIONAL: Obtain the maximum of 20 best matches
# matches = matches[:min(len(matches), 20)]
# Visualize the matches.
if self.visualize:
match_img = cv2.drawMatches(self.next_frame_rgb, self.next_features, self.base_frame_rgb,
self.base_features, matches, None,
flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv2.imshow('matches', match_img)
self.H, self.status, self.reproError = self.findHomography(self.next_features, self.base_features, matches)
print ('inlier/matched = %d / %d' % (np.sum(self.status), len(self.status)))
self.inlier = np.sum(self.status)
self.inlierRatio = float(np.sum(self.status)) / float(len(self.status))
print ('inlierRatio = ', self.inlierRatio)
# len(status) - np.sum(status) = number of detected outliers
'''
TODO -
To minimize or get rid of cumulative homography error, use block bundle adjustment,
as suggested in "Multi View Image Stitching of Planar Surfaces on Mobile Devices".
Using 3-dimensional multiplication to find the cumulative homography is very sensitive
to homography error.
'''
# 3-dimensional multiplication to find cumulative homography to the reference keyframe
self.H = np.matmul(self.H_old, self.H)
self.H = self.H/self.H[2,2]
self.matches = matches
return matches
@staticmethod
def findHomography(base_features, next_features, matches):
"""gets two matches and calculate the homography between two images
Args:
base_features (np array): keypoints of image 1
next_features (np_array): keypoints of image 2
matches (np array): matches between keypoints in image 1 and image 2
Returns:
np array of shape [3,3]: Homography matrix
"""
kp1 = []
kp2 = []
for match in matches:
kp1.append(base_features[match.queryIdx])
kp2.append(next_features[match.trainIdx])
p1_array = np.array([k.pt for k in kp1])
p2_array = np.array([k.pt for k in kp2])
homography, status = cv2.findHomography(p1_array, p2_array, method = cv2.RANSAC,
ransacReprojThreshold = 5.0,
mask = None,
maxIters = 2000,
confidence = 0.995)
#### Finding the euclidean distance error ####
list1 = np.array(p2_array)
list2 = np.array(p1_array)
list2 = np.reshape(list2, (len(list2), 2))
ones = np.ones(len(list1))
TestPoints = np.transpose(np.reshape(list1, (len(list1), 2)))
print ("Length:", np.shape(TestPoints), np.shape(ones))
TestPointsHom = np.vstack((TestPoints, ones))
print ("Homogenous Points:", np.shape(TestPointsHom))
projectedPointsH = np.matmul(homography, TestPointsHom) # projecting the points in test image to collage image using homography matrix
projectedPointsNH = np.transpose(np.array([np.true_divide(projectedPointsH[0,:], projectedPointsH[2,:]), np.true_divide(projectedPointsH[1,:], projectedPointsH[2,:])]))
print ("list2 shape:", np.shape(list2))
print ("NH Points shape:", np.shape(projectedPointsNH))
print ("Raw Error Vector:", np.shape(np.linalg.norm(projectedPointsNH-list2, axis=1)))
Error = int(np.sum(np.linalg.norm(projectedPointsNH-list2, axis=1)))
print ("Total Error:", Error)
AvgError = np.divide(np.array(Error), np.array(len(list1)))
print ("Average Error:", AvgError)
##################
return homography, status, AvgError
def warp(self, next_frame_rgb, H):
""" warps the current frame based on the calculated homography H
Args:
next_frame_rgb (np array): current frame
H (np array of shape [3,3]): homography matrix
Returns:
np array: image output of mosaicing
"""
warped_img = cv2.warpPerspective(
next_frame_rgb, H, (self.output_img.shape[1], self.output_img.shape[0]),
flags=cv2.INTER_LINEAR)
transformed_corners = self.get_transformed_corners(next_frame_rgb, H)
warped_img = self.draw_border(warped_img, transformed_corners)
self.output_img[warped_img > 0] = warped_img[warped_img > 0]
output_temp = np.copy(self.output_img)
output_temp = self.draw_border(output_temp, transformed_corners, color=(0, 0, 255))
# Visualize the stitched result
if self.visualize:
output_temp_copy = output_temp/255.
output_temp_copy = cv2.normalize(output_temp_copy, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) # convert float64 to uint8
size = 720
heightM, widthM = output_temp_copy.shape[:2]
ratio = size / float(heightM)
output_temp_copy = cv2.resize(output_temp_copy, (int(ratio * widthM), size), interpolation=cv2.INTER_AREA)
cv2.imshow('output', output_temp_copy)
return self.output_img
@staticmethod
def get_transformed_corners(next_frame_rgb, H):
"""finds the corner of the current frame after warp
Args:
next_frame_rgb (np array): current frame
H (np array of shape [3,3]): Homography matrix
Returns:
[np array]: a list of 4 corner points after warping
"""
corner_0 = np.array([0, 0])
corner_1 = np.array([next_frame_rgb.shape[1], 0])
corner_2 = np.array([next_frame_rgb.shape[1], next_frame_rgb.shape[0]])
corner_3 = np.array([0, next_frame_rgb.shape[0]])
corners = np.array([[corner_0, corner_1, corner_2, corner_3]], dtype=np.float32)
transformed_corners = cv2.perspectiveTransform(corners, H)
transformed_corners = np.array(transformed_corners, dtype=np.int32)
# output_temp = np.copy(output_img)
# mask = np.zeros(shape=(output_temp.shape[0], output_temp.shape[1], 1))
# cv2.fillPoly(mask, transformed_corners, color=(1, 0, 0))
# cv2.imshow('mask', mask)
return transformed_corners
def draw_border(self, image, corners, color=(0, 0, 0)):
"""This function draws a rectangle border
Args:
image ([type]): current mosaiced output
corners (np array): list of corner points
color (tuple, optional): color of the border lines. Defaults to (0, 0, 0).
Returns:
np array: the output image with border
"""
for i in range(corners.shape[1]-1, -1, -1):
cv2.line(image, tuple(corners[0, i, :]), tuple(
corners[0, i-1, :]), thickness=5, color=color)
return image
@staticmethod
def stitchedimg_crop(stitched_img):
"""This function crops the black edge
Args:
stitched_img (np array): stitched image with black edge
Returns:
np array: the output image with no black edge
"""
stitched_img = cv2.normalize(stitched_img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) # convert float64 to uint8
# Crop black edges
stitched_img_gray = cv2.cvtColor(stitched_img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(stitched_img_gray, 1, 255, cv2.THRESH_BINARY)
dino, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print ("Cropping black edge of stitched image ...")
print ("Found %d contours...\n" % (len(contours)))
max_area = 0
best_rect = (0,0,0,0)
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
deltaHeight = h-y
deltaWidth = w-x
if deltaHeight < 0 or deltaWidth < 0:
deltaHeight = h+y
deltaWidth = w+x
area = deltaHeight * deltaWidth
if ( area > max_area and deltaHeight > 0 and deltaWidth > 0):
max_area = area
best_rect = (x,y,w,h)
if ( max_area > 0 ):
final_img_crop = stitched_img[best_rect[1]:best_rect[1]+best_rect[3],
best_rect[0]:best_rect[0]+best_rect[2]]
return final_img_crop
def main():
images = sorted(glob.glob(image_dir + "/*.jpg"),
key=lambda x: int(os.path.splitext(os.path.basename(x))[0][5:]))
# read the first frame
first_frame = cv2.imread(key_frame)
heightM, widthM = first_frame.shape[:2]
first_frame = cv2.resize(first_frame, (int(widthM / resize_ratio),
int(heightM / resize_ratio)),
interpolation=cv2.INTER_AREA)
image_stitching = ImageStitching(first_frame)
round = 2
for next_img_path in images[1:]:
print (f'Reading {Back.YELLOW}%s{Style.RESET_ALL}...' % next_img_path)
next_frame_rgb = cv2.imread(next_img_path)
heightM, widthM = next_frame_rgb.shape[:2]
next_frame_rgb = cv2.resize(next_frame_rgb, (int(widthM / resize_ratio),
int(heightM / resize_ratio)),
interpolation=cv2.INTER_AREA)
print ("Stitching %d / %d of image ..." % (round,len(images)))
# process each frame
image_stitching.process_adj_frame(next_frame_rgb)
round += 1
if round > len(images):
print ("Please press 'q' to continue the process ...")
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('mosaic.jpg', image_stitching.output_img)
final_img_crop = image_stitching.stitchedimg_crop(image_stitching.output_img)
print ("Image stitching done ...")
cv2.imwrite("%s/Normal.JPG" % output_dir, final_img_crop)
# Save important results into csv file
tuplelist = tuple(image_stitching.record)
workbook = xls.Workbook('Normal.xlsx')
worksheet = workbook.add_worksheet("Normal")
row = 0
col = 0
worksheet.write(row, col, 'number_pairs')
worksheet.write(row, col + 1, 'basefeature')
worksheet.write(row, col + 2, 'nextfeature')
worksheet.write(row, col + 3, 'no_match_lr')
worksheet.write(row, col + 4, 'match_rate')
worksheet.write(row, col + 5, 'no_GMSmatches (OFF)')
worksheet.write(row, col + 6, 'gms_match_rate')
worksheet.write(row, col + 7, 'inlier')
worksheet.write(row, col + 8, 'inlierratio')
worksheet.write(row, col + 9, 'reproerror')
row += 1
number = 1
# Iterate over the data and write it out row by row.
for basefeature, nextfeature, no_match_lr, no_GMSmatches, inlier, inlierratio, reproerror in (tuplelist):
worksheet.write(row, col, number)
worksheet.write(row, col + 1, basefeature)
worksheet.write(row, col + 2, nextfeature)
worksheet.write(row, col + 3, no_match_lr)
match_rate = no_match_lr / ((basefeature+nextfeature)/2)
worksheet.write(row, col + 4, match_rate)
worksheet.write(row, col + 5, no_GMSmatches)
gms_match_rate = no_GMSmatches / ((basefeature+nextfeature)/2)
worksheet.write(row, col + 6, gms_match_rate)
worksheet.write(row, col + 7, inlier)
worksheet.write(row, col + 8, inlierratio)
worksheet.write(row, col + 9, reproerror)
number += 1
row += 1
workbook.close()
""""""""""""""""""""""""""""""""""""""""""""" Main """""""""""""""""""""""""""""""""""""""
if __name__ == "__main__":
program_start = time.process_time()
main()
program_end = time.process_time()
print (f'Program elapsed time: {Back.GREEN}%s s{Style.RESET_ALL}\n' % str(program_end-program_start))
Eventually I changed the way I warp the images, using the approach from Jahaniam's Real Time Video Mosaic: he places the reference image at the middle of a blank image of preset size, then computes the subsequent homographies and warps the adjacent images onto the reference image.
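A rough sketch of that idea (not Jahaniam's actual code; ref_img, next_img and the pairwise homography H are placeholders): pre-multiply each homography by a translation that places the reference frame in the middle of a large blank canvas.
import numpy as np
import cv2

# place the reference image at the centre of a canvas 4x its size (arbitrary choice)
canvas_h, canvas_w = ref_img.shape[0] * 4, ref_img.shape[1] * 4
tx = (canvas_w - ref_img.shape[1]) // 2
ty = (canvas_h - ref_img.shape[0]) // 2
offset = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]], dtype=np.float64)

canvas = np.zeros((canvas_h, canvas_w, 3), np.uint8)
canvas[ty:ty + ref_img.shape[0], tx:tx + ref_img.shape[1]] = ref_img

# H maps next_img into ref_img's frame; offset @ H maps it onto the canvas
warped = cv2.warpPerspective(next_img, offset @ H, (canvas_w, canvas_h))
canvas[warped > 0] = warped[warped > 0]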
Example of stitched image

Image stitching

I recorded a video while a bottle was rotated, then extracted frames from the video and cut the central block from each image.
So for all frames I got the following images:
I tried to stitch them to get a panorama, but I got bad results.
I used the following program:
import glob
# from panorama import Panorama
import sys
import numpy
import imutils
import cv2

def readImages(imageString):
    images = []
    # Get images from arguments.
    for i in range(0, len(imageString)):
        img = cv2.imread(imageString[i])
        images.append(img)
    return images

def findAndDescribeFeatures(image):
    # Getting gray image
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find and describe the features.
    # Fast: sift = cv2.xfeatures2d.SURF_create()
    sift = cv2.xfeatures2d.SIFT_create()
    # Find interest points.
    keypoints = sift.detect(grayImage, None)
    # Computing features.
    keypoints, features = sift.compute(grayImage, keypoints)
    # Converting keypoints to numbers.
    keypoints = numpy.float32([kp.pt for kp in keypoints])
    return keypoints, features

def matchFeatures(featuresA, featuresB):
    # Slow: featureMatcher = cv2.DescriptorMatcher_create("BruteForce")
    featureMatcher = cv2.DescriptorMatcher_create("FlannBased")
    matches = featureMatcher.knnMatch(featuresA, featuresB, k=2)
    return matches

def generateHomography(allMatches, keypointsA, keypointsB, ratio, ransacRep):
    if not allMatches:
        return None
    matches = []
    for match in allMatches:
        # Lowe's ratio test
        if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
            matches.append(match[0])
    pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
    pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
    if len(pointsA) > 4:
        H, status = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, ransacRep)
        return matches, H, status
    else:
        return None

paths = glob.glob("C:/Users/andre/Desktop/Panorama-master/frames/*.jpg")
images = readImages(paths[::-1])

while len(images) > 1:
    imgR = images.pop()
    imgL = images.pop()
    interestsR, featuresR = findAndDescribeFeatures(imgR)
    interestsL, featuresL = findAndDescribeFeatures(imgL)
    try:
        try:
            allMatches = matchFeatures(featuresR, featuresL)
            _, H, _ = generateHomography(allMatches, interestsR, interestsL, 0.75, 4.0)
            result = cv2.warpPerspective(imgR, H,
                                         (imgR.shape[1] + imgL.shape[1], imgR.shape[0]))
            result[0:imgL.shape[0], 0:imgL.shape[1]] = imgL
            images.append(result)
        except TypeError:
            pass
    except cv2.error:
        pass

result = imutils.resize(images[0], height=260)
cv2.imshow("Result", result)
cv2.imwrite("Result.jpg", result)
cv2.waitKey(0)
My result was:
Maybe someone knows how to do this better? I think that using small blocks from each frame should remove the roundness... But...
Data: https://1drv.ms/f/s!ArcAdXhy6TxPho0FLKxyRCL-808Y9g
I managed to achieve a nice result. I rewrote your code just a little bit; here is the changed part:
def generateTransformation(allMatches, keypointsA, keypointsB, ratio):
    if not allMatches:
        return None
    matches = []
    for match in allMatches:
        # Lowe's ratio test
        if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
            matches.append(match[0])
    pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
    pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
    if len(pointsA) > 2:
        transformation = cv2.estimateRigidTransform(pointsA, pointsB, True)
        if transformation is None or transformation.shape[1] < 1 or transformation.shape[0] < 1:
            return None
        return transformation
    else:
        return None

paths = glob.glob("a*.jpg")
images = readImages(paths[::-1])
result = images[0]

while len(images) > 1:
    imgR = images.pop()
    imgL = images.pop()
    interestsR, featuresR = findAndDescribeFeatures(imgR)
    interestsL, featuresL = findAndDescribeFeatures(imgL)
    allMatches = matchFeatures(featuresR, featuresL)
    transformation = generateTransformation(allMatches, interestsR, interestsL, 0.75)
    if transformation is None or transformation[0, 2] < 0:
        images.append(imgR)
        continue
    transformation[0, 0] = 1
    transformation[1, 1] = 1
    transformation[0, 1] = 0
    transformation[1, 0] = 0
    transformation[1, 2] = 0
    result = cv2.warpAffine(imgR, transformation, (imgR.shape[1] +
                            int(transformation[0, 2] + 1), imgR.shape[0]))
    result[:, :imgL.shape[1]] = imgL
    cv2.imshow("R", result)
    images.append(result)
    cv2.waitKey(1)

cv2.imshow("Result", result)
cv2.imshow("Result", result)
So the key thing I changed is the transformation of the images. I use estimateRigidTransform() instead of findHomography() to calculate the transformation. From that transformation matrix I only extract the x translation, which is in the [0, 2] cell of the resulting affine transformation matrix transformation. I set the other matrix elements as if it were an identity transformation (no scaling, no perspective, no rotation or y translation). Then I pass it to warpAffine() to transform imgR, the same way you did with warpPerspective().
You can do this because you have a stable camera, the object spins in place, and you capture it with a straight frontal view. It means that you don't have to do any perspective / scaling / rotation corrections and can just "glue" the images together along the x axis.
I think your approach fails because you actually observe the bottle with a slightly tilted-down camera view, or the bottle is not in the middle of the screen. I'll try to describe that with an image. I depict some text on the bottle in red. For example, the algorithm finds a matching point pair (green) at the bottom of the captured round object. Note that the point moves not only right, but diagonally up too. The program then calculates the transformation taking into account the points which move up slightly. This gets worse frame by frame.
The recognition of matching image points may also be slightly inaccurate, so extracting only the x translation is even better, because you give the algorithm "a clue" about the actual situation you have. This makes it less applicable to other conditions, but in your case it improves the result a lot.
Also, I filter out some incorrect results with the if transformation[0, 2] < 0 check (the bottle can rotate in only one direction, and the code won't work if that value is negative anyway).
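As a side note, cv2.estimateRigidTransform is deprecated in newer OpenCV releases; a rough equivalent using cv2.estimateAffinePartial2D, keeping only the x translation as above, might look like this (a sketch, assuming the pointsA/pointsB arrays from generateTransformation):
import numpy as np
import cv2

# pointsA, pointsB: Nx2 float32 arrays of matched keypoint coordinates
matrix, inliers = cv2.estimateAffinePartial2D(pointsA, pointsB, method=cv2.RANSAC)
if matrix is not None:
    transformation = np.float32([[1, 0, matrix[0, 2]],  # keep only the x translation
                                 [0, 1, 0]])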

Stitch two images using Homography transform - Transformed image cropped

Image stitching does not work properly. The warped image is cropped, and interpolation cannot be done because the images do not intersect.
Hi,
I was assigned homework in which I have to stitch together two images shot by different cameras.
I should find the homography matrix and then warp the second image using this matrix. At the end I must interpolate the two images.
Unfortunately, the code I wrote doesn't seem to work properly. During the second image's warp I lose most of the image information: a lot of pixels are black and not the whole transformed image is transformed.
In each of the two images I track four pixels, in the same order. Below you can find the piece of code I wrote.
# Globals
points = []

def show_and_fetch(image, title):
    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(title, mouse_callback)
    # Show the image
    cv2.imshow(title, image)
    # Wait for user input to continue
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# mouse callback function
def mouse_callback(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        points.append([x, y])

def stitching():
    """
    This procedure stitches two images
    :return:
    """
    print "Stitching starts..."
    ###########################################################################
    # Get input information
    in_file_1 = utils.get_input(
        "Insert 0 to exit, the path to the first image to stitch "
        "or empty input to use default image: ", "string",
        constants.default_stitching1)
    in_file_2 = utils.get_input(
        "Insert 0 to exit, the path to the second image to stitch "
        "or empty input to use default image: ", "string",
        constants.default_stitching2)
    image_1 = utils.read_image(in_file_1)
    image_2 = utils.read_image(in_file_2)

    global points
    show_and_fetch(image_1, "Image 1 to Stitch")
    image_1_points = np.asarray(points, dtype=np.float32)
    points = []
    show_and_fetch(image_2, "Image 2 to Stitch")
    image_2_points = np.asarray(points, dtype=np.float32)

    matrix, mask = cv2.findHomography(image_1_points, image_2_points, cv2.RANSAC, 5)

    image_1_warped = cv2.warpPerspective(image_1, matrix, dsize=image_1.shape[0:2])

    utils.show_image_and_wait(image_1_warped, 'Image 1 warped', wait=False)
    utils.show_image_and_wait(image_1, 'Image 1', wait=False)
    utils.show_image_and_wait(image_2, 'Image 2')

if __name__ == "__main__":
    stitching()
I expect the warped image to be transformed, preserving most of the information in terms of pixels. Then interpolation should be applied to the intersection of the two images, where they overlap.
For instance, I want to interpolate these two images:
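One common way to keep a warped image from being cropped (a sketch of the general technique, not necessarily what the assignment expects) is to translate the homography so the transformed corners land at positive coordinates, and size the output from their bounding box:
import numpy as np
import cv2

def warp_without_cropping(image, H):
    h, w = image.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped_corners = cv2.perspectiveTransform(corners, H)
    xmin, ymin = np.floor(warped_corners.min(axis=(0, 1))).astype(int)
    xmax, ymax = np.ceil(warped_corners.max(axis=(0, 1))).astype(int)
    # shift the warp so nothing falls at negative coordinates, then size the canvas to fit
    shift = np.array([[1, 0, -xmin], [0, 1, -ymin], [0, 0, 1]], dtype=np.float64)
    return cv2.warpPerspective(image, shift @ H, (int(xmax - xmin), int(ymax - ymin)))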
I've managed to stitch images based on this solution. Here is the stitching result:
Here is the full code:
import cv2
import imutils
import numpy as np

class Stitcher(object):
    def __init__(self):
        self.isv3 = imutils.is_cv3()

    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # match features between the two images
        m = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
        if not m:
            return None

        # otherwise, apply a perspective warp to stitch the images together
        (matches, H, status) = m
        result = cv2.warpPerspective(imageA, H,
                                     (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        # check to see if the keypoint matches should be visualized
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                                   status)
            # return a tuple of the stitched image and the visualization
            return result, vis

        # return the stitched image
        return result

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check to see if we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)
        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.xfeatures2d.SIFT_create()
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return kps, features

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # only process the match if the keypoint was successfully matched
            if s == 1:
                # draw the match
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        # return the visualization
        return vis

image1 = cv2.imread('image1.jpg')
image2 = cv2.imread('image2.jpg')
stitcher = Stitcher()
(result, vis) = stitcher.stitch([image1, image2], showMatches=True)
cv2.imwrite('result.jpg', result)
I faced the same problem. It turns out that the order of my images was wrong.
I had two images to stitch; one needed to be stitched from the left onto the other, but I was computing the transform as if it stitched from the right.

Mask Issue With Python OpenCV ORB Image Alignment

I am trying to implement a Python (3.7) OpenCV (3.4.3) ORB image alignment. I normally do most of my processing with ImageMagick. But I need to do some image alignment and am trying to use Python OpenCV ORB. My script is based upon one from Satya Mallick's Learn OpenCV tutorial at https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/.
However, I am trying to modify it to use a rigid alignment rather than a perspective homography, and to filter the points using a mask to limit the difference in y values, since the images are nearly aligned already.
The mask approach was taken from a FLANN alignment code in the last example at https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html.
My script works fine if I remove the matchesMask, which should provide the point filtering. (I have two other working scripts. One is similar, but just filters the points and ignores the mask. The other is based upon the ECC algorithm.)
However, I would like to understand why my code below is not working.
Perhaps the structure of my mask is incorrect in current versions of Python Opencv?
The error that I get is:
Traceback (most recent call last):
File "warp_orb_rigid2_filter.py", line 92, in <module>
imReg, m = alignImages(im, imReference)
File "warp_orb_rigid2_filter.py", line 62, in alignImages
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
SystemError: <built-in function drawMatches> returned NULL without setting an error
Here is my code. The first arrow shows where the mask is created. The second arrow shows the line I have to remove to get the script to work. But then it ignores my filtering of points.
#!/bin/python3.7

import cv2
import numpy as np

MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15

def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Extract location of good matches and filter by diffy
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # initialize empty arrays for newpoints1 and newpoints2 and mask
    newpoints1 = np.empty(shape=[0, 2])
    newpoints2 = np.empty(shape=[0, 2])
    matches_Mask = [[0, 0] for i in range(len(matches))]
    # filter points by using mask
    for i in range(len(matches)):
        pt1 = points1[i]
        pt2 = points2[i]
        pt1x, pt1y = zip(*[pt1])
        pt2x, pt2y = zip(*[pt2])
        diffy = np.float32(np.float32(pt2y) - np.float32(pt1y))
        print(diffy)
        if abs(diffy) < 10.0:
            newpoints1 = np.append(newpoints1, [pt1], axis=0)
            newpoints2 = np.append(newpoints2, [pt2], axis=0)
            matches_Mask[i] = [1, 0]  #<--- mask created
    print(matches_Mask)

    draw_params = dict(matchColor = (255,0,),
                       singlePointColor = (255,255,0),
                       matchesMask = matches_Mask,  #<---- remove mask here
                       flags = 0)

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
    cv2.imwrite("/Users/fred/desktop/lena_matches.png", imMatches)

    # Find Affine Transformation
    # true means full affine, false means rigid (SRT)
    m = cv2.estimateRigidTransform(newpoints1, newpoints2, False)

    # Use affine transform to warp im1 to match im2
    height, width, channels = im2.shape
    im1Reg = cv2.warpAffine(im1, m, (width, height))

    return im1Reg, m

if __name__ == '__main__':
    # Read reference image
    refFilename = "/Users/fred/desktop/lena.png"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)

    # Read image to be aligned
    imFilename = "/Users/fred/desktop/lena_r1.png"
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)

    print("Aligning images ...")
    # Registered image will be stored in imReg.
    # The estimated transform will be stored in m.
    imReg, m = alignImages(im, imReference)

    # Write aligned image to disk.
    outFilename = "/Users/fred/desktop/lena_r1_aligned.jpg"
    print("Saving aligned image : ", outFilename)
    cv2.imwrite(outFilename, imReg)

    # Print estimated homography
    print("Estimated Affine Transform : \n", m)
Here are my two images: lena, and lena rotated by 1 degree. Note that these are not my actual images. These images have no diffy values > 10, but my actual images do.
I am trying to align and warp the rotated image to match the original lena image.
The way you are creating the mask is incorrect. It only needs to be a list with single numbers, with each number telling you whether you want to use that particular feature match.
Therefore, replace this line:
matches_Mask = [[0,0] for i in range(len(matches))]
With this:
matches_Mask = [0] * len(matches)
... so:
# matches_Mask = [[0,0] for i in range(len(matches))]
matches_Mask = [0] * len(matches)
This creates a list of 0s that is as long as the number of matches. Finally, you need to write a single value to the mask instead:
if abs(diffy) < 10.0:
#matches_Mask[i]=[1,0] #<--- mask created
matches_Mask[i] = 1
I finally get this:
Estimated Affine Transform :
[[ 1.00001187 0.01598318 -5.05963793]
[-0.01598318 1.00001187 -0.86121051]]
Take note that the format of the mask is different depending on what matcher you use. In this case, you use brute force matching so the mask needs to be in the format that I just described.
If you used FLANN's knnMatch, for example, then it will be a nested list of lists, with each element being a list that is k long. For example, if you had k=3 and five keypoints, it would be a list five elements long, with each element being a three-element list. Each element in the sub-list indicates which matches you want to use for drawing.
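For instance, a mask for FLANN's knnMatch with k=2 (the kind passed to cv2.drawMatchesKnn) would be built roughly like this; the variable names are only illustrative:
# one [keep_best, keep_second] pair per query keypoint
matchesMask = [[0, 0] for _ in range(len(knn_matches))]
for i, (m, n) in enumerate(knn_matches):
    if m.distance < 0.7 * n.distance:  # Lowe's ratio test
        matchesMask[i] = [1, 0]        # draw only the best of the two candidates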
