Image stitching does not work properly. The warped image is cropped, and interpolation cannot be done because the images do not intersect.
Hi,
I was assigned a homework exercise in which I have to stitch together two images shot by different cameras.
I should find the homography matrix and then warp the second image using this matrix. At the end I must interpolate the two images.
Unfortunately, the code I wrote does not seem to work properly. When warping the second image I lose most of the image information: a lot of pixels are black, and not all of the transformed image is visible.
I select four corresponding points in each of the two images, in the same order. Below you can find the piece of code I wrote.
# Globals
points = []
def show_and_fetch(image, title):
cv2.namedWindow(title, cv2.WINDOW_NORMAL)
cv2.setMouseCallback(title, mouse_callback)
# Show the image
cv2.imshow(title, image)
# Wait for user input to continue
cv2.waitKey(0)
cv2.destroyAllWindows()
# mouse callback function
def mouse_callback(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
points.append([x, y])
def stitching():
"""
This procedure stiches two images
:return:
"""
    print("Stitching starts...")
###########################################################################
# Get input information
in_file_1 = utils.get_input(
"Insert 0 to exit, the path to the first image to stitch "
"or empty input to use default image: ", "string",
constants.default_stitching1)
in_file_2 = utils.get_input(
"Insert 0 to exit, the path to the second image to stitch "
"or empty input to use default image: ", "string",
constants.default_stitching2)
image_1 = utils.read_image(in_file_1)
image_2 = utils.read_image(in_file_2)
global points
show_and_fetch(image_1, "Image 1 to Stitch")
image_1_points = np.asarray(points, dtype=np.float32)
points = []
show_and_fetch(image_2, "Image 2 to Stitch")
image_2_points = np.asarray(points, dtype=np.float32)
matrix, mask = cv2.findHomography(image_1_points, image_2_points, cv2.RANSAC, 5)
image_1_warped = cv2.warpPerspective(image_1, matrix, dsize=image_1.shape[0:2])
utils.show_image_and_wait(image_1_warped, 'Image 1 warped', wait=False)
utils.show_image_and_wait(image_1, 'Image 1', wait=False)
utils.show_image_and_wait(image_2, 'Image 2')
if __name__ == "__main__":
stitching()
I expect the warped image to be transformed while preserving most of the information, in terms of pixels. Then interpolation should be applied to the intersection of the two images, which overlap in a certain area.
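(One likely reason for the cropping: cv2.warpPerspective takes dsize as (width, height), while image_1.shape[0:2] is (height, width), and anything the homography maps outside that canvas is simply cut off. Below is a minimal sketch, not the assignment's required solution, of warping onto a canvas large enough for both images; it assumes matrix maps image 1 into image 2's frame, as returned by the findHomography call above.)
import cv2
import numpy as np

def warp_without_cropping(image_1, image_2, matrix):
    # Sketch only: compute a canvas that contains both the warped image 1
    # and image 2, then warp with an extra translation so nothing is cropped.
    h1, w1 = image_1.shape[:2]
    h2, w2 = image_2.shape[:2]
    corners_1 = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]).reshape(-1, 1, 2)
    corners_2 = np.float32([[0, 0], [w2, 0], [w2, h2], [0, h2]]).reshape(-1, 1, 2)
    warped_corners_1 = cv2.perspectiveTransform(corners_1, matrix)
    all_corners = np.concatenate((warped_corners_1, corners_2), axis=0)
    x_min, y_min = np.int32(all_corners.min(axis=0).ravel() - 0.5)
    x_max, y_max = np.int32(all_corners.max(axis=0).ravel() + 0.5)
    # Shift everything so the top-left corner of the canvas is (0, 0)
    shift = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]], dtype=np.float64)
    canvas_size = (x_max - x_min, y_max - y_min)   # (width, height)
    result = cv2.warpPerspective(image_1, shift.dot(matrix), canvas_size)
    # Paste image 2 at its (shifted) position; blending of the overlap goes here
    result[-y_min:h2 - y_min, -x_min:w2 - x_min] = image_2
    return result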
For instance, I want to interpolate these two images:
I've managed to stitch images based on this solution. Here is the stitching result:
Here is the full code:
import cv2
import imutils
import numpy as np
class Stitcher(object):
def __init__(self):
self.isv3 = imutils.is_cv3()
def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
(imageB, imageA) = images
(kpsA, featuresA) = self.detectAndDescribe(imageA)
(kpsB, featuresB) = self.detectAndDescribe(imageB)
# match features between the two images
m = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
if not m:
return None
# otherwise, apply a perspective warp to stitch the images
# together
(matches, H, status) = m
result = cv2.warpPerspective(imageA, H,
(imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
# check to see if the keypoint matches should be visualized
if showMatches:
vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
status)
# return a tuple of the stitched image and the
# visualization
return result, vis
# return the stitched image
return result
def detectAndDescribe(self, image):
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we are using OpenCV 3.X
if self.isv3:
# detect and extract features from the image
descriptor = cv2.xfeatures2d.SIFT_create()
(kps, features) = descriptor.detectAndCompute(image, None)
# otherwise, we are using OpenCV 2.4.X
else:
# detect keypoints in the image
detector = cv2.xfeatures2d.SIFT_create()
kps = detector.detect(gray)
# extract features from the image
extractor = cv2.xfeatures2d.SIFT_create()
(kps, features) = extractor.compute(gray, kps)
# convert the keypoints from KeyPoint objects to NumPy
# arrays
kps = np.float32([kp.pt for kp in kps])
# return a tuple of keypoints and features
return kps, features
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
ratio, reprojThresh):
# compute the raw matches and initialize the list of actual
# matches
matcher = cv2.DescriptorMatcher_create("BruteForce")
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
# loop over the raw matches
for m in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
matches.append((m[0].trainIdx, m[0].queryIdx))
# computing a homography requires at least 4 matches
if len(matches) > 4:
# construct the two sets of points
ptsA = np.float32([kpsA[i] for (_, i) in matches])
ptsB = np.float32([kpsB[i] for (i, _) in matches])
# compute the homography between the two sets of points
(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
reprojThresh)
# return the matches along with the homograpy matrix
# and status of each matched point
return (matches, H, status)
# otherwise, no homograpy could be computed
return None
def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
# initialize the output visualization image
(hA, wA) = imageA.shape[:2]
(hB, wB) = imageB.shape[:2]
vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
vis[0:hA, 0:wA] = imageA
vis[0:hB, wA:] = imageB
# loop over the matches
for ((trainIdx, queryIdx), s) in zip(matches, status):
# only process the match if the keypoint was successfully
# matched
if s == 1:
# draw the match
ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
# return the visualization
return vis
image1 = cv2.imread('image1.jpg')
image2 = cv2.imread('image2.jpg')
stitcher = Stitcher()
(result, vis) = stitcher.stitch([image1, image2], showMatches=True)
cv2.imwrite('result.jpg', result)
I faced the same problem. It turned out that the order of my images was wrong.
I had two images to stitch, and one needed to be stitched onto the other from the left. However, I was computing the transform as if it were to be stitched from the right.
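Judging from the Stitcher class above (not something I have verified against your images): images[0] is pasted unchanged at the top-left of the canvas and images[1] is the one that gets warped onto it. A quick way to check whether order is the issue is simply to swap the arguments:
stitcher = Stitcher()
# images[0] stays fixed, images[1] gets warped; if the panorama comes out
# cropped or mostly black, try the other order.
(result, vis) = stitcher.stitch([image2, image1], showMatches=True)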
Honestly, I am pretty lost. I was finally able to stitch these two pictures together, but I am unsure how to update my code to incorporate more than two photos. How would I change my code to allow stitching multiple pictures? Below is what I have so far. I should mention that the pictures I am using are low quality, so other, simpler examples I found either did not work or could not use all of the pictures I needed. If someone could just give me a general direction on how I would begin to alter this code for up to five pictures, I would appreciate it.
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
cv2.ocl.setUseOpenCL(False)
import warnings
warnings.filterwarnings('ignore')
#sift is a feature descriptor that helps locate pixel coordinates i.e. corner detector
feature_extraction_algo = 'sift'
feature_to_match = 'bf'
#train image needs to be the one transformed
train_photo = cv2.imread('Stitching/Images/Log771/Log2.bmp')
#converting from BGR to RGB for Matplotlib
train_photo = cv2.cvtColor(train_photo, cv2.COLOR_BGR2RGB)
train_photo_crop = train_photo[0:10000, 425:750]
#converting to gray scale
train_photo_gray = cv2.cvtColor(train_photo_crop, cv2.COLOR_RGB2GRAY)
#Do the same for the query image
query_photo = cv2.imread('Stitching/Images/Log771/Log3.bmp')
query_photo = cv2.cvtColor(query_photo, cv2.COLOR_BGR2RGB)
query_photo_crop = query_photo[0:10000, 425:750]
query_photo_gray = cv2.cvtColor(query_photo_crop, cv2.COLOR_RGB2GRAY)
#crop both images
#view/plot images
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, constrained_layout=False, figsize=(16,9))
ax1.imshow(query_photo_crop, cmap="gray")
ax1.set_xlabel("Query Image", fontsize=14)
ax2.imshow(train_photo_crop, cmap="gray")
ax2.set_xlabel("Train Image", fontsize=14)
plt.savefig("./"+'.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpeg')
plt.show()
#sift.detectAndCompute() gets keypoints and descriptors--helps to determine how similar or different keypoints are-- ie. one picture is
#huge and one is small. Keypoints match but are not similar enough, which is where descriptors come in.
#to compare the keypoints in vector format
def select_descriptor_methods(image, method=None):
    assert method is not None, "Please define a feature descriptor method. Accepted values are: 'sift', 'surf', 'brisk', 'orb'"
if method == 'sift':
descriptor = cv2.SIFT_create()
elif method == 'surf':
        descriptor = cv2.xfeatures2d.SURF_create()  # SURF lives in the opencv-contrib xfeatures2d module
elif method == 'brisk':
descriptor = cv2.BRISK_create()
elif method =='orb':
descriptor = cv2.ORB_create()
(keypoints, features) = descriptor.detectAndCompute (image, None)
return (keypoints, features)
keypoints_train_img, features_train_img = select_descriptor_methods(train_photo_gray, method=feature_extraction_algo)
keypoints_query_img, features_query_img = select_descriptor_methods(query_photo_gray, method=feature_extraction_algo)
for keypoint in keypoints_query_img:
x,y = keypoint.pt
size = keypoint.size
orientation = keypoint.angle
response = keypoint.response
octave = keypoint.octave
class_id = keypoint.class_id
print (x,y)
print(size)
print(orientation)
print(response)
print(octave)
print(class_id)
print(len(keypoints_query_img))
features_query_img.shape
#Noting a basic fact that - SIFT descriptor is computed for every key-point detected in the image.
#Before computing descriptor, you probably used a detector (as Harris, Sift or Surf Detector) to detect points of interest. Detecting key-points and computing descriptors are two independent steps!
#drawing keypoints using drawKeypoints(input image,
# keypoints, output image, color, flag) -- keypoints based off input picture
#Displaying keypoints and features on both detected images
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20,8), constrained_layout=False)
ax1.imshow(cv2.drawKeypoints(train_photo_gray, keypoints_train_img, None, color=(0,255,0)))
ax1.set_xlabel("(a)", fontsize=14)
ax2.imshow(cv2.drawKeypoints(query_photo_gray, keypoints_query_img,None,color=(0,255,0)))
ax2.set_xlabel("(b)", fontsize=14)
plt.savefig("./Stitching/" + feature_extraction_algo + "Images" + '.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpg')
plt.show()
def create_matching_object(method,crossCheck):
"Create and return a Matcher Object"
# For BF matcher, first we have to create the BFMatcher object using cv2.BFMatcher().
# It takes two optional params.
# normType - It specifies the distance measurement
# crossCheck - which is false by default. If it is true, Matcher returns only those matches
# with value (i,j) such that i-th descriptor in set A has j-th descriptor in set B as the best match
# and vice-versa.
if method == 'sift' or method == 'surf':
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
elif method == 'orb' or method == 'brisk':
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
return bf
def key_points_matching(features_train_img, features_query_img, method):
bf = create_matching_object(method, crossCheck=True)
# Match descriptors.
best_matches = bf.match(features_train_img,features_query_img)
# Sort the features in order of distance.
# The points with small distance (more similarity) are ordered first in the vector
rawMatches = sorted(best_matches, key = lambda x:x.distance)
    print("Raw matches (Brute force):", len(rawMatches))
return rawMatches
def key_points_matching_KNN(features_train_img, features_query_img, ratio, method):
bf = create_matching_object(method, crossCheck=False)
# compute the raw matches and initialize the list of actual matches
rawMatches = bf.knnMatch(features_train_img, features_query_img, k=2)
print("Raw matches (knn):", len(rawMatches))
matches = []
#loop over raw matches
for m,n in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if m.distance < n.distance * ratio:
matches.append(m)
return matches
print("Drawing: {} matched features Lines".format(feature_to_match))
fig = plt.figure(figsize=(20,8))
if feature_to_match == 'bf':
matches = key_points_matching(features_train_img, features_query_img, method=feature_extraction_algo)
mapped_features_image = cv2.drawMatches(train_photo_crop,keypoints_train_img,query_photo_crop,keypoints_query_img,matches[:100],None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Now for cross checking draw the feature-mapping lines also with KNN
elif feature_to_match == 'knn':
matches = key_points_matching_KNN(features_train_img, features_query_img, ratio=0.75, method=feature_extraction_algo)
mapped_features_image_knn = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, np.random.choice(matches,50),None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(mapped_features_image)
plt.axis('off')
plt.savefig("./Stitching/" + feature_to_match + "_matching_img_log_"+'.jpeg', bbox_inches='tight', dpi=300, optimize=True, format='jpeg')
plt.show()
feature_to_match = 'knn'
print("Drawing: {} matched features Lines".format(feature_to_match))
fig = plt.figure(figsize=(20,8))
if feature_to_match == 'bf':
matches = key_points_matching(features_train_img, features_query_img, method=feature_extraction_algo)
mapped_features_image = cv2.drawMatches(train_photo_crop,keypoints_train_img,query_photo_crop,keypoints_query_img,matches[:100],
None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Now for cross checking draw the feature-mapping lines also with KNN
elif feature_to_match == 'knn':
matches = key_points_matching_KNN(features_train_img, features_query_img, ratio=0.75, method=feature_extraction_algo)
mapped_features_image_knn = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, np.random.choice(matches,100),None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(mapped_features_image_knn)
plt.axis('off')
plt.savefig("./Stitching/" + feature_to_match + "_Images"+'.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpg')
plt.show()
def homography_stitching(keypoints_train_img, keypoints_query_img, matches, reprojThresh):
keypoints_train_img = np.float32([keypoint.pt for keypoint in keypoints_train_img])
keypoints_query_img = np.float32([keypoint.pt for keypoint in keypoints_query_img])
''' For findHomography() - I need to have an assumption of a minimum of correspondence points that are present between the 2 images. Here, I am assuming that Minimum Match Count to be 4 '''
if len(matches) > 4:
# construct the two sets of points
points_train = np.float32([keypoints_train_img[m.queryIdx] for m in matches])
points_query = np.float32([keypoints_query_img[m.trainIdx] for m in matches])
# Calculate the homography between the sets of points
(H, status) = cv2.findHomography(points_train, points_query, cv2.RANSAC, reprojThresh)
return (matches, H, status)
else:
return None
M = homography_stitching(keypoints_train_img, keypoints_query_img, matches, reprojThresh=4)
if M is None:
print("Error!")
(matches, Homography_Matrix, status) = M
print(Homography_Matrix)
#Finally, we can apply our transformation by calling the cv2.warpPerspective function. The first parameter is our
# original image that we want to warp,
#the second is our transformation matrix M (which will be obtained from homography_stitching),
#and the final parameter is a tuple, used to indicate the width and height of the output image.
# For the calculation of the width and height of the final horizontal panoramic images
# I can just add the widths of the individual images and for the height
# I can take the max from the 2 individual images.
width = query_photo_crop.shape[1] + train_photo_crop.shape[1]
print("width ", width)
# 2922 - Which is exactly the sum value of the width of
# my train.jpg and query.jpg
height = max(query_photo_crop.shape[0], train_photo_crop.shape[0])
# otherwise, apply a perspective warp to stitch the images together
# Now just plug that "Homography_Matrix" into cv::warpedPerspective and I shall have a warped image1 into image2 frame
result = cv2.warpPerspective(train_photo_crop, Homography_Matrix, (width, height))
# The warpPerspective() function returns an image or video whose size is the same as the size of the original image or video. Hence set the pixels as per my query_photo
result[0:query_photo_crop.shape[0], 0:query_photo_crop.shape[1]] = query_photo_crop
plt.figure(figsize=(20,10))
plt.axis('off')
plt.imshow(result)
imageio.imwrite("./Stitching/Images/Log771/finishedLog"+'.jpg', result)
plt.show()
Simple method:
OpenCV makes this easy for you with the help of the cv2.Stitcher_create module. Everything is handled internally, right from identifying key feature points to matching them appropriately and finally warping the images. You can pass in more than 2 images to be stitched. But be warned: the greater the number of images and/or their dimensions, the greater the time needed for computation.
How to use cv2.Stitcher_create module?
First, we need to create an instance of the class.
imageStitcher = cv2.Stitcher_create()
To get a list of all the functions associated with this class just type help(imageStitcher). It will return a list of all the functions along with its required input parameters and expected output.
The created instance contains a function stitch() which is used to create the panorama. stitch can be used in either of the following two ways:
ret, panorama_image = stitch(images):
Pass the list of all the images and the module identifies key features, matches them and yields the warped image
ret, panorama_image = stitch(images, masks):
Optionally, we can also pass a list of masks, with one mask corresponding to every image. A mask is a binary image consisting of black and white pixels. The module looks for keypoints/features only in the white regions of each mask, then matches them and yields the warped image.
Both of the above ways also return a variable ret; if its value is 0, it means stitching was performed without any issues.
The following code sample (which I borrowed) shows the first method:
Sample images to be stitched:
Code:
import os
import cv2
# path containing images to be stitched
path = 'stitching_images'
# append all the images within the path to a list
images = []
for image_file in os.listdir(path):
if image_file.endswith ('.jpg'):
img = cv2.imread(os.path.join(path, image_file))
images.append(img)
# creating instance of stitcher class
imageStitcher = cv2.Stitcher_create()
# call the 'stitch' function and pass in the list of images
status, stitched_img = imageStitcher.stitch(images)
# display the panorama if stitching is successful
if status == 0:
cv2.imshow('Panorama', stitched_img)
Result:
(Code and images borrowed from https://github.com/niconielsen32/ComputerVision/tree/master/imageStitching)
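For completeness, here is a rough sketch (not from the borrowed repo) of the second way, stitch(images, masks); it assumes a recent OpenCV build that exposes this overload, and the file names are placeholders:
import cv2
import numpy as np

images = [cv2.imread('left.jpg'), cv2.imread('right.jpg')]  # placeholder file names

# One 8-bit single-channel mask per image; keypoints are searched only where
# the mask is white (255). Here each mask starts fully white and an area to
# ignore is blacked out, purely as an illustration.
masks = []
for img in images:
    mask = np.full(img.shape[:2], 255, dtype=np.uint8)
    mask[:50, :] = 0   # e.g. ignore a 50-pixel strip at the top of every image
    masks.append(mask)

imageStitcher = cv2.Stitcher_create()
status, stitched_img = imageStitcher.stitch(images, masks)
if status == 0:        # 0 means stitching succeeded
    cv2.imshow('Panorama', stitched_img)
    cv2.waitKey(0)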
Hard method:
If you want to create a panorama using your code, I would suggest performing it sequentially:
Iterate through all the images in your collection (say A, B, C, D, E)
For each pair of images, find keypoints and match them. (AB, AC, AD, AE, BC, BD, etc..)
Select the pair where keypoint matches are the highest and stitch them using homography (say images A and C are warped to P1)
Next find keypoint matches between the stitched image and every other image in your collection (P1B, P1D, P1E)
Find the pair with most number of matches and stitch them
Repeat likewise until a single panorama remains (a rough sketch of this loop is given below)
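A rough sketch of that loop, assuming two hypothetical helpers: count_matches(img1, img2), which returns the number of good keypoint matches between two images, and stitch_pair(img1, img2), which warps and blends one pair (for example using the homography code above):
def stitch_collection(images, count_matches, stitch_pair):
    # Greedy sequential stitching: repeatedly merge the best-matching pair.
    images = list(images)
    while len(images) > 1:
        best_score, best_pair = -1, None
        # Score every remaining pair by its number of good keypoint matches
        for i in range(len(images)):
            for j in range(i + 1, len(images)):
                score = count_matches(images[i], images[j])
                if score > best_score:
                    best_score, best_pair = score, (i, j)
        i, j = best_pair
        # Stitch the best pair and put the partial panorama back into the pool
        panorama = stitch_pair(images[i], images[j])
        images = [img for k, img in enumerate(images) if k not in (i, j)]
        images.append(panorama)
    return images[0]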
I am working on image registration of OCT data. I would like to locate the regions/areas in my target registered image where image registration has actually occurred from the source images. I am working in Python. Can anyone please tell me what the available techniques are?
Any suggestions on how to proceed with the problem are also welcome. I have done some initial trial image registration on two images. The goal is to register a large dataset.
My code is given below:
#importing libraries
import cv2
import numpy as np
# from skimage.measure import structural_similarity as ssim
# from skimage.measure import compare_ssim
import skimage
from skimage import measure
import matplotlib.pyplot as plt
def imageRegistration():
# open the image files
path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
image1 = cv2.imread(str(path) + '104.png')
image2 = cv2.imread(str(path) + '0.png')
# converting to greyscale
img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
height, width = img2.shape
# Create ORB detector with 5000 features.
orb_detector = cv2.ORB_create(5000)
# Find keypoints and descriptors.
# The first arg is the image, second arg is the mask
    # (which is not required in this case).
kp1, d1 = orb_detector.detectAndCompute(img1, None)
kp2, d2 = orb_detector.detectAndCompute(img2, None)
# Match features between the two images.
# We create a Brute Force matcher with
# Hamming distance as measurement mode.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match the two sets of descriptors.
matches = matcher.match(d1, d2)
# Sort matches on the basis of their Hamming distance.
matches.sort(key=lambda x: x.distance)
# Take the top 90 % matches forward.
    matches = matches[:int(len(matches) * 0.9)]
no_of_matches = len(matches)
# Define empty matrices of shape no_of_matches * 2.
p1 = np.zeros((no_of_matches, 2))
p2 = np.zeros((no_of_matches, 2))
for i in range(len(matches)):
p1[i, :] = kp1[matches[i].queryIdx].pt
p2[i, :] = kp2[matches[i].trainIdx].pt
# Find the homography matrix.
homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
# Use this matrix to transform the
# colored image wrt the reference image.
transformed_img = cv2.warpPerspective(image1,
homography, (width, height))
# Save the output.
cv2.imwrite('output.jpg', transformed_img)
#following is the code figuring out difference in the source image, target image and the registered image
# 0 mse means perfect similarity , no difference
# mse >1 means there is difference and as the value increases , the difference increases
def findingDifferenceMSE():
path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
image1 = cv2.imread(str(path) + '104.png')
image2 = cv2.imread(str(path) + '0.png')
image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
err = np.sum((image1.astype("float") - image3.astype("float")) ** 2)
err /= float(image1.shape[0] * image3.shape[1])
print("MSE:")
print('Image 104 and output image: ', + err)
err1 = np.sum((image2.astype("float") - image3.astype("float")) ** 2)
err1 /= float(image2.shape[0] * image3.shape[1])
print('Image 0 and output image: ', + err1)
def findingDifferenceSSIM():
path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
image1 = cv2.imread(str(path) + '104.png')
image2 = cv2.imread(str(path) + '0.png')
image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
result1=measure.compare_ssim(image1,image3)
print(result1)
# calling the functions
imageRegistration()
findingDifferenceMSE()
#findingDifferenceSSIM()
This is the registered image:
This image is the first reference image:
This is the second reference image:
An image differencing technique can be used to identify the registered area by comparing the registered image with the base images. In this way, the differing areas will be recognized.
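A minimal sketch of that idea, assuming output.jpg is the registered image produced above and 104.png is the reference slice it is compared against (paths, file names and the threshold are placeholders; both images must have the same size):
import cv2

reference = cv2.imread('104.png', cv2.IMREAD_GRAYSCALE)
registered = cv2.imread('output.jpg', cv2.IMREAD_GRAYSCALE)

# Pixel-wise absolute difference: low values mean the registered image
# agrees with the reference there, high values mean it does not.
diff = cv2.absdiff(reference, registered)

# Threshold the difference to get a binary map of the differing areas
_, diff_mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
cv2.imwrite('difference_map.png', diff_mask)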
I've recorded a video while the bottle was rotated. Then I extracted frames from the video and cut the central block out of each one.
So for all frames I got the following images:
I've tried to stitch them into a panorama, but I got bad results.
I used the following program:
import glob
# from panorama import Panorama
import sys
import numpy
import imutils
import cv2
def readImages(imageString):
images = []
# Get images from arguments.
for i in range(0, len(imageString)):
img = cv2.imread(imageString[i])
images.append(img)
return images
def findAndDescribeFeatures(image):
# Getting gray image
grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find and describe the features.
# Fast: sift = cv2.xfeatures2d.SURF_create()
sift = cv2.xfeatures2d.SIFT_create()
# Find interest points.
keypoints = sift.detect(grayImage, None)
# Computing features.
keypoints, features = sift.compute(grayImage, keypoints)
# Converting keypoints to numbers.
keypoints = numpy.float32([kp.pt for kp in keypoints])
return keypoints, features
def matchFeatures(featuresA, featuresB):
# Slow: featureMatcher = cv2.DescriptorMatcher_create("BruteForce")
featureMatcher = cv2.DescriptorMatcher_create("FlannBased")
matches = featureMatcher.knnMatch(featuresA, featuresB, k=2)
return matches
def generateHomography(allMatches, keypointsA, keypointsB, ratio, ransacRep):
if not allMatches:
return None
matches = []
for match in allMatches:
# Lowe's ratio test
if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
matches.append(match[0])
pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
if len(pointsA) > 4:
H, status = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, ransacRep)
return matches, H, status
else:
return None
paths = glob.glob("C:/Users/andre/Desktop/Panorama-master/frames/*.jpg")
images = readImages(paths[::-1])
while len(images) > 1:
imgR = images.pop()
imgL = images.pop()
interestsR, featuresR = findAndDescribeFeatures(imgR)
interestsL, featuresL = findAndDescribeFeatures(imgL)
try:
try:
allMatches = matchFeatures(featuresR, featuresL)
_, H, _ = generateHomography(allMatches, interestsR, interestsL, 0.75, 4.0)
result = cv2.warpPerspective(imgR, H,
(imgR.shape[1] + imgL.shape[1], imgR.shape[0]))
result[0:imgL.shape[0], 0:imgL.shape[1]] = imgL
images.append(result)
except TypeError:
pass
except cv2.error:
pass
result = imutils.resize(images[0], height=260)
cv2.imshow("Result", result)
cv2.imwrite("Result.jpg", result)
cv2.waitKey(0)
My result was:
Maybe someone knows how to do it better? I think that using small blocks from the frames should remove the roundness... But...
Data: https://1drv.ms/f/s!ArcAdXhy6TxPho0FLKxyRCL-808Y9g
I managed to achieve a nice result. I rewrote your code just a little bit; here is the changed part:
def generateTransformation(allMatches, keypointsA, keypointsB, ratio):
if not allMatches:
return None
matches = []
for match in allMatches:
# Lowe's ratio test
if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
matches.append(match[0])
pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
if len(pointsA) > 2:
transformation = cv2.estimateRigidTransform(pointsA, pointsB, True)
if transformation is None or transformation.shape[1] < 1 or transformation.shape[0] < 1:
return None
return transformation
else:
return None
paths = glob.glob("a*.jpg")
images = readImages(paths[::-1])
result = images[0]
while len(images) > 1:
imgR = images.pop()
imgL = images.pop()
interestsR, featuresR = findAndDescribeFeatures(imgR)
interestsL, featuresL = findAndDescribeFeatures(imgL)
allMatches = matchFeatures(featuresR, featuresL)
transformation = generateTransformation(allMatches, interestsR, interestsL, 0.75)
if transformation is None or transformation[0, 2] < 0:
images.append(imgR)
continue
transformation[0, 0] = 1
transformation[1, 1] = 1
transformation[0, 1] = 0
transformation[1, 0] = 0
transformation[1, 2] = 0
result = cv2.warpAffine(imgR, transformation, (imgR.shape[1] +
int(transformation[0, 2] + 1), imgR.shape[0]))
result[:, :imgL.shape[1]] = imgL
cv2.imshow("R", result)
images.append(result)
cv2.waitKey(1)
cv2.imshow("Result", result)
So the key thing I changed is the transformation of the images. I use estimateRigidTransform() instead of findHomography() to calculate the transformation of the image. From that transformation matrix I extract only the x translation, which is in the [0, 2] cell of the resulting affine transformation matrix transformation. I set the other transformation matrix elements as if it were an identity transformation (no scaling, no perspective, no rotation or y translation). Then I pass it to warpAffine() to transform imgR the same way you did with warpPerspective().
You can do this because the camera and the spinning object's position are stable and you capture the object with a straight, frontal view. This means you don't have to do any perspective/scaling/rotation corrections and can just "glue" the images together along the x axis.
I think your approach fails because you actually observe the bottle with a slightly tilted-down camera view, or the bottle is not in the middle of the frame. I'll try to describe that with an image. I depict some text on the bottle in red. For example, the algorithm finds a matching point pair (green) on the bottom of the captured round object. Note that the point moves not only right, but diagonally up too. The program then calculates the transformation taking into account the points that move up slightly. This gets worse frame by frame.
The recognition of matching image points may also be slightly inaccurate, so extracting only the x translation is even better, because you give the algorithm "a clue" about the actual situation you have. This makes it less applicable to other conditions, but in your case it improves the result a lot.
I also filter out some incorrect results with the if transformation[0, 2] < 0 check (the object can rotate in only one direction, and the code won't work if that value is negative anyway).
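Equivalently, the constrained matrix could be built directly from the estimated x shift; this is just a restatement of the element assignments in the code above:
import numpy as np

tx = transformation[0, 2]                  # the only value kept from the estimate
transformation = np.float32([[1, 0, tx],   # identity otherwise: no scaling or rotation
                             [0, 1, 0]])   # and no y translation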
I am trying to implement a Python (3.7) OpenCV (3.4.3) ORB image alignment. I normally do most of my processing with ImageMagick. But I need to do some image alignment and am trying to use Python OpenCV ORB. My script is based upon one from Satya Mallick's Learn OpenCV tutorial at https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/.
However, I am trying to modify it to use a rigid alignment rather than a perspective homography, and to filter the points using a mask to limit the difference in y values, since the images are nearly aligned already.
The mask approach was taken from a FLANN alignment code in the last example at https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html.
My script works fine, if I remove the matchesMask, which should provide the point filtering. (I have two other working scripts. One is similar, but just filters the points and ignores the mask. The other is based upon the ECC algorithm.)
However, I would like to understand why my code below is not working.
Perhaps the structure of my mask is incorrect in current versions of Python OpenCV?
The error that I get is:
Traceback (most recent call last):
File "warp_orb_rigid2_filter.py", line 92, in <module>
imReg, m = alignImages(im, imReference)
File "warp_orb_rigid2_filter.py", line 62, in alignImages
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
SystemError: <built-in function drawMatches> returned NULL without setting an error
Here is my code. The first arrow shows where the mask is created. The second arrow shows the line I have to remove to get the script to work. But then it ignores my filtering of points.
#!/bin/python3.7
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Extract location of good matches and filter by diffy
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# initialize empty arrays for newpoints1 and newpoints2 and mask
newpoints1 = np.empty(shape=[0, 2])
newpoints2 = np.empty(shape=[0, 2])
    matches_Mask = [[0,0] for i in range(len(matches))]
# filter points by using mask
for i in range(len(matches)):
pt1 = points1[i]
pt2 = points2[i]
pt1x, pt1y = zip(*[pt1])
pt2x, pt2y = zip(*[pt2])
diffy = np.float32( np.float32(pt2y) - np.float32(pt1y) )
print(diffy)
if abs(diffy) < 10.0:
newpoints1 = np.append(newpoints1, [pt1], axis=0)
newpoints2 = np.append(newpoints2, [pt2], axis=0)
matches_Mask[i]=[1,0] #<--- mask created
print(matches_Mask)
draw_params = dict(matchColor = (255,0,),
singlePointColor = (255,255,0),
matchesMask = matches_Mask, #<---- remove mask here
flags = 0)
# Draw top matches
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
cv2.imwrite("/Users/fred/desktop/lena_matches.png", imMatches)
# Find Affine Transformation
# true means full affine, false means rigid (SRT)
m = cv2.estimateRigidTransform(newpoints1,newpoints2,False)
# Use affine transform to warp im1 to match im2
height, width, channels = im2.shape
im1Reg = cv2.warpAffine(im1, m, (width, height))
return im1Reg, m
if __name__ == '__main__':
# Read reference image
refFilename = "/Users/fred/desktop/lena.png"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
# Read image to be aligned
imFilename = "/Users/fred/desktop/lena_r1.png"
print("Reading image to align : ", imFilename);
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
print("Aligning images ...")
# Registered image will be stored in imReg.
# The estimated transform will be stored in m.
imReg, m = alignImages(im, imReference)
# Write aligned image to disk.
outFilename = "/Users/fred/desktop/lena_r1_aligned.jpg"
print("Saving aligned image : ", outFilename);
cv2.imwrite(outFilename, imReg)
# Print estimated homography
print("Estimated Affine Transform : \n", m)
Here are my two images: lena, and lena rotated by 1 degree. Note that these are not my actual images. These images have no diffy values > 10, but my actual images do.
I am trying to align and warp the rotated image to match the original lena image.
The way you are creating the mask is incorrect. It only needs to be a list with single numbers, with each number telling you whether you want to use that particular feature match.
Therefore, replace this line:
matches_Mask = [[0,0] for i in range(len(matches))]
With this:
matches_Mask = [0] * len(matches)
... so:
# matches_Mask = [[0,0] for i in range(len(matches))]
matches_Mask = [0] * len(matches)
This creates a list of 0s that is as long as the number of matches. Finally, you need to change the mask assignment so that it writes a single value:
if abs(diffy) < 10.0:
#matches_Mask[i]=[1,0] #<--- mask created
matches_Mask[i] = 1
I finally get this:
Estimated Affine Transform :
[[ 1.00001187 0.01598318 -5.05963793]
[-0.01598318 1.00001187 -0.86121051]]
Take note that the format of the mask is different depending on what matcher you use. In this case, you use brute force matching so the mask needs to be in the format that I just described.
If you used FLANN's knnMatch for example, then it will be a nested list of lists, with each element being a list that is k elements long. For example, if you had k=3 and five keypoints, it would be a list five elements long, with each element being a three-element list. Each element in the sub-list delineates which match you want to use for drawing.
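For illustration, a knnMatch-style mask (as in the FLANN tutorial linked in the question) would look roughly like this, assuming keypoints/descriptors have already been computed for im1 and im2 and a FLANN matcher called flann has been created:
matches = flann.knnMatch(descriptors1, descriptors2, k=2)

# One [flag, flag] sub-list per query descriptor; a 1 means "draw this match"
matchesMask = [[0, 0] for _ in range(len(matches))]
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:      # Lowe's ratio test
        matchesMask[i] = [1, 0]            # keep only the best of the two

draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=0)
imMatches = cv2.drawMatchesKnn(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)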
I'm trying to stitch two images together, but I'm failing to do so because the program doesn't detect enough matches between the two images
Here's the code:
import numpy as np
import imutils
import cv2
class Stitcher:
def __init__(self):
self.isv3 = imutils.is_cv3()
def stitch(self, images, ratio=0.75, reprojThresh=5.0,
showMatches=False):
(imageB, imageA) = images
(kpsA, featuresA) = self.detectAndDescribe(imageA)
(kpsB, featuresB) = self.detectAndDescribe(imageB)
M = self.matchKeypoints(kpsA, kpsB,
featuresA, featuresB, ratio, reprojThresh)
if M is None:
return None
(matches, H, status) = M
result = cv2.warpPerspective(imageA, H,
(imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
if showMatches:
vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
status)
return (result, vis)
return result
def detectAndDescribe(self, image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if self.isv3:
descriptor = cv2.xfeatures2d.SIFT_create()
(kps, features) = descriptor.detectAndCompute(image, None)
else:
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(gray)
# extract features from the image
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps, features) = extractor.compute(gray, kps)
# convert the keypoints from KeyPoint objects to NumPy
# arrays
kps = np.float32([kp.pt for kp in kps])
# return a tuple of keypoints and features
return (kps, features)
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
ratio, reprojThresh):
# compute the raw matches and initialize the list of actual
# matches
matcher = cv2.DescriptorMatcher_create("BruteForce")
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
# loop over the raw matches
for m in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
matches.append((m[0].trainIdx, m[0].queryIdx))
# computing a homography requires at least 4 matches
if len(matches) > 4:
# construct the two sets of points
ptsA = np.float32([kpsA[i] for (_, i) in matches])
ptsB = np.float32([kpsB[i] for (i, _) in matches])
# compute the homography between the two sets of points
(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
reprojThresh)
# return the matches along with the homograpy matrix
# and status of each matched point
return (matches, H, status)
# otherwise, no homograpy could be computed
return None
def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
# initialize the output visualization image
(hA, wA) = imageA.shape[:2]
(hB, wB) = imageB.shape[:2]
vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
vis[0:hA, 0:wA] = imageA
vis[0:hB, wA:] = imageB
# loop over the matches
for ((trainIdx, queryIdx), s) in zip(matches, status):
# only process the match if the keypoint was successfully
# matched
if s == 1:
# draw the match
ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
# return the visualization
return vis
And here are the original images:
Image A:
Image B:
matched points:
The result of stitching:
The result is nowhere near what I want, and if I'm correct, it's due to there not being enough matching points between the two images.