AttributeError: 'tuple' object has no attribute 'sort' - python

Here is my code, and I am getting an AttributeError: 'tuple' object has no attribute 'sort'. I am trying to do an image alignment and found this standard image alignment code in an article. I am learning OpenCV and Python, which I am really new to; I am able to do basic stuff with OpenCV, and right now I am trying to learn image alignment and am stuck on this part.
Traceback (most recent call last):
File "test9.py", line 31, in <module>
matches.sort(key = lambda x: x.distance)
AttributeError: 'tuple' object has no attribute 'sort'
------------------
(program exited with code: 1)
Press return to continue
import cv2
import numpy as np
# Open the image files.
img1_color = cv2.imread("/home/pi/Desktop/Project AOI/testboard1/image_2.jpg") # Image to be aligned.
img2_color = cv2.imread("/home/pi/Desktop/Project AOI/testboard1/image_0.jpg") # Reference image.
# Convert to grayscale.
img1 = cv2.cvtColor(img1_color, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)
height, width = img2.shape
# Create ORB detector with 5000 features.
orb_detector = cv2.ORB_create(5000)
# Find keypoints and descriptors.
# The first arg is the image, second arg is the mask
# (which is not required in this case).
kp1, d1 = orb_detector.detectAndCompute(img1, None)
kp2, d2 = orb_detector.detectAndCompute(img2, None)
# Match features between the two images.
# We create a Brute Force matcher with
# Hamming distance as measurement mode.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
# Match the two sets of descriptors.
matches = matcher.match(d1, d2)
# Sort matches on the basis of their Hamming distance.
matches.sort(key = lambda x: x.distance)
# Take the top 90 % matches forward.
matches = matches[:int(len(matches)*0.9)]
no_of_matches = len(matches)
# Define empty matrices of shape no_of_matches * 2.
p1 = np.zeros((no_of_matches, 2))
p2 = np.zeros((no_of_matches, 2))
for i in range(len(matches)):
    p1[i, :] = kp1[matches[i].queryIdx].pt
    p2[i, :] = kp2[matches[i].trainIdx].pt
# Find the homography matrix.
homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
# Use this matrix to transform the
# colored image wrt the reference image.
transformed_img = cv2.warpPerspective(img1_color,
                                      homography, (width, height))
# Save the output.
cv2.imwrite('output.jpg', transformed_img)

You're getting a tuple returned, not a list. You can't just call matches.sort(...) on it.
OpenCV, since v4.5.4, exhibits this behavior in its Python bindings generation.
You have to use this instead:
matches = sorted(matches, ...)
This creates a new list, which contains the sorted elements of the original tuple.
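Applied to the code in the question, only the sort line needs to change. A minimal sketch of the fix:

# matcher.match() comes back as a tuple in recent OpenCV builds, so build
# a new sorted list instead of calling .sort() in place.
matches = sorted(matches, key=lambda x: x.distance)
# Take the top 90 % matches forward, exactly as before.
matches = matches[:int(len(matches) * 0.9)]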
Related issues:
https://github.com/opencv/opencv/issues/21266
https://github.com/opencv/opencv/issues/21284


OpenCV error: (-215:Assertion failed) (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 in function 'warpPerspective'

I am trying to perform image registration for two RGB lung mask images of size 128x128. This had worked fine for other images when I was learning image registration, but now it somehow throws this error. I am a newbie learning this; any help is appreciated.
I have attached the code of what I am trying to do below, where I have created a registerImage function by following GeeksForGeeks and passed in the images I want to register.
import cv2
import numpy as np
def registerImage(img1,img2):
    # Open the image files.
    img1_color = img1 # Image to be aligned.
    img2_color = img2 # Reference image.
    # Convert to grayscale.
    img1 = cv2.cvtColor(img1_color, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)
    height, width = img2.shape
    # Create ORB detector with 5000 features.
    ## used to create keypoints on the reference image
    orb_detector = cv2.ORB_create(5000)
    # Find keypoints and descriptors.
    # The first arg is the image, second arg is the mask
    # (which is not required in this case).
    kp1, d1 = orb_detector.detectAndCompute(img1, None)
    kp2, d2 = orb_detector.detectAndCompute(img2, None)
    # Match features between the two images.
    # We create a Brute Force matcher with
    # Hamming distance as measurement mode.
    # Brute-Force matcher is simple.
    # It takes the descriptor of one feature in the first set and matches it with all
    # other features in the second set using some distance calculation. The closest one is returned.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
    # Match the two sets of descriptors.
    matches = matcher.match(d1, d2)
    # Sort matches on the basis of their Hamming distance.
    matches.sort(key = lambda x: x.distance)
    # Take the top 90 % matches forward.
    matches = matches[:int(len(matches)*0.9)]
    no_of_matches = len(matches)
    # Define empty matrices of shape no_of_matches * 2.
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))
    for i in range(len(matches)):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt
    # Find the homography matrix.
    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
    # Use this matrix to transform the
    # colored image wrt the reference image.
    transformed_img = cv2.warpPerspective(img1_color,
                                          homography, (width, height))
    # Save the output.
    # cv2.imwrite('output.jpg', transformed_img)
    img1_show = cv2.resize(img1_color,(320,320))
    img2_show = cv2.resize(img2_color,(320,320))
    img3_show = cv2.resize(transformed_img,(320,320))
    img = np.concatenate((img1_show,img2_show,img3_show), axis=1)
    cv2_imshow(img)
ref_path = path + "/mask_0.png"
test_path = path + "/mask_8.png"
from google.colab.patches import cv2_imshow
ref_mask = cv2.imread(ref_path)
cv2_imshow(ref_mask)
test_mask = cv2.imread(test_path)
cv2_imshow(test_mask)
registerImage(ref_mask,test_mask)
############################################################################
Error:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-18-b7a8933e693e> in <module>()
----> 1 registerImage(ref_mask,test_mask)
<ipython-input-2-3a703c66a8e0> in registerImage(img1, img2)
54 # colored image wrt the reference image.
55 transformed_img = cv2.warpPerspective(img1_color,
---> 56 homography, (width, height))
57
58 # Save the output.
error: OpenCV(4.1.2) /io/opencv/modules/imgproc/src/imgwarp.cpp:3167: error: (-215:Assertion failed) (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 in function 'warpPerspective'
Work backwards from where you got the error and trace it to find what is causing it.
You're getting errors at this statement:
transformed_img = cv2.warpPerspective(img1_color,
homography, (width, height))
What I'd suggest is to check the variables themselves that are fed into the function. Here's a crude but simple way to do it.
# Place these statements prior to the error and check their values
print("hemography:",hemography) # In my case this was None, which is why I got an error
print("h:",height,"w:", width) # Check width/height seems correct
cv2_imshow(img1_color) # Check image is loaded properly
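If homography comes back as None (for example because findHomography could not estimate a transform from the matched points), warpPerspective fails with exactly this assertion. A minimal guard, sketched with the variable names from the question's code:

if homography is None:
    raise ValueError("findHomography returned None - not enough good matches "
                     "to estimate a transform between the two masks")
transformed_img = cv2.warpPerspective(img1_color, homography, (width, height))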
Also worth noting, in case it comes up while others are searching: for similar code I got an error at the matches.sort(...) line because the type returned is a tuple and not a list.
To fix it I changed it to the following:
matches = sorted(matches,key=lambda x:x.distance, reverse=False)
Debugging is a skill you'll pick up over time, as is asking and answering questions here. That error isn't simple to understand if you're new to Python, which is how I found this question in the first place. The documentation will also become easier to understand over time.

How do I add additional photos to my code in OpenCV Python for my Stitching Project?

Honestly, I am pretty lost. I was finally able to stitch these two pictures together but am unsure how to update my code to incorporate more than two photos. How would I change my code to allow stitching multiple pictures? Below is what I have so far, and I should mention that the pictures I am using are low quality, so other simpler examples I found either did not work or could not use all of the pictures I needed. If someone could just give me a general direction on how I would begin to alter this code for up to five pictures, I would appreciate it.
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
cv2.ocl.setUseOpenCL(False)
import warnings
warnings.filterwarnings('ignore')
#sift is a feature descriptor that helps locate pixel coordinates i.e. corner detector
feature_extraction_algo = 'sift'
feature_to_match = 'bf'
#train image needs to be the one transformed
train_photo = cv2.imread('Stitching/Images/Log771/Log2.bmp')
#converting from BGR to RGB for Matplotlib
train_photo = cv2.cvtColor(train_photo, cv2.COLOR_BGR2RGB)
train_photo_crop = train_photo[0:10000, 425:750]
#converting to gray scale
train_photo_gray = cv2.cvtColor(train_photo_crop, cv2.COLOR_RGB2GRAY)
#Do the same for the query image
query_photo = cv2.imread('Stitching/Images/Log771/Log3.bmp')
query_photo = cv2.cvtColor(query_photo, cv2.COLOR_BGR2RGB)
query_photo_crop = query_photo[0:10000, 425:750]
query_photo_gray = cv2.cvtColor(query_photo_crop, cv2.COLOR_RGB2GRAY)
#crop both images
#view/plot images
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, constrained_layout=False, figsize=(16,9))
ax1.imshow(query_photo_crop, cmap="gray")
ax1.set_xlabel("Query Image", fontsize=14)
ax2.imshow(train_photo_crop, cmap="gray")
ax2.set_xlabel("Train Image", fontsize=14)
plt.savefig("./"+'.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpeg')
plt.show()
#sift.detectAndCompute() gets keypoints and descriptors--helps to determine how similar or different keypoints are-- ie. one picture is
#huge and one is small. Keypoints match but are not similar enough, which is where descriptors come in.
#to compare the keypoints in vector format
def select_descriptor_methods(image, method=None):
    assert method is not None, "Please define a feature descriptor method. Accepted values are: 'sift', 'surf'"
    if method == 'sift':
        descriptor = cv2.SIFT_create()
    elif method == 'surf':
        descriptor = cv2.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()
    (keypoints, features) = descriptor.detectAndCompute(image, None)
    return (keypoints, features)
keypoints_train_img, features_train_img = select_descriptor_methods(train_photo_gray, method=feature_extraction_algo)
keypoints_query_img, features_query_img = select_descriptor_methods(query_photo_gray, method=feature_extraction_algo)
for keypoint in keypoints_query_img:
    x, y = keypoint.pt
    size = keypoint.size
    orientation = keypoint.angle
    response = keypoint.response
    octave = keypoint.octave
    class_id = keypoint.class_id
    print(x, y)
    print(size)
    print(orientation)
    print(response)
    print(octave)
    print(class_id)
print(len(keypoints_query_img))
features_query_img.shape
#Noting a basic fact that - SIFT descriptor is computed for every key-point detected in the image.
#Before computing descriptor, you probably used a detector (as Harris, Sift or Surf Detector) to detect points of interest. Detecting key-points and computing descriptors are two independent steps!
#drawing keypoints using drawKeypoints(input image,
# keypoints, output image, color, flag) -- keypoints based off input picture
#Displaying keypoints and features on both detected images
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20,8), constrained_layout=False)
ax1.imshow(cv2.drawKeypoints(train_photo_gray, keypoints_query_img, None, color=(0,255,0)))
ax1.set_xlabel("(a)", fontsize=14)
ax2.imshow(cv2.drawKeypoints(query_photo_gray, keypoints_query_img,None,color=(0,255,0)))
ax2.set_xlabel("(b)", fontsize=14)
plt.savefig("./Stitching/" + feature_extraction_algo + "Images" + '.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpg')
plt.show()
def create_matching_object(method, crossCheck):
    "Create and return a Matcher Object"
    # For BF matcher, first we have to create the BFMatcher object using cv2.BFMatcher().
    # It takes two optional params.
    # normType - It specifies the distance measurement
    # crossCheck - which is false by default. If it is true, Matcher returns only those matches
    # with value (i,j) such that i-th descriptor in set A has j-th descriptor in set B as the best match
    # and vice-versa.
    if method == 'sift' or method == 'surf':
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method == 'orb' or method == 'brisk':
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
    return bf
def key_points_matching(features_train_img, features_query_img, method):
    bf = create_matching_object(method, crossCheck=True)
    # Match descriptors.
    best_matches = bf.match(features_train_img, features_query_img)
    # Sort the features in order of distance.
    # The points with small distance (more similarity) are ordered first in the vector
    rawMatches = sorted(best_matches, key=lambda x: x.distance)
    print("Raw matches (Brute force):", len(rawMatches))
    return rawMatches
def key_points_matching_KNN(features_train_img, features_query_img, ratio, method):
    bf = create_matching_object(method, crossCheck=False)
    # compute the raw matches and initialize the list of actual matches
    rawMatches = bf.knnMatch(features_train_img, features_query_img, k=2)
    print("Raw matches (knn):", len(rawMatches))
    matches = []
    # loop over the raw matches
    for m, n in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if m.distance < n.distance * ratio:
            matches.append(m)
    return matches
print("Drawing: {} matched features Lines".format(feature_to_match))
fig = plt.figure(figsize=(20,8))
if feature_to_match == 'bf':
    matches = key_points_matching(features_train_img, features_query_img, method=feature_extraction_algo)
    mapped_features_image = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, matches[:100], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Now for cross checking draw the feature-mapping lines also with KNN
elif feature_to_match == 'knn':
    matches = key_points_matching_KNN(features_train_img, features_query_img, ratio=0.75, method=feature_extraction_algo)
    mapped_features_image_knn = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, np.random.choice(matches, 50), None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(mapped_features_image)
plt.axis('off')
plt.savefig("./Stitching/" + feature_to_match + "_matching_img_log_"+'.jpeg', bbox_inches='tight', dpi=300, optimize=True, format='jpeg')
plt.show()
feature_to_match = 'knn'
print("Drawing: {} matched features Lines".format(feature_to_match))
fig = plt.figure(figsize=(20,8))
if feature_to_match == 'bf':
    matches = key_points_matching(features_train_img, features_query_img, method=feature_extraction_algo)
    mapped_features_image = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, matches[:100],
                                            None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Now for cross checking draw the feature-mapping lines also with KNN
elif feature_to_match == 'knn':
    matches = key_points_matching_KNN(features_train_img, features_query_img, ratio=0.75, method=feature_extraction_algo)
    mapped_features_image_knn = cv2.drawMatches(train_photo_crop, keypoints_train_img, query_photo_crop, keypoints_query_img, np.random.choice(matches, 100), None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(mapped_features_image_knn)
plt.axis('off')
plt.savefig("./Stitching/" + feature_to_match + "_Images"+'.jpg', bbox_inches='tight', dpi=300, optimize=True, format='jpg')
plt.show()
def homography_stitching(keypoints_train_img, keypoints_query_img, matches, reprojThresh):
    keypoints_train_img = np.float32([keypoint.pt for keypoint in keypoints_train_img])
    keypoints_query_img = np.float32([keypoint.pt for keypoint in keypoints_query_img])
    ''' For findHomography() - I need to have an assumption of a minimum of correspondence points that are present between the 2 images. Here, I am assuming the Minimum Match Count to be 4 '''
    if len(matches) > 4:
        # construct the two sets of points
        points_train = np.float32([keypoints_train_img[m.queryIdx] for m in matches])
        points_query = np.float32([keypoints_query_img[m.trainIdx] for m in matches])
        # Calculate the homography between the sets of points
        (H, status) = cv2.findHomography(points_train, points_query, cv2.RANSAC, reprojThresh)
        return (matches, H, status)
    else:
        return None
M = homography_stitching(keypoints_train_img, keypoints_query_img, matches, reprojThresh=4)
if M is None:
    print("Error!")
(matches, Homography_Matrix, status) = M
print(Homography_Matrix)
#Finally, we can apply our transformation by calling the cv2.warpPerspective function. The first parameter is our
# original image that we want to warp,
#the second is our transformation matrix M (which will be obtained from homography_stitching),
#and the final parameter is a tuple, used to indicate the width and height of the output image.
# For the calculation of the width and height of the final horizontal panoramic images
# I can just add the widths of the individual images and for the height
# I can take the max from the 2 individual images.
width = query_photo_crop.shape[1] + train_photo_crop.shape[1]
print("width ", width)
# 2922 - Which is exactly the sum value of the width of
# my train.jpg and query.jpg
height = max(query_photo_crop.shape[0], train_photo_crop.shape[0])
# otherwise, apply a perspective warp to stitch the images together
# Now just plug that "Homography_Matrix" into cv::warpedPerspective and I shall have a warped image1 into image2 frame
result = cv2.warpPerspective(train_photo_crop, Homography_Matrix, (width, height))
# The warpPerspective() function returns an image or video whose size is the same as the size of the original image or video. Hence set the pixels as per my query_photo
result[0:query_photo_crop.shape[0], 0:query_photo_crop.shape[1]] = query_photo_crop
plt.figure(figsize=(20,10))
plt.axis('off')
plt.imshow(result)
imageio.imwrite("./Stitching/Images/Log771/finishedLog"+'.jpg', result)
plt.show()
Simple method:
OpenCV makes this easy with the cv2.Stitcher_create module. Everything is handled internally, right from identifying key feature points to matching them appropriately and finally warping the images. You can pass in more than 2 images to be stitched. But I must warn you: the greater the number of images and/or their dimensions, the greater the time needed for computation.
How to use cv2.Stitcher_create module?
First, we need to create an instance of the class.
imageStitcher = cv2.Stitcher_create()
To get a list of all the functions associated with this class just type help(imageStitcher). It will return a list of all the functions along with its required input parameters and expected output.
The created instance contains a function stitch() which is used to create the panorama. stitch can be used in either of the following two ways:
ret, panorama_image = stitch(images):
Pass the list of all the images and the module identifies key features, matches them and yields the warped image
ret, panorama_image = stitch(images, masks):
Optionally, we can also pass a list of masks, with one mask corresponding to every image. A mask is a binary image consisting of black and white. The module looks for keypoints/features only in the white regions of every mask, proceeds to match them and yields the warped image.
Both of the above ways also return a variable ret; if the value is 0, it means stitching was performed without any issues.
The following code sample (which I borrowed) shows the first method:
Sample images to be stitched:
Code:
import os
import cv2
# path containing images to be stitched
path = 'stitching_images'
# append all the images within the path to a list
images = []
for image_file in os.listdir(path):
    if image_file.endswith('.jpg'):
        img = cv2.imread(os.path.join(path, image_file))
        images.append(img)
# creating instance of stitcher class
imageStitcher = cv2.Stitcher_create()
# call the 'stitch' function and pass in the list of images
status, stitched_img = imageStitcher.stitch(images)
# display the panorama if stitching is successful
if status == 0:
    cv2.imshow('Panorama', stitched_img)
Result:
(Code and images borrowed from https://github.com/niconielsen32/ComputerVision/tree/master/imageStitching)
Hard method:
If you want to create a panorama using your own code, I would suggest performing it sequentially (a rough sketch follows the steps below):
1. Iterate through all the images in your collection (say A, B, C, D, E).
2. For each pair of images, find keypoints and match them (AB, AC, AD, AE, BC, BD, etc.).
3. Select the pair where keypoint matches are the highest and stitch them using a homography (say images A and C are warped to P1).
4. Next, find keypoint matches between the stitched image and every other image in your collection (P1B, P1D, P1E).
5. Find the pair with the most matches and stitch them.
6. Repeat likewise until all images have been folded into the panorama.
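A rough sketch of that sequential idea, reusing the ORB + brute-force Hamming matching already in your code (the helper functions below are illustrative, not taken from your script, and a simple left-to-right order stands in for the "best pair first" selection described above):

import cv2
import numpy as np

def stitch_pair(base, new):
    # ORB keypoints and descriptors on both images (grayscale)
    orb = cv2.ORB_create(5000)
    kp1, d1 = orb.detectAndCompute(cv2.cvtColor(base, cv2.COLOR_BGR2GRAY), None)
    kp2, d2 = orb.detectAndCompute(cv2.cvtColor(new, cv2.COLOR_BGR2GRAY), None)
    # Brute-force Hamming matching, best matches first
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(d2, d1), key=lambda m: m.distance)
    # Homography that maps the new image into the base image's frame
    src = np.float32([kp2[m.queryIdx].pt for m in matches])
    dst = np.float32([kp1[m.trainIdx].pt for m in matches])
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 4.0)
    # Warp the new image onto a wider canvas, then paste the base image over the left part
    width = base.shape[1] + new.shape[1]
    height = max(base.shape[0], new.shape[0])
    result = cv2.warpPerspective(new, H, (width, height))
    result[0:base.shape[0], 0:base.shape[1]] = base
    return result

def stitch_all(images):
    # Fold the whole collection into one panorama, one pair at a time
    panorama = images[0]
    for img in images[1:]:
        panorama = stitch_pair(panorama, img)
    return panorama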

Mask Issue With Python OpenCV ORB Image Alignment

I am trying to implement a Python (3.7) OpenCV (3.4.3) ORB image alignment. I normally do most of my processing with ImageMagick. But I need to do some image alignment and am trying to use Python OpenCV ORB. My script is based upon one from Satya Mallick's Learn OpenCV tutorial at https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/.
However, I am trying to modify it to use a rigid alignment rather than a perspective homography, and to filter the points using a mask to limit the difference in y values, since the images are nearly aligned already.
The mask approach was taken from a FLANN alignment code in the last example at https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html.
My script works fine if I remove the matchesMask, which should provide the point filtering. (I have two other working scripts. One is similar, but just filters the points and ignores the mask. The other is based upon the ECC algorithm.)
However, I would like to understand why my code below is not working.
Perhaps the structure of my mask is incorrect in current versions of Python OpenCV?
The error that I get is:
Traceback (most recent call last):
File "warp_orb_rigid2_filter.py", line 92, in <module>
imReg, m = alignImages(im, imReference)
File "warp_orb_rigid2_filter.py", line 62, in alignImages
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
SystemError: <built-in function drawMatches> returned NULL without setting an error
Here is my code. The first arrow shows where the mask is created. The second arrow shows the line I have to remove to get the script to work. But then it ignores my filtering of points.
#!/bin/python3.7
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Extract location of good matches and filter by diffy
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    # initialize empty arrays for newpoints1 and newpoints2 and mask
    newpoints1 = np.empty(shape=[0, 2])
    newpoints2 = np.empty(shape=[0, 2])
    matches_Mask = [[0,0] for i in range(len(matches))]
    # filter points by using mask
    for i in range(len(matches)):
        pt1 = points1[i]
        pt2 = points2[i]
        pt1x, pt1y = zip(*[pt1])
        pt2x, pt2y = zip(*[pt2])
        diffy = np.float32( np.float32(pt2y) - np.float32(pt1y) )
        print(diffy)
        if abs(diffy) < 10.0:
            newpoints1 = np.append(newpoints1, [pt1], axis=0)
            newpoints2 = np.append(newpoints2, [pt2], axis=0)
            matches_Mask[i]=[1,0] #<--- mask created
    print(matches_Mask)
    draw_params = dict(matchColor = (255,0,),
                       singlePointColor = (255,255,0),
                       matchesMask = matches_Mask, #<---- remove mask here
                       flags = 0)
    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None, **draw_params)
    cv2.imwrite("/Users/fred/desktop/lena_matches.png", imMatches)
    # Find Affine Transformation
    # true means full affine, false means rigid (SRT)
    m = cv2.estimateRigidTransform(newpoints1,newpoints2,False)
    # Use affine transform to warp im1 to match im2
    height, width, channels = im2.shape
    im1Reg = cv2.warpAffine(im1, m, (width, height))
    return im1Reg, m
if __name__ == '__main__':
    # Read reference image
    refFilename = "/Users/fred/desktop/lena.png"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
    # Read image to be aligned
    imFilename = "/Users/fred/desktop/lena_r1.png"
    print("Reading image to align : ", imFilename);
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
    print("Aligning images ...")
    # Registered image will be stored in imReg.
    # The estimated transform will be stored in m.
    imReg, m = alignImages(im, imReference)
    # Write aligned image to disk.
    outFilename = "/Users/fred/desktop/lena_r1_aligned.jpg"
    print("Saving aligned image : ", outFilename);
    cv2.imwrite(outFilename, imReg)
    # Print estimated homography
    print("Estimated Affine Transform : \n", m)
Here are my two images: lena and lena rotated by 1 degree. Note that these are not my actual images. These images have no diffy values > 10, but my actual images do.
I am trying to align and warp the rotated image to match the original lena image.
The way you are creating the mask is incorrect. It only needs to be a list with single numbers, with each number telling you whether you want to use that particular feature match.
Therefore, replace this line:
matches_Mask = [[0,0] for i in range(len(matches))]
With this:
matches_Mask = [0] * len(matches)
... so:
# matches_Mask = [[0,0] for i in range(len(matches))]
matches_Mask = [0] * len(matches)
This creates a list of 0s that is as long as the number of matches. Finally, you need to change the mask assignment so that it writes a single value:
if abs(diffy) < 10.0:
    #matches_Mask[i]=[1,0] #<--- mask created
    matches_Mask[i] = 1
I finally get this:
Estimated Affine Transform :
[[ 1.00001187 0.01598318 -5.05963793]
[-0.01598318 1.00001187 -0.86121051]]
Take note that the format of the mask is different depending on what matcher you use. In this case, you use brute force matching so the mask needs to be in the format that I just described.
If you used FLANN's knnMatch for example, then it will be a nested list of lists, with each element being a list that is k long. For example, if you had k=3 and five keypoints, it will be a list of five elements long, with each element being a three element list. Each element in the sub-list delineates what match you want to use for drawing.
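For reference, a sketch of what that knnMatch-style mask looks like, following the FLANN example in the tutorial linked in the question (variable names reuse the question's script; this is illustrative, not a drop-in fix):

# With knnMatch(k=2) each query descriptor gets a [best, second-best] pair,
# so the mask is a list of two-element lists, one per pair.
knn_matches = matcher.knnMatch(descriptors1, descriptors2, k=2)
matchesMask = [[0, 0] for _ in range(len(knn_matches))]
for i, (m, n) in enumerate(knn_matches):
    if m.distance < 0.7 * n.distance:   # Lowe's ratio test
        matchesMask[i] = [1, 0]         # draw only the best match of the pair
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=0)
imMatches = cv2.drawMatchesKnn(im1, keypoints1, im2, keypoints2, knn_matches, None, **draw_params)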

'module' object has no attribute 'drawMatches' opencv python

I am just doing an example of feature detection in OpenCV. This example is shown below. It is giving me the following error:
'module' object has no attribute 'drawMatches'
I have checked the OpenCV Docs and am not sure why I'm getting this error. Does anyone know why?
import numpy as np
import cv2
import matplotlib.pyplot as plt
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('box_in_scene.png',0) # trainImage
# Initiate SIFT detector
orb = cv2.ORB()
# find the keypoints and descriptors with SIFT
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1,des2)
# Draw first 10 matches.
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2)
plt.imshow(img3),plt.show()
Error:
Traceback (most recent call last):
File "match.py", line 22, in <module>
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2)
AttributeError: 'module' object has no attribute 'drawMatches'
I am late to the party as well, but I installed OpenCV 2.4.9 for Mac OS X, and the drawMatches function doesn't exist in my distribution. I've also tried the second approach with find_obj and that didn't work for me either. With that, I decided to write my own implementation of it that mimics drawMatches to the best of my ability and this is what I've produced.
I've provided my own images where one is of a camera man, and the other one is the same image but rotated by 55 degrees counterclockwise.
The basics of what I wrote is that I allocate an output RGB image where the amount of rows is the maximum of the two images to accommodate for placing both of the images in the output image and the columns are simply the summation of both the columns together. Be advised that I assume that both images are grayscale.
I place each image in their corresponding spots, then run through a loop of all of the matched keypoints. I extract which keypoints matched between the two images, then extract their (x,y) coordinates. I draw circles at each of the detected locations, then draw a line connecting these circles together.
Bear in mind that the detected keypoint in the second image is with respect to its own coordinate system. If you want to place this in the final output image, you need to offset the column coordinate by the amount of columns from the first image so that the column coordinate is with respect to the coordinate system of the output image.
Without further ado:
import numpy as np
import cv2
def drawMatches(img1, kp1, img2, kp2, matches):
    """
    My own implementation of cv2.drawMatches as OpenCV 2.4.9
    does not have this function available but it's supported in
    OpenCV 3.0.0

    This function takes in two images with their associated
    keypoints, as well as a list of DMatch data structure (matches)
    that contains which keypoints matched in which images.

    An image will be produced where a montage is shown with
    the first image followed by the second image beside it.

    Keypoints are delineated with circles, while lines are connected
    between matching keypoints.

    img1,img2 - Grayscale images
    kp1,kp2 - Detected list of keypoints through any of the OpenCV keypoint
              detection algorithms
    matches - A list of matches of corresponding keypoints through any
              OpenCV keypoint matching algorithm
    """
    # Create a new output image that concatenates the two images together
    # (a.k.a) a montage
    rows1 = img1.shape[0]
    cols1 = img1.shape[1]
    rows2 = img2.shape[0]
    cols2 = img2.shape[1]

    # Create the output image
    # The rows of the output are the largest between the two images
    # and the columns are simply the sum of the two together
    # The intent is to make this a colour image, so make this 3 channels
    out = np.zeros((max([rows1,rows2]), cols1+cols2, 3), dtype='uint8')

    # Place the first image to the left
    out[:rows1,:cols1] = np.dstack([img1, img1, img1])

    # Place the next image to the right of it
    out[:rows2,cols1:] = np.dstack([img2, img2, img2])

    # For each pair of points we have between both images
    # draw circles, then connect a line between them
    for mat in matches:
        # Get the matching keypoints for each of the images
        img1_idx = mat.queryIdx
        img2_idx = mat.trainIdx

        # x - columns
        # y - rows
        (x1,y1) = kp1[img1_idx].pt
        (x2,y2) = kp2[img2_idx].pt

        # Draw a small circle at both co-ordinates
        # radius 4
        # colour blue
        # thickness = 1
        cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)
        cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)

        # Draw a line in between the two points
        # thickness = 1
        # colour blue
        cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255,0,0), 1)

    # Show the image
    cv2.imshow('Matched Features', out)
    cv2.waitKey(0)
    cv2.destroyWindow('Matched Features')

    # Also return the image if you'd like a copy
    return out
To illustrate that this works, here are the two images that I used:
I used OpenCV's ORB detector to detect the keypoints, and used the normalized Hamming distance as the distance measure for similarity as this is a binary descriptor. As such:
import numpy as np
import cv2
img1 = cv2.imread('cameraman.png', 0) # Original image - ensure grayscale
img2 = cv2.imread('cameraman_rot55.png', 0) # Rotated image - ensure grayscale
# Create ORB detector with 1000 keypoints with a scaling pyramid factor
# of 1.2
orb = cv2.ORB(1000, 1.2)
# Detect keypoints of original image
(kp1,des1) = orb.detectAndCompute(img1, None)
# Detect keypoints of rotated image
(kp2,des2) = orb.detectAndCompute(img2, None)
# Create matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Do matching
matches = bf.match(des1,des2)
# Sort the matches based on distance. Least distance
# is better
matches = sorted(matches, key=lambda val: val.distance)
# Show only the top 10 matches - also save a copy for use later
out = drawMatches(img1, kp1, img2, kp2, matches[:10])
This is the image I get:
To use with knnMatch from cv2.BFMatcher
I'd like to make a note where the above code only works if you assume that the matches appear in a 1D list. However, if you decide to use the knnMatch method from cv2.BFMatcher for example, what is returned is a list of lists. Specifically, given the descriptors in img1 called des1 and the descriptors in img2 called des2, each element in the list returned from knnMatch is another list of k matches from des2 which are the closest to each descriptor in des1. Therefore, the first element from the output of knnMatch is a list of k matches from des2 which were the closest to the first descriptor found in des1. The second element from the output of knnMatch is a list of k matches from des2 which were the closest to the second descriptor found in des1 and so on.
To make the most sense of knnMatch, you must limit the total amount of neighbours to match to k=2. The reason why is because you want to use at least two matched points for each source point available to verify the quality of the match and if the quality is good enough, you'll want to use these to draw your matches and show them on the screen. You can use a very simple ratio test (credit goes to David Lowe) to ensure that for a point, we see that the distance / dissimilarity in matching to the best point is much smaller than the distance / dissimilarity in matching to the second best point. We can capture this by finding the ratio of the distance of the best matched point to the second best matched point. The ratio should be small to illustrate that a point to its best matched point is unambiguous. If the ratio is close to 1, this means that both matches are equally as "good" and thus ambiguous so we should not include these. We can think of this as an outlier rejection technique. Therefore, to turn what is returned from knnMatch to what is required with the code I wrote above, iterate through the matches, use the above ratio test and check if it passes. If it does, add the first matched keypoint to a new list.
Assuming that you created all of the variables like you did before declaring the BFMatcher instance, you'd now do this to adapt the knnMatch method for using drawMatches:
# Create matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Perform KNN matching
matches = bf.knnMatch(des1, des2, k=2)
# Apply ratio test
good = []
for m,n in matches:
    if m.distance / n.distance < 0.75: # Or you can do m.distance < 0.75 * n.distance
        # Add the match for point m to the best
        good.append(m)
# Or do a list comprehension
#good = [m for (m,n) in matches if m.distance < 0.75*n.distance]
# Now perform drawMatches
out = drawMatches(img1, kp1, img2, kp2, good)
As you iterate over the matches list, m and n should be the match between a point from des1 and its best match (m) and its second best match (n) both from des2. If we see that the ratio is small, we'll add this best match between the two points (m) to a final list. The ratio that I have, 0.75, is a parameter that needs tuning so if you're not getting good results, play around with this until you do. However, values between 0.7 to 0.8 are a good start.
I want to attribute the above modifications to user #ryanmeasel; the answer where I found these modifications is his post: OpenCV Python : No drawMatchesknn function.
The drawMatches function is not part of the Python interface.
As you can see in the docs, it is only defined for C++ at the moment.
Excerpt from the docs:
C++: void drawMatches(const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
C++: void drawMatches(const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch>>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char>>& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
If the function had a Python interface, you would find something like this:
Python: cv2.drawMatches(img1, keypoints1, [...])
EDIT
There actually was a commit that introduced this function 5 months ago. However, it is not (yet) in the official documentation.
Make sure you are using the newest OpenCV Version (2.4.7).
For the sake of completeness, the function's interface for OpenCV 3.0.0 looks like this:
cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches1to2[, outImg[, matchColor[, singlePointColor[, matchesMask[, flags]]]]]) → outImg
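On a build where the Python binding exists, a minimal call following that signature would be (a sketch using the variable names from the question; note the explicit None for outImg):

img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
plt.imshow(img3), plt.show()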
I know this question has an accepted answer that is correct, but if you are using OpenCV 2.4.8 and not 3.0(-dev), a workaround could be to use some functions from the included samples found in opencv\sources\samples\python2\find_obj
import cv2
from find_obj import filter_matches,explore_match
img1 = cv2.imread('../c/box.png',0) # queryImage
img2 = cv2.imread('../c/box_in_scene.png',0) # trainImage
# Initiate SIFT detector
orb = cv2.ORB()
# find the keypoints and descriptors with SIFT
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING)#, crossCheck=True)
matches = bf.knnMatch(des1, trainDescriptors = des2, k = 2)
p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
explore_match('find_obj', img1,img2,kp_pairs)#cv2 shows image
cv2.waitKey()
cv2.destroyAllWindows()
This is the output image:

3D normalised cross-correlation in Python

I'm currently doing 2D template matching using OpenCV's MatchTemplate function called from Python. I'm looking to extend my code into 3D but can't find any existing 3D cross-correlation programs. Can anyone help out?
Do you mean that you are currently looking for a known object somewhere in an image, and you are currently only able to handle that object being affine transformed (moved around on a 2D plane), but you want to be able to handle it being perspective transformed?
You could try using a SURF or SIFT algorithm to find features in your reference and unknown images:
def GetSurfPoints(image, mask):
    surfDetector = cv2.FeatureDetector_create("SURF")
    surfExtractor = cv2.DescriptorExtractor_create("SURF")
    keyPoints = surfDetector.detect(image, mask)
    keyPoints, descriptions = surfExtractor.compute(image, keyPoints)
    return keyPoints, descriptions
Then use FLANN to find matching points (this is from one of the cv2 samples):
def MatchFlann(desc1, desc2, r_threshold = 0.6):
    FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
    flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)
    flann = cv2.flann_Index(desc2, flann_params)
    idx2, dist = flann.knnSearch(desc1, 2, params = {})  # bug: need to provide empty dict
    mask = dist[:,0] / dist[:,1] < r_threshold
    idx1 = numpy.arange(len(desc1))
    matches = numpy.int32( zip(idx1, idx2[:,0]) )
    return matches[mask]
Now if you want to, you could use FindHomography to find a transformation that aligns the two images:
referencePoints = numpy.array([keyPoints[match[0]].pt for match in matches])
newPoints = numpy.array([keyPoints[match[1]].pt for match in matches])
transformMatrix, mask = cv2.findHomography(newPoints, referencePoints, method = cv2.cv.CV_LMEDS)
You could then use WarpPerspective and that matrix to align the images. Or you could do something else with the set of matched points found earlier.
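For completeness, that last step might look like this (a minimal sketch; referenceImage and newImage are illustrative names for the reference and unknown images, and transformMatrix is the homography found above):

# Warp the unknown image into the reference image's frame
height, width = referenceImage.shape[:2]
alignedImage = cv2.warpPerspective(newImage, transformMatrix, (width, height))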
