Angle between 2 ArUco markers planes - python

I want to measure the angular deviation of an ArUco marker from a plane defined by a second, reference ArUco marker.
A reference ArUco marker (M1) is fixed against a flat wall, and a second ArUco marker (M2) is a few centimeters in front of that same wall. I want to know when marker M2 deviates by more than 10 degrees from the xy plane of M1.
Here is an illustration of the configuration:
To do so, I thought I should calculate the relative rotation between the pose rvecs, as explained in this post:
Relative rotation between pose (rvec)
which proposes the following code:
import cv2
import numpy as np

def inversePerspective(rvec, tvec):
    """ Inverts the perspective transform given by rvec and tvec. """
    R, _ = cv2.Rodrigues(rvec)
    R = np.matrix(R).T
    invTvec = np.dot(R, np.matrix(-tvec))
    invRvec, _ = cv2.Rodrigues(R)
    return invRvec, invTvec

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get the pose of marker 1 relative to marker 2. Compose the returned
    rvec & tvec with rvec2 & tvec2 (cv2.composeRT) to recover rvec1 & tvec1. """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))

    # Invert the second marker's pose (the right one in the image)
    invRvec, invTvec = inversePerspective(rvec2, tvec2)

    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]

    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec
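For context, a minimal sketch of how these two functions would be called (the marker indexing and the 0.02 m marker size are assumptions; in practice the markers should be matched by their ids):

# Sketch only: poses of M1 and M2 from aruco.estimatePoseSingleMarkers,
# assuming corners[0]/corners[1] correspond to M1/M2.
rvec1, tvec1, _ = aruco.estimatePoseSingleMarkers(corners[0], 0.02, matrix_coefficients, distortion_coefficients)
rvec2, tvec2, _ = aruco.estimatePoseSingleMarkers(corners[1], 0.02, matrix_coefficients, distortion_coefficients)

composedRvec, composedTvec = relativePosition(rvec1, tvec1, rvec2, tvec2)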
Computing the composedRvec, I get the following results:
With both ArUco markers in the same plane (composedRvec values in the top right corner):
With both ArUco markers at a 90 degree angle:
I do not really understand the results:
The (0, 0, 0) composedRvec when the markers are in the same plane makes sense.
But why (0, 1.78, 0) in the second case?
What general condition should the resulting composedRvec satisfy to tell me when the angle between the 2 markers is above 10 degrees?
Am I even following the right strategy with composedRvec?
**** EDIT ****
Results with the 2 markers in the same xy plane, at a 40° angle:
||composedRvec|| = sqrt(0.619^2 + 0.529^2 + 0.711^2) = 1.08 rad = 61.87°
**** EDIT 2 ****
By retaking measurements in the 40° angle configuration, I found that the values fluctuate quite a bit, even without modifying the setup or the lighting. From time to time I do get the correct values:
||composedRvec|| = sqrt(0.019^2 + 0.012^2 + 0.74^2) = 0.74 rad = 42.4°, which is quite accurate.
**** EDIT 3 ****
So here is my final code, based on @Gilles-Philippe Paillé's edited answer:
import numpy as np
import cv2
import cv2.aruco as aruco

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # Get the camera source
img_path = 'D:/your_path/'

# FILE_STORAGE_READ
cv_file = cv2.FileStorage(img_path + "camera.yml", cv2.FILE_STORAGE_READ)
matrix_coefficients = cv_file.getNode("K").mat()
distortion_coefficients = cv_file.getNode("D").mat()

nb_markers = 2

def track(matrix_coefficients, distortion_coefficients):
    while True:
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change to grayscale
        aruco_dict = aruco.custom_dictionary(nb_markers, 5)
        parameters = aruco.DetectorParameters_create()  # Marker detection parameters

        # lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters,
            cameraMatrix=matrix_coefficients, distCoeff=distortion_coefficients)

        # store rz1 and rz2
        R_list = []

        if np.all(ids is not None):  # If there are markers found by detector
            for i in range(0, len(ids)):  # Iterate over markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                    corners[i], 0.02, matrix_coefficients, distortion_coefficients)
                (rvec - tvec).any()  # get rid of that nasty numpy value array error

                aruco.drawDetectedMarkers(frame, corners)  # Draw a square around the markers
                aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw axis

                R, _ = cv2.Rodrigues(rvec)
                # convert (np.matrix(R).T) to an array with np.squeeze(np.asarray())
                # to avoid "ValueError: shapes (1,3) and (1,3) not aligned"
                R = np.squeeze(np.asarray(np.matrix(R).T))
                R_list.append(R[2])

        # Display the resulting frame
        if len(R_list) == 2:
            print('##############')
            angle_radians = np.arccos(np.dot(R_list[0], R_list[1]))
            angle_degrees = angle_radians * 180 / np.pi
            print(angle_degrees)

        cv2.imshow('frame', frame)
        # Wait up to 3 seconds (3000 ms) for a key press. Check the key and do the corresponding job.
        key = cv2.waitKey(3000) & 0xFF
        if key == ord('q'):
            break

track(matrix_coefficients, distortion_coefficients)
And here are some results:
red -> real angle, white -> measured angle
This is outside the scope of the question, but I find that the pose estimation fluctuates quite a lot. For example, when the 2 markers are against the wall, the values easily jump from 9° to 37° without touching the system.

The result uses the Angle-axis representation, i.e., the norm of the vector is the angle of rotation (what you want), and the direction of the vector is the axis of rotation.
You are looking for θ = ||composedRvec||. Note that the result is in radians. The condition would be ||composedRvec|| > 10*π/180.
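In code, that check would look something like this (a minimal sketch, assuming composedRvec is the vector returned by relativePosition above):

import numpy as np

angle_rad = np.linalg.norm(composedRvec)   # rotation angle in radians
if angle_rad > np.deg2rad(10):             # i.e. 10*pi/180
    print("M2 deviates by more than 10 degrees from M1")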
Edit: To only consider the angle between the Z-axis of both planes, convert the two rotation vectors rvec1 and rvec2 into matrices and extract the 3rd columns. The angle is then angle_radians = np.arccos(np.dot(rz1, rz2))
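A minimal sketch of that edit (rvec1 and rvec2 are assumed to be the rotation vectors of the two markers, e.g. from aruco.estimatePoseSingleMarkers):

import cv2
import numpy as np

R1, _ = cv2.Rodrigues(np.asarray(rvec1, dtype=np.float64).reshape(3, 1))
R2, _ = cv2.Rodrigues(np.asarray(rvec2, dtype=np.float64).reshape(3, 1))
rz1, rz2 = R1[:, 2], R2[:, 2]   # 3rd columns = Z axes of the markers in camera coordinates

# clip guards against dot products marginally outside [-1, 1] due to rounding
angle_radians = np.arccos(np.clip(np.dot(rz1, rz2), -1.0, 1.0))
angle_degrees = np.degrees(angle_radians)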

Related

Camera calibration, focal length value seems too large

I tried a camera calibration with Python and OpenCV to find the camera matrix. I used the following code from this link:
https://automaticaddison.com/how-to-perform-camera-calibration-using-opencv/
import cv2 # Import the OpenCV library to enable computer vision
import numpy as np # Import the NumPy scientific computing library
import glob # Used to retrieve files that have a specified pattern

# Path to the image that you want to undistort
distorted_img_filename = r'C:\Users\uid20832\3.jpg'

# Chessboard dimensions
number_of_squares_X = 10 # Number of chessboard squares along the x-axis
number_of_squares_Y = 7  # Number of chessboard squares along the y-axis
nX = number_of_squares_X - 1 # Number of interior corners along x-axis
nY = number_of_squares_Y - 1 # Number of interior corners along y-axis

# Store vectors of 3D points for all chessboard images (world coordinate frame)
object_points = []

# Store vectors of 2D points for all chessboard images (camera coordinate frame)
image_points = []

# Set termination criteria. We stop either when an accuracy is reached or when
# we have finished a certain number of iterations.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Define real world coordinates for points in the 3D coordinate frame
# Object points are (0,0,0), (1,0,0), (2,0,0) ...., (5,8,0)
object_points_3D = np.zeros((nX * nY, 3), np.float32)

# These are the x and y coordinates
object_points_3D[:,:2] = np.mgrid[0:nY, 0:nX].T.reshape(-1, 2)

def main():
    # Get the file path for images in the current directory
    images = glob.glob(r'C:\Users\Kalibrierung\*.jpg')

    # Go through each chessboard image, one by one
    for image_file in images:
        # Load the image
        image = cv2.imread(image_file)

        # Convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Find the corners on the chessboard
        success, corners = cv2.findChessboardCorners(gray, (nY, nX), None)

        # If the corners are found by the algorithm, draw them
        if success == True:
            # Append object points
            object_points.append(object_points_3D)

            # Find more exact corner pixels
            corners_2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

            # Append image points
            image_points.append(corners)

            # Draw the corners
            cv2.drawChessboardCorners(image, (nY, nX), corners_2, success)

            # Display the image. Used for testing.
            #cv2.imshow("Image", image)

            # Display the window for a short period. Used for testing.
            #cv2.waitKey(200)

    # Now take a distorted image and undistort it
    distorted_image = cv2.imread(distorted_img_filename)

    # Perform camera calibration to return the camera matrix, distortion coefficients,
    # rotation and translation vectors etc.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(object_points,
                                                       image_points,
                                                       gray.shape[::-1],
                                                       None,
                                                       None)
But I think I always get the wrong parameters. From the calibration, my focal length is around 1750 in both the x and y directions. I don't think this can be right; it seems far too large. The camera documentation says the focal length is between 4 and 7 mm, so I am not sure why the calibrated value is so high. Here are some of my photos for the calibration. Maybe something is wrong with them. I moved the chessboard under the camera in different directions, angles and heights.
I was also wondering why I don't need the size of the squares in the code. Can someone explain it to me, or did I forget this input somewhere?
Your misconception is about "focal length". It's an overloaded term.
"focal length" (unit mm) in the optical part: it describes the distance between the lens plane and image/sensor plane, assuming a focus to infinity
"focal length" (unit pixels) in the camera matrix: it describes a scale factor for mapping the real world to a picture of a certain resolution
1750 may very well be correct, if you have a high resolution picture (Full HD or something).
The calculation goes:
f [pixels] = (focal length [mm]) / (pixel pitch [µm / pixel])
(take care of the units and prefixes, 1 mm = 1000 µm)
Example: a Pixel 4a phone, which has 1.40 µm pixel pitch and 4.38 mm focal length, has f = ~3128.57 (= fx = fy).
Another example: A Pixel 4a has a diagonal Field of View of approximately 77.7 degrees, and a resolution of 4032 x 3024 pixels, so that's 5040 pixels diagonally. You can calculate:
f = (5040 / 2) / tan(~77.7° / 2)
f = ~3128.6 [pixels]
And that calculation you can apply to arbitrary cameras for which you know the field of view and picture size. Use horizontal FoV and horizontal resolution if the diagonal resolution is ambiguous. That can happen if the sensor isn't 16:9 but the video you take from it is cropped to 16:9... assuming the crop only crops vertically, and leaves the horizontal alone.
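If it helps, here is that arithmetic as a small Python snippet, using the Pixel 4a numbers from the examples above:

import math

# From focal length (mm) and pixel pitch (um/pixel):
focal_length_mm = 4.38
pixel_pitch_um = 1.40
f_pixels = focal_length_mm * 1000.0 / pixel_pitch_um
print(f_pixels)              # ~3128.6

# From field of view and resolution (diagonal FoV, diagonal pixel count):
fov_deg = 77.7
diagonal_pixels = math.hypot(4032, 3024)   # 5040
f_pixels_from_fov = (diagonal_pixels / 2) / math.tan(math.radians(fov_deg) / 2)
print(f_pixels_from_fov)     # ~3128.6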
Why don't you need the size of the chessboard squares in this code? Because it only calibrates the intrinsic parameters (camera matrix and distortion coefficients). Those don't depend on the distance to the board or any other object in the scene.
If you were to calibrate extrinsic parameters, i.e. the distance of cameras in a stereo setup, then you would need to give the size of the squares.

Drawing ArUco's pose axes to wrong place

I am working on an image processing project that detects ArUco markers and does some things based on them. When there is only one marker in the video or image everything works fine, but when I put in another marker, the second ArUco marker's pose (its axes) is drawn in the wrong place, not at the center of the marker. Please see the screenshot I shared for a better understanding.
The marker with id[2] is fine: its pose axes are drawn at the center of the marker.
But the pose axes of the marker with id[1] are drawn at some random point.
Here is the part of the code that detects ArUco markers and draws the axes.
while True:
    ret, frame = cap.read()
    # Operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change to grayscale
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)  # Specify marker size as 4x4, 5x5, 6x6
    parameters = aruco.DetectorParameters_create()  # Marker detection parameters

    # Lists of ids and the corners belonging to each marker
    corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                            parameters=parameters,
                                                            cameraMatrix=matrix_coefficients,
                                                            distCoeff=distortion_coefficients)
    try:
        if np.all(ids is not None):  # If there are markers found by detector
            for i in range(0, len(ids)):  # Iterate over markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.02, matrix_coefficients,
                                                                           distortion_coefficients)
                (rvec - tvec).any()  # get rid of that nasty numpy value array error

                aruco.drawDetectedMarkers(frame, corners)  # Draw a square around the markers
                aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw axis

                c_x = (corners[i][0][0][0] + corners[i][0][1][0] + corners[i][0][2][0] + corners[i][0][3][0]) / 4  # X coordinate of marker's center
                c_y = (corners[i][0][0][1] + corners[i][0][1][1] + corners[i][0][2][1] + corners[i][0][3][1]) / 4  # Y coordinate of marker's center
                cv2.putText(frame, "id" + str(ids[i]), (int(c_x), int(c_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 225, 250), 2)
    except:
        if ids is None or len(ids) == 0:
            print("******************************************************")
            print("*************** Marker Detection Failed **************")
            print("******************************************************")

Get rotational shift using phase correlation and log polar transform

I have been working on a script which calculates the rotational shift between two images using cv2's phaseCorrelate method.
I have two images, the second is a 90 degree rotated version of the first image. After loading in the images, I convert them to log-polar before passing them into the phaseCorrelate function.
From what I have read, I believe that this should yield a rotational shift between two images.
The code below describes the implementation.
#bitwise right binary shift function
def rshift(val, n): return (val % 0x100000000)
base_img = cv2.imread('img1.jpg')
cur_img = cv2.imread('dataa//t_sv_1.jpg')
curr_img = rotateImage(cur_img, 90)
rows,cols,chan = base_img.shape
x, y, c = curr_img.shape
#convert images to valid type
ref32 = np.float32(cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY))
curr32 = np.float32(cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY))
value = np.sqrt(((rows/2.0)**2.0)+((cols/2.0)**2.0))
value2 = np.sqrt(((x/2.0)**2.0)+((y/2.0)**2.0))
polar_image = cv2.linearPolar(ref32,(rows/2, cols/2), value, cv2.WARP_FILL_OUTLIERS)
log_img = cv2.linearPolar(curr32,(x/2, y/2), value2, cv2.WARP_FILL_OUTLIERS)
shift = cv2.phaseCorrelate(polar_image, log_img)
sx = shift[0][0]
sy = shift[0][1]
sf = shift[1]
polar_image = polar_image.astype(np.uint8)
log_img = log_img.astype(np.uint8)
cv2.imshow("Polar Image", polar_image)
cv2.imshow('polar', log_img)
#get rotation from shift along y axis
rotation = sy * 180 / (rshift(y, 1));
print(rotation)
cv2.waitKey(0)
cv2.destroyAllWindows()
I am unsure how to interpret the results of this function. The expected outcome is a value close to 90 degrees; however, I get the value below.
Output: -0.00717516014538333
How can I make the output correct?
A method, typically referred to as the Fourier Mellin transform, and published as:
B. Srinivasa Reddy and B.N. Chatterji, "An FFT-Based Technique for Translation, Rotation, and Scale-Invariant Image Registration", IEEE Trans. on Image Proc. 5(8):1266-1271, 1996
uses the FFT and the log-polar transform to obtain the translation, rotation and scaling of one image to match the other. I find this tutorial to be very clear and informative, I will give a summary here:
Compute the magnitude of the FFT of the two images (apply a windowing function first to avoid issues with periodicity of the FFT).
Compute the log-polar transform of the magnitude of the frequency-domain images (typically a high-pass filter is applied first, but I have not seen its usefulness).
Compute the cross-correlation (actually phase correlation) between the two. This leads to a knowledge of scale and rotation.
Apply the scaling and rotation to one of the original input images.
Compute the cross-correlation (actually phase correlation) of the original input images, after correction for scaling and rotation. This leads to knowledge of the translation.
This works because:
The magnitude of the FFT is translation-invariant, we can solely focus on scaling and rotation without worrying about translation. Note that the rotation of the image is identical to the rotation of the FFT, and that scaling of the image is inverse to the scaling of the FFT.
The log-polar transform converts rotation into a vertical translation, and scaling into a horizontal translation. Phase correlation allows us to determine these translations. Converting them to a rotation and scaling is non-trivial (especially the scaling is hard to get right, but a bit of math shows the way).
If the tutorial linked above is not clear enough, one can look at the C++ code that comes with it, or at this other Python code.
OP is interested only in the rotation aspect of the method above. If we can assume that the translation is 0 (this means we know around which point the rotation was made; if we don't know the origin we need to estimate it as a translation), then we don't need to compute the magnitude of the FFT (remember it is used to make the problem translation invariant); we can apply the log-polar transform directly to the images. But note that we need to use the center of rotation as the origin for the log-polar transform. If we additionally assume that the scaling is 1, we can further simplify things by taking the linear-polar transform. That is, the logarithmic scaling of the radius axis is only necessary to estimate scaling.
OP is doing this more or less correctly, I believe. Where OP's code goes wrong is in the extent of the radius axis in the polar transform. By going all the way to the extreme corners of the image, OpenCV needs to fill in parts of the transformed image with zeros. These parts are dictated by the shape of the image, not by the contents of the image. That is, both polar images contain exactly the same sharp, high-contrast curve between image content and filled-in zeros. The phase correlation is aligning these curves, leading to an estimate of 0 degree rotation. The image content is more or less ignored because its contrast is much lower.
Instead, make the extent of the radius axis that of the largest circle that fits completely inside the image. This way, no parts of the output need to be filled with zeros, and the phase correlation can focus on the actual image content. Furthermore, considering the two images are rotated versions of each other, it is likely that the data in the corners of the images do not match, there is no need to take that into account at all!
Here is code I implemented quickly based on OP's code. I read in Lena, rotated the image by 38 degrees, computed the linear-polar transform of the original and rotated images, then the phase correlation between these two, and then determined a rotation angle based on the vertical translation. The result was 37.99560, plenty close to 38.
import cv2
import numpy as np
base_img = cv2.imread('lena512color.tif')
base_img = np.float32(cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY)) / 255.0
(h, w) = base_img.shape
(cX, cY) = (w // 2, h // 2)
angle = 38
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
curr_img = cv2.warpAffine(base_img, M, (w, h))
cv2.imshow("base_img", base_img)
cv2.imshow("curr_img", curr_img)
base_polar = cv2.linearPolar(base_img,(cX, cY), min(cX, cY), 0)
curr_polar = cv2.linearPolar(curr_img,(cX, cY), min(cX, cY), 0)
cv2.imshow("base_polar", base_polar)
cv2.imshow("curr_polar", curr_polar)
(sx, sy), sf = cv2.phaseCorrelate(base_polar, curr_polar)
rotation = -sy / h * 360;
print(rotation)
cv2.waitKey(0)
cv2.destroyAllWindows()
These are the four image windows shown by the code:
I created a figure that shows the phase correlation values for multiple rotations. This has been edited to reflect Cris Luengo's comment. The image is cropped to get rid of the edges of the square insert.
import cv2
import numpy as np
import os

paths = ["lena.png", "rotate45.png", "rotate90.png", "rotate135.png", "rotate180.png"]
os.chdir('/home/stephen/Desktop/rotations/')

images, rotations, polar = [], [], []
for image_path in paths:
    alignedImage = cv2.imread('lena.png')
    rotatedImage = cv2.imread(image_path)
    rows, cols, chan = alignedImage.shape
    x, y, c = rotatedImage.shape

    # Crop to get rid of the edges of the square insert
    x, y, w, h = 220, 220, 360, 360
    alignedImage = alignedImage[y:y+h, x:x+h].copy()
    rotatedImage = rotatedImage[y:y+h, x:x+h].copy()

    # convert images to valid type
    ref32 = np.float32(cv2.cvtColor(alignedImage, cv2.COLOR_BGR2GRAY))
    curr32 = np.float32(cv2.cvtColor(rotatedImage, cv2.COLOR_BGR2GRAY))

    value = np.sqrt(((rows/2.0)**2.0)+((cols/2.0)**2.0))
    value2 = np.sqrt(((x/2.0)**2.0)+((y/2.0)**2.0))
    polar_image = cv2.linearPolar(ref32, (rows/2, cols/2), value, cv2.WARP_FILL_OUTLIERS)
    log_img = cv2.linearPolar(curr32, (x/2, y/2), value2, cv2.WARP_FILL_OUTLIERS)

    shift = cv2.phaseCorrelate(polar_image, log_img)
    (sx, sy), sf = shift

    polar_image = polar_image.astype(np.uint8)
    log_img = log_img.astype(np.uint8)

    sx, sy, sf = round(sx, 4), round(sy, 4), round(sf, 4)
    text = image_path + "\n" + "sx: " + str(sx) + " \nsy: " + str(sy) + " \nsf: " + str(sf)
    images.append(rotatedImage)
    rotations.append(text)
    polar.append(polar_image)
Here's an approach to determine the rotational shift between two images in degrees. The idea is to find the skew angle for each image in relation to a horizontal line. If we can find this skewed angle then we can calculate the angle difference between the two images. Here are some example images to illustrate this concept
Original unrotated image
Rotated counterclockwise by 10 degrees (neg_10) and counterclockwise by 35 degrees (neg_35)
Rotated clockwise by 7.9 degrees (pos_7_9) and clockwise by 21 degrees (pos_21)
For each image, we want to determine the skew angle in relation to a horizontal line with negative being rotated counterclockwise and positive being rotated clockwise
Here's the helper function to determine this skew angle
def compute_angle(image):
    # Convert to grayscale, invert, and Otsu's threshold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = 255 - gray
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    # Find coordinates of all pixel values greater than zero
    # then compute minimum rotated bounding box of all coordinates
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]

    # The cv2.minAreaRect() function returns values in the range
    # [-90, 0) so need to correct angle
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle

    # Rotate image to horizontal position
    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)

    return (angle, rotated)
After determining the skew angle for each image, we can simply calculate the difference
angle1, rotated1 = compute_angle(image1)
angle2, rotated2 = compute_angle(image2)

# Both angles are positive
if angle1 >= 0 and angle2 >= 0:
    difference_angle = abs(angle1 - angle2)
# One positive, one negative
elif (angle1 < 0 and angle2 > 0) or (angle1 > 0 and angle2 < 0):
    difference_angle = abs(angle1) + abs(angle2)
# Both negative
elif angle1 < 0 and angle2 < 0:
    angle1 = abs(angle1)
    angle2 = abs(angle2)
    difference_angle = max(angle1, angle2) - min(angle1, angle2)
Here's a step-by-step walkthrough of what's going on. Using pos_21 and neg_10, the compute_angle() function will return the skew angle and the normalized image
For pos_21, we normalize the image and determine the skew angle. Left (before) -> right (after)
20.99871826171875
Similarly for neg_10, we also normalize the image and determine the skew angle. Left (before) -> right (after)
-10.007980346679688
Now that we have both angles, we can compute the difference angle. Here's the result
31.006698608398438
Here's results with other combinations. With neg_10 and neg_35 we get
24.984039306640625
With pos_7_9 and pos_21,
13.09155559539795
Finally with pos_7_9 and neg_35,
42.89918231964111
Here's the full code
import cv2
import numpy as np

def rotational_shift(image1, image2):
    def compute_angle(image):
        # Convert to grayscale, invert, and Otsu's threshold
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = 255 - gray
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

        # Find coordinates of all pixel values greater than zero
        # then compute minimum rotated bounding box of all coordinates
        coords = np.column_stack(np.where(thresh > 0))
        angle = cv2.minAreaRect(coords)[-1]

        # The cv2.minAreaRect() function returns values in the range
        # [-90, 0) so need to correct angle
        if angle < -45:
            angle = -(90 + angle)
        else:
            angle = -angle

        # Rotate image to horizontal position
        (h, w) = image.shape[:2]
        center = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,
                                 borderMode=cv2.BORDER_REPLICATE)

        return (angle, rotated)

    angle1, rotated1 = compute_angle(image1)
    angle2, rotated2 = compute_angle(image2)

    # Both angles are positive
    if angle1 >= 0 and angle2 >= 0:
        difference_angle = abs(angle1 - angle2)
    # One positive, one negative
    elif (angle1 < 0 and angle2 > 0) or (angle1 > 0 and angle2 < 0):
        difference_angle = abs(angle1) + abs(angle2)
    # Both negative
    elif angle1 < 0 and angle2 < 0:
        angle1 = abs(angle1)
        angle2 = abs(angle2)
        difference_angle = max(angle1, angle2) - min(angle1, angle2)

    return (difference_angle, rotated1, rotated2)

if __name__ == '__main__':
    image1 = cv2.imread('pos_7_9.png')
    image2 = cv2.imread('neg_35.png')
    angle, rotated1, rotated2 = rotational_shift(image1, image2)
    print(angle)

Finding the angle of lines detected using OpenCV

I am using OpenCV for a robot vision project, navigating a maze. I can detect the lines where the walls of the maze meet the floor, and I now need to use these detected lines to calculate which way the robot should turn.
In order to work out which way the robot should move, I believe the solution is to calculate the angle of the walls in relation to the position of the robot. However, where both walls are found, how do I select which points to use as a reference?
I understand that I can use Python's atan2 function to calculate the angle between two points, but after that I am completely lost.
Here is my code:
# https://towardsdatascience.com/finding-driving-lane-line-live-with-opencv-f17c266f15db
# Testing edge detection for maze
import cv2
import numpy as np
import math
image = cv2.imread("/Users/BillHarvey/Documents/Electronics_and_Robotics/Robot_Vision_Project/mazeme/maze1.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size,kernel_size),0)
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# create a mask of the edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# define the Region of Interest (ROI) - source code sets as a trapezoid for roads
imshape = image.shape
vertices = np.array([[(0,imshape[0]),(100, 420), (1590, 420),(imshape[1],imshape[0])]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_edges = cv2.bitwise_and(edges, mask)
# my basic ROI bounded by a blue rectangle
#ROI = cv2.rectangle(image,(0,420),(1689,839),(0,255,0),3)
# define the Hough Transform parameters
rho = 2 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 #minimum number of pixels making up a line
max_line_gap = 30 # maximum gap in pixels between connectable line segments
# make a blank the same size as the original image to draw on
line_image = np.copy(image)*0
# run Hough on edge detected image
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
        angle = math.atan2(x2-x1, y2-y1)
        angle = angle * 180 / 3.14
        print("Angle = ", angle)
# draw the line on the original image
lines_edges = cv2.addWeighted(image, 0.8, line_image, 1, 0)
#return lines_edges
#cv2.imshow("original", image)
#cv2.waitKey(0)
#cv2.imshow("edges", edges)
#cv2.waitKey(0)
cv2.imshow("detected", lines_edges)
cv2.waitKey(0)
cv2.imwrite("lanes_detected.jpg", lines_edges)
cv2.destroyAllWindows()
I have added the atan2 formula to the piece of code that draws blue lines where HoughLinesP has detected lines.
And to convert the result (angle) to degrees I found this formula:
angle = angle * 180 / 3.14
The following piece of code:
print("Angle = ", angle)
prints 13 angles, which may or may not correspond to the lines in the picture; do they? To avoid getting negative degrees I had to use x2-x1, y2-y1 rather than the other way around, which I have seen elsewhere.
I do apologise for my fundamental lack of Python and mathematical knowledge, but any help would be gratefully received.
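For reference, a minimal sketch of the usual atan2 convention for the angle of a segment from the horizontal axis, in degrees (x1, y1, x2, y2 as in the loop above; note that image y grows downwards, which flips the sign compared with the usual mathematical convention):

import math

dx, dy = x2 - x1, y2 - y1
angle_degrees = math.degrees(math.atan2(dy, dx))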

Camera displacement estimation in OpenCV - incorrect pose output

I am currently filming using one camera facing downwards with a chessboard pattern in a fixed position on the ground. I am using python with OpenCV to track the displacement of the camera and using the output to plot displacement in terms of the x,y,z directions. Ultimately I want to mount the camera to the underside of a hovering multirotor UAV in order to calibrate the GPS accuracy.
The basic method I am using is:
Define object points
Open video
Undistort frame based on saved camera matrix (camera calibration already performed)
Find chessboard corners
If corners found, refine corners
Find the rotation and translation vectors (cv2.solvePnPRansac)
Project 3D points to the image plane (cv2.projectPoints)
Convert rotation vector to rotation matrix as per this answer:
np_rodrigues = np.asarray(rvecs_new[:,:],np.float64)
rmatrix = cv2.Rodrigues(np_rodrigues)[0]
Calculate camera pose as per this answer (see the note after these steps):
cam_pos = -np.matrix(rmatrix).T * np.matrix(tvecs_new)
Store values
camx.append(cam_pos.item(0))
camy.append(cam_pos.item(1))
camz.append(cam_pos.item(2))
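For context, the formula in step 9 follows from the world-to-camera mapping x_cam = R * x_world + t: the camera centre C is the world point that maps to the camera origin, so 0 = R * C + t and C = -R^T * t. A minimal NumPy sketch of the same computation without np.matrix (rmatrix and tvecs_new as in the snippets above):

import numpy as np

# Equivalent to cam_pos = -np.matrix(rmatrix).T * np.matrix(tvecs_new):
# the camera centre expressed in world (chessboard) coordinates.
cam_pos = -rmatrix.T @ np.asarray(tvecs_new).reshape(3, 1)
cx, cy, cz = cam_pos.ravel()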
However, when I run this code on a video that should correspond to a straight line at constant altitude, the plotted x,y graph comes out curved, and z is not constant, as shown by the x,z plot: http://imgur.com/QIY3wgQ,pDM5T0x,HEDJtAt#1
Is there any reason why this should not be giving a straight line on the graph? Perhaps an error with the camera pose calculation in step 9?
The complete code is as follows:
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt

# IMPORTANT: Enter chess board dimensions
chw = 9
chh = 6

# Defining draw function for the axis lines
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    return img

# Load previously saved data
with np.load('camera_calib.npz') as X:
    mtx, dist, _, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs','imgpts')]

# Criteria, defining object points
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((chh*chw,3), np.float32)
objp[:,:2] = np.mgrid[0:chw,0:chh].T.reshape(-1,2)

# Setting axis
axis = np.float32([[9,0,0], [0,6,0], [0,0,-10]]).reshape(-1,3)

cap = cv2.VideoCapture('Calibration\\video_chess2.MP4')
count = 0
fcount = 0
lim = 1000                     # frame limit; the value was not given in the original post
camx, camy, camz = [], [], []  # camera position histories (not initialised in the original post)

while(cap.isOpened()):
    ret1, img = cap.read()
    if ret1 == False or count == lim:
        print('Video analysis complete.')
        break
    if count > 0:
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))

        # Undistorting
        img2 = cv2.undistort(img, mtx, dist, None, newcameramtx)

        gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        ret2, corners = cv2.findChessboardCorners(gray, (chw,chh), None)

        if ret2 == True:
            fcount = fcount + 1
            # Refining corners
            cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

            # Find the rotation and translation vectors
            rvecs_new, tvecs_new, inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)

            # Project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvecs_new, tvecs_new, mtx, dist)
            draw(img2, corners, imgpts)
            cv2.imshow('img', img2)
            cv2.waitKey(1)

            # Converting rotation vector to rotation matrix
            np_rodrigues = np.asarray(rvecs_new[:,:], np.float64)
            rmatrix = cv2.Rodrigues(np_rodrigues)[0]

            # Pose (according to https://stackoverflow.com/questions/16265714/camera-pose-estimation-opencv-pnp)
            cam_pos = -np.matrix(rmatrix).T * np.matrix(tvecs_new)
            camx.append(cam_pos.item(0))
            camy.append(cam_pos.item(1))
            camz.append(cam_pos.item(2))
        else:
            print('Board not found')
    count += 1
    print(count)

cv2.destroyAllWindows()
plt.plot(camx, camy)
plt.show()
