Distance between two ArUco markers changes at different angles - Python

I'm trying to write a program to find the distance between two ArUco markers. The distance seems to be about right when the markers are located at the left and right of the camera frame, but when they are located at the top and bottom of the frame, the distance between them is totally wrong.
Here's my code:
import numpy as np
import cv2
import cv2.aruco as aruco

def mag(x):
    return np.sqrt(x[0]**2 + x[1]**2 + x[2]**2)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
mtx = np.array([[8.8213173810477792e+02, 0., 6.2265593183247574e+02],
                [0., 9.0710890795744012e+02, 4.1207655778884924e+02],
                [0., 0., 1.]])
dist = np.array([5.3943178087169294e-03, -2.0750715497990183e-01,
                 -1.2051405507801402e-03, -2.0261943010518283e-03,
                 4.2906970296298941e-01])

cap = cv2.VideoCapture(0)  # Get the camera source
alltvec = np.zeros([50, 3])

def track(matrix_coefficients, distortion_coefficients):
    while True:
        ret, frame = cap.read()  # Capture a frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change to grayscale
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)  # Use 4x4 dictionary to find markers
        parameters = aruco.DetectorParameters_create()  # Marker detection parameters
        # Lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                                parameters=parameters,
                                                                cameraMatrix=mtx,
                                                                distCoeff=dist)
        if np.all(ids is not None):  # If there are markers found by the detector
            for i in range(0, len(ids)):  # Iterate over the markers
                # Estimate the pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.031, mtx, dist)
                (rvec - tvec).any()  # get rid of that numpy value array error
                aruco.drawDetectedMarkers(frame, corners)  # Draw a square around the markers
                aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw axis
                alltvec[ids[i] - 1] = tvec  # Add the tvec value to the array of all tvecs
        if ids is not None and len(ids) > 0:  # With both markers stored, take the difference of their tvecs
            vector = alltvec[1] - alltvec[0]
            distance = np.linalg.norm(vector)
            cv2.putText(frame, "Distance: {:.2f} m".format(distance), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('frame', frame)
        # Wait 3 milliseconds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()

track(mtx, dist)
[Image: markers when side by side]
[Image: markers when top to bottom]
I've tried just getting the distances of the markers from the camera, and those seem to be correct regardless of where in the frame the markers are.
If it's of any help, the markers on the sheet of paper are 20 cm apart in the long direction and 10 cm apart in the shorter direction.
(Their distance apart is about right when they are side by side but totally off when they are at the top and bottom of the frame.)
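For reference, a minimal sketch of that per-marker check (the tvec value here is a hypothetical stand-in for the output of estimatePoseSingleMarkers): the distance of a marker from the camera is just the norm of its translation vector.

import numpy as np

tvec = np.array([0.05, -0.02, 0.60])  # hypothetical tvec from estimatePoseSingleMarkers
distance_from_camera = np.linalg.norm(tvec)  # Euclidean distance camera -> marker
print("Marker is {:.2f} m from the camera".format(distance_from_camera))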

Related

How to make cv2.HoughLinesP detect only vertical lines?

I'm trying to make it so my programme only detects the overhead wire on a train/tram, but when the wire holders come into frame it detects their horizontal lines, which I don't want. Does anyone know how to make it detect only vertical lines? I tried using cv2.erode along with np.ones to only show vertical lines, but I couldn't seem to get anywhere with that. Someone did mention that HoughLines can be configured so it detects only vertical lines, but I don't know if that's true or not. Here's my code:
import cv2
import numpy as np
import window_names
import track_bars

vid = 'blackpool_tram_result.mp4'
cap = cv2.VideoCapture(vid)
frame_counter = 0

while True:
    ret, frame = cap.read()
    frame_counter += 1
    if frame_counter == cap.get(cv2.CAP_PROP_FRAME_COUNT):
        # Loop the video when it reaches the last frame
        frame_counter = 0
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    blank = np.zeros(frame.shape[:2], dtype='uint8')
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    output = np.empty(grey.shape, dtype=np.uint8)
    cv2.normalize(grey, output, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    hist = cv2.equalizeHist(output)
    track_bars.lower_threshold = cv2.getTrackbarPos("lower", window_names.window_canny)
    track_bars.upper_threshold = cv2.getTrackbarPos("upper", window_names.window_canny)
    track_bars.smoothing_neighbourhood = cv2.getTrackbarPos("smoothing", window_names.window_canny)
    track_bars.sobel_size = cv2.getTrackbarPos("sobel size", window_names.window_canny)
    # Kernel sizes must be odd and at least 3
    track_bars.smoothing_neighbourhood = max(3, track_bars.smoothing_neighbourhood)
    if not (track_bars.smoothing_neighbourhood % 2):
        track_bars.smoothing_neighbourhood = track_bars.smoothing_neighbourhood + 1
    track_bars.sobel_size = max(3, track_bars.sobel_size)
    if not (track_bars.sobel_size % 2):
        track_bars.sobel_size = track_bars.sobel_size + 1
    smoothed = cv2.GaussianBlur(
        hist, (track_bars.smoothing_neighbourhood, track_bars.smoothing_neighbourhood), 0)
    edges = cv2.Canny(smoothed,
                      track_bars.lower_threshold,
                      track_bars.upper_threshold,
                      apertureSize=track_bars.sobel_size)
    rho = 1  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 15  # minimum number of votes (intersections in a Hough grid cell)
    minLineLength = 50  # minimum number of pixels making up a line
    maxLineGap = 20
    line_image = np.copy(frame) * 0
    # Rectangular region of interest around the centre of the frame
    mask = cv2.rectangle(blank, (edges.shape[1]//2 + 150, edges.shape[0]//2 - 150),
                         (edges.shape[1]//2 - 150, edges.shape[0]//2 - 300), 255, -1)
    masked = cv2.bitwise_and(edges, edges, mask=mask)
    lines = cv2.HoughLinesP(masked, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 5)
    lines_edges = cv2.addWeighted(frame, 0.8, line_image, 1, 0)
    cv2.imshow(window_names.window_hough, frame)
    cv2.imshow(window_names.window_canny, edges)
    cv2.imshow(window_names.window_mask, mask)
    cv2.imshow(window_names.window_masked_image, masked)
    key = cv2.waitKey(27) & 0xFF
    if key == ord('x'):
        break
cv2.destroyAllWindows()
HoughLines() gives you the ability to configure the minimum and maximum line angles to detect. You can check here for the details.
However, HoughLinesP doesn't have this option. What you can do instead is filter the lines that HoughLinesP gives as output. According to the documentation:
Output vector of lines. Each line is represented by a 4-element vector (x1,y1,x2,y2), where (x1,y1) and (x2,y2) are the ending points of each detected line segment.
So just take the starting (x1,y1) and ending (x2,y2) points and calculate the angle with some simple math.
With those angles you can then filter each line according to the desired angle value.
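As an illustration, here is a minimal sketch of that filtering (the 10-degree tolerance and the filter_vertical name are my own choices, not from the documentation):

import numpy as np

def filter_vertical(lines, tolerance_deg=10):
    """Keep only the near-vertical segments from cv2.HoughLinesP output."""
    vertical = []
    if lines is None:
        return vertical
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Angle of the segment relative to the horizontal axis, in degrees
        angle = abs(np.degrees(np.arctan2(y2 - y1, x2 - x1)))
        # A vertical segment has an angle close to 90 degrees
        if abs(angle - 90) <= tolerance_deg:
            vertical.append((x1, y1, x2, y2))
    return vertical

In the code above you would pass the result of cv2.HoughLinesP through filter_vertical() and draw only the returned segments.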

Angle between 2 ArUco markers' planes

I want to measure how much the angle of an ArUco marker deviates from a plane defined by a second, reference ArUco marker.
A reference ArUco marker (M1) is fixed against a flat wall, and a second ArUco marker (M2) is a few centimetres in front of that same wall. I want to know when the marker M2 deviates more than 10 degrees from the xy plane of M1.
Here is an illustration of the configuration:
To do so, I thought I should calculate the relative rotation between the pose rvecs, as explained in this post:
Relative rotation between pose (rvec)
which proposes the following code:
def inversePerspective(rvec, tvec):
    """ Applies perspective transform for given rvec and tvec. """
    R, _ = cv2.Rodrigues(rvec)
    R = np.matrix(R).T
    invTvec = np.dot(R, np.matrix(-tvec))
    invRvec, _ = cv2.Rodrigues(R)
    return invRvec, invTvec

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec
    to use composeRT with rvec2 & tvec2. """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
    # Inverse the second marker, the right one in the image
    invRvec, invTvec = inversePerspective(rvec2, tvec2)
    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]
    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec
Computing the composedRvec, I get the following results:
With both ArUco markers in the same plane (composedRvec values in the top right corner):
With both ArUco markers at a 90-degree angle:
I do not really understand the results:
OK for the (0, 0, 0) composedRvec when the markers are in the same plane.
But why (0, 1.78, 0) in the second case?
What general condition on the resulting composedRvec tells me when the angle between the 2 markers is above 10 degrees?
Am I even following the right strategy with the composedRvec?
*** EDIT ***
Results for the 2 markers in the same xy plane with a 40° angle between them:
||composedRvec|| = sqrt(0.619^2 + 0.529^2 + 0.711^2) = 1.08 rad = 61.87°
*** EDIT 2 ***
By retaking measurements in the 40° angle configuration, I found that the values fluctuate quite a bit even without modifying the setup or the lighting. From time to time, I get the correct values:
||composedRvec|| = sqrt(0.019^2 + 0.012^2 + 0.74^2) = 0.74 rad = 42.4°, which is quite accurate.
*** EDIT 3 ***
So here is my final code, based on @Gilles-Philippe Paillé's edited answer:
import numpy as np
import cv2
import cv2.aruco as aruco

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # Get the camera source
img_path = 'D:/your_path/'
# FILE_STORAGE_READ
cv_file = cv2.FileStorage(img_path + "camera.yml", cv2.FILE_STORAGE_READ)
matrix_coefficients = cv_file.getNode("K").mat()
distortion_coefficients = cv_file.getNode("D").mat()
nb_markers = 2

def track(matrix_coefficients, distortion_coefficients):
    while True:
        ret, frame = cap.read()
        # Operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change to grayscale
        aruco_dict = aruco.custom_dictionary(nb_markers, 5)
        parameters = aruco.DetectorParameters_create()  # Marker detection parameters
        # Lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                                parameters=parameters,
                                                                cameraMatrix=matrix_coefficients,
                                                                distCoeff=distortion_coefficients)
        # Store rz1 and rz2
        R_list = []
        if np.all(ids is not None):  # If there are markers found by the detector
            for i in range(0, len(ids)):  # Iterate over the markers
                # Estimate the pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.02, matrix_coefficients,
                                                                           distortion_coefficients)
                (rvec - tvec).any()  # get rid of that nasty numpy value array error
                aruco.drawDetectedMarkers(frame, corners)  # Draw a square around the markers
                aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw axis
                R, _ = cv2.Rodrigues(rvec)
                # Convert the np.matrix(R).T matrix to an array using np.squeeze(np.asarray()) to get rid of
                # the ValueError: shapes (1,3) and (1,3) not aligned
                R = np.squeeze(np.asarray(np.matrix(R).T))
                R_list.append(R[2])
        # Display the resulting frame
        if len(R_list) == 2:
            print('##############')
            angle_radians = np.arccos(np.dot(R_list[0], R_list[1]))
            angle_degrees = angle_radians * 180 / np.pi
            print(angle_degrees)
        cv2.imshow('frame', frame)
        # Wait 3 seconds for a key press. Check the key and do the corresponding job.
        key = cv2.waitKey(3000) & 0xFF
        if key == ord('q'):
            break

track(matrix_coefficients, distortion_coefficients)
And here are some results (red -> real angle, white -> measured angle):
This is out of the scope of the question, but I find that the pose estimation fluctuates quite a bit. For example, when the 2 markers are against the wall, the values easily jump from 9° to 37° without my touching the system.
The result uses the Angle-axis representation, i.e., the norm of the vector is the angle of rotation (what you want), and the direction of the vector is the axis of rotation.
You are looking for θ = ||composedRvec||. Note that the result is in radians. The condition would be ||composedRvec|| > 10*π/180.
Edit: To only consider the angle between the Z-axis of both planes, convert the two rotation vectors rvec1 and rvec2 into matrices and extract the 3rd columns. The angle is then angle_radians = np.arccos(np.dot(rz1, rz2))
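For illustration, a minimal sketch of both checks described in this answer (the sample composedRvec value is taken from EDIT 2 above; rvec1 and rvec2 are hypothetical pose vectors standing in for the estimatePoseSingleMarkers output):

import numpy as np
import cv2

# Angle-axis check: the rotation angle is the norm of the composed rotation vector
composedRvec = np.array([0.019, 0.012, 0.74])  # sample values from EDIT 2 above
theta = np.linalg.norm(composedRvec)           # rotation angle in radians
print(theta > 10 * np.pi / 180)                # True if the deviation exceeds 10 degrees

# Z-axis-only check: compare the third columns of the two rotation matrices
rvec1 = np.array([0.0, 0.0, 0.0])  # hypothetical rvec of the reference marker M1
rvec2 = np.array([0.0, 0.7, 0.0])  # hypothetical rvec of M2 (~40 degrees about Y)
rz1 = cv2.Rodrigues(rvec1)[0][:, 2]
rz2 = cv2.Rodrigues(rvec2)[0][:, 2]
angle_radians = np.arccos(np.dot(rz1, rz2))
print(angle_radians * 180 / np.pi)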

Drawing ArUco pose axes in the wrong place

I am working on an image processing project that detects ArUco markers and acts according to them. When there is only one marker in the video or image everything works fine, but when I add another marker, the second ArUco marker's pose axes are drawn in the wrong place, not at the centre of the marker. Please see the screenshot I shared for a better understanding.
The marker with id [2] is fine: its pose axes are drawn at the centre of the marker.
But the pose axes of the marker with id [1] are drawn at some seemingly random point.
Here is the part of the code that detects the ArUco markers and draws the axes.
while True:
    ret, frame = cap.read()
    # Operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change to grayscale
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)  # Specify marker size as 4x4, 5x5, 6x6
    parameters = aruco.DetectorParameters_create()  # Marker detection parameters
    # Lists of ids and the corners belonging to each marker
    corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                            parameters=parameters,
                                                            cameraMatrix=matrix_coefficients,
                                                            distCoeff=distortion_coefficients)
    try:
        if np.all(ids is not None):  # If there are markers found by the detector
            for i in range(0, len(ids)):  # Iterate over the markers
                # Estimate the pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.02, matrix_coefficients,
                                                                           distortion_coefficients)
                (rvec - tvec).any()  # get rid of that nasty numpy value array error
                aruco.drawDetectedMarkers(frame, corners)  # Draw a square around the markers
                aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw axis
                # X and Y coordinates of the marker's centre, averaged over its four corners
                c_x = (corners[i][0][0][0] + corners[i][0][1][0] + corners[i][0][2][0] + corners[i][0][3][0]) / 4
                c_y = (corners[i][0][0][1] + corners[i][0][1][1] + corners[i][0][2][1] + corners[i][0][3][1]) / 4
                cv2.putText(frame, "id" + str(ids[i]), (int(c_x), int(c_y)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 225, 250), 2)
    except:
        if ids is None or len(ids) == 0:
            print("******************************************************")
            print("*************** Marker Detection Failed **************")
            print("******************************************************")

How to calculate an epipolar line with a stereo pair of images in Python OpenCV

How can I take two images of an object from different angles and draw epipolar lines on one based on points from the other?
For example, I would like to be able to select a point on the left picture using a mouse, mark the point with a circle, and then draw an epipolar line on the right image corresponding to the marked point.
I have 2 XML files which contain a 3x3 camera matrix and a list of 3x4 projection matrices for each picture. The camera matrix is K. The projection matrix for the left picture is P_left. The projection matrix for the right picture is P_right.
I have tried this approach:
1. Choose a pixel coordinate (x,y) in the left image (via mouse click)
2. Calculate a point p in the left image with K^-1 * (x,y,1)
3. Calculate the pseudo-inverse matrix P+ of P_left (using np.linalg.pinv)
4. Calculate the epipole e' of the right image: P_right * (0,0,0,1)
5. Calculate the skew-symmetric matrix e'_skew of e'
6. Calculate the fundamental matrix F: e'_skew * P_right * P+
7. Calculate the epipolar line l' on the right image: F * p
8. Calculate a point p' in the right image: P_right * P+ * p
9. Transform p' and l' back to pixel coordinates
10. Draw a line using cv2.line through p' and l'
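For reference, a minimal numpy sketch of steps 3 to 7 above (the projection matrices here are hypothetical placeholders; in the real setup they come from the XML files, with K already folded into them):

import numpy as np

# Hypothetical 3x4 projection matrices; the left camera sits at the origin,
# so its centre is (0,0,0,1) as assumed in step 4
P_left = np.hstack([np.eye(3), np.zeros((3, 1))])
P_right = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])

P_plus = np.linalg.pinv(P_left)                      # step 3: pseudo-inverse P+
e_prime = P_right @ np.array([0.0, 0.0, 0.0, 1.0])   # step 4: epipole e' of the right image
e_skew = np.array([[0.0, -e_prime[2], e_prime[1]],
                   [e_prime[2], 0.0, -e_prime[0]],
                   [-e_prime[1], e_prime[0], 0.0]])  # step 5: skew-symmetric [e']_x
F = e_skew @ P_right @ P_plus                        # step 6: fundamental matrix

p = np.array([320.0, 240.0, 1.0])                    # a clicked pixel in homogeneous coordinates
l_prime = F @ p                                      # step 7: epipolar line a*x + b*y + c = 0
print(l_prime)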
I just did this a few days ago and it works just fine. Here's the method I used:
1. Calibrate the camera(s) to obtain the camera matrices and distortion matrices (using OpenCV getCorners and calibrateCamera; you can find lots of tutorials on this, but it sounds like you already have this info).
2. Perform stereo calibration with OpenCV stereoCalibrate(). It takes as parameters all of the camera and distortion matrices. You need this to determine the correlation between the two visual fields. You get back several matrices: the rotation matrix R, the translation vector T, the essential matrix E and the fundamental matrix F.
3. Then do undistortion using OpenCV getOptimalNewCameraMatrix and undistort(). This will get rid of a lot of camera aberrations (it will give you better results).
4. Finally, use OpenCV's computeCorrespondEpilines to calculate the lines and plot them. I will include some code below that you can try out in Python. When I run it, I get images like this (the coloured points have their corresponding epilines drawn in the other image).
Here's some code (Python 3.0). It uses two static images and static points, but you could easily select the points with the cursor. You can also refer to the OpenCV docs on calibration and stereo calibration here.
import cv2
import numpy as np
import os  # needed for os.path.splitext in undistort()

# find object corners from chessboard pattern and create a correlation with image corners
def getCorners(images, chessboard_size, show=True):
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((chessboard_size[1] * chessboard_size[0], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)*3.88  # multiply by 3.88 for large chessboard squares
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    for image in images:
        frame = cv2.imread(image)
        # height, width, channels = frame.shape # get image parameters
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)  # Find the chessboard corners
        if ret:  # if corners were found
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)  # refine corners
            imgpoints.append(corners2)  # add to corner array
            if show:
                # Draw and display the corners
                frame = cv2.drawChessboardCorners(frame, chessboard_size, corners2, ret)
                cv2.imshow('frame', frame)
                cv2.waitKey(100)
    cv2.destroyAllWindows()  # close open windows
    return objpoints, imgpoints, gray.shape[::-1]

# perform undistortion on provided image
def undistort(image, mtx, dist):
    img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    image = os.path.splitext(image)[0]
    h, w = img.shape[:2]
    newcameramtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    return dst

# draw the provided points on the image
def drawPoints(img, pts, colors):
    for pt, color in zip(pts, colors):
        cv2.circle(img, tuple(pt[0]), 5, color, -1)

# draw the provided lines on the image
def drawLines(img, lines, colors):
    _, c, _ = img.shape
    for r, color in zip(lines, colors):
        x0, y0 = map(int, [0, -r[2]/r[1]])
        x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
        cv2.line(img, (x0, y0), (x1, y1), color, 1)

if __name__ == '__main__':
    # NOTE: chessboard_size, colors, the fundamental matrix F, the per-camera
    # matrices mtxL/distL/mtxR/distR and the helper combineSideBySide() come
    # from the calibration steps described above and are not shown in this excerpt.
    # undistort our chosen images using the left and right camera and distortion matrices
    imgL = undistort("2L/2L34.bmp", mtxL, distL)
    imgR = undistort("2R/2R34.bmp", mtxR, distR)
    imgL = cv2.cvtColor(imgL, cv2.COLOR_GRAY2BGR)
    imgR = cv2.cvtColor(imgR, cv2.COLOR_GRAY2BGR)
    # use getCorners to get the new image locations of the chessboard corners (undistort will have moved them a little)
    _, imgpointsL, _ = getCorners(["2L34_undistorted.bmp"], chessboard_size, show=False)
    _, imgpointsR, _ = getCorners(["2R34_undistorted.bmp"], chessboard_size, show=False)
    # get 3 image points of interest from each image and draw them
    ptsL = np.asarray([imgpointsL[0][0], imgpointsL[0][10], imgpointsL[0][20]])
    ptsR = np.asarray([imgpointsR[0][5], imgpointsR[0][15], imgpointsR[0][25]])
    drawPoints(imgL, ptsL, colors[3:6])
    drawPoints(imgR, ptsR, colors[0:3])
    # find epilines corresponding to points in the right image and draw them on the left image
    epilinesR = cv2.computeCorrespondEpilines(ptsR.reshape(-1, 1, 2), 2, F)
    epilinesR = epilinesR.reshape(-1, 3)
    drawLines(imgL, epilinesR, colors[0:3])
    # find epilines corresponding to points in the left image and draw them on the right image
    epilinesL = cv2.computeCorrespondEpilines(ptsL.reshape(-1, 1, 2), 1, F)
    epilinesL = epilinesL.reshape(-1, 3)
    drawLines(imgR, epilinesL, colors[3:6])
    # combine the corresponding images into one and display them
    combineSideBySide(imgL, imgR, "epipolar_lines", save=True)
Hopefully this helps!

Camera displacement estimation in OpenCV - incorrect pose output

I am currently filming using one camera facing downwards with a chessboard pattern in a fixed position on the ground. I am using python with OpenCV to track the displacement of the camera and using the output to plot displacement in terms of the x,y,z directions. Ultimately I want to mount the camera to the underside of a hovering multirotor UAV in order to calibrate the GPS accuracy.
The basic method I am using is:
1. Define object points
2. Open video
3. Undistort frame based on saved camera matrix (camera calibration already performed)
4. Find chessboard corners
5. If corners found, refine corners
6. Find the rotation and translation vectors (cv2.solvePnPRansac)
7. Project 3D points to the image plane (cv2.projectPoints)
8. Convert the rotation vector to a rotation matrix as per this answer:
np_rodrigues = np.asarray(rvecs_new[:,:], np.float64)
rmatrix = cv2.Rodrigues(np_rodrigues)[0]
9. Calculate the camera pose as per this answer:
cam_pos = -np.matrix(rmatrix).T * np.matrix(tvecs_new)
10. Store values:
camx.append(cam_pos.item(0))
camy.append(cam_pos.item(1))
camz.append(cam_pos.item(2))
However, when I run this code with a video that should show a straight line at constant altitude, the plotted x,y graph comes out curved, and z is not constant, as shown by the x,z plot: http://imgur.com/QIY3wgQ,pDM5T0x,HEDJtAt#1
Is there any reason why this should not give a straight line on the graph? Perhaps an error in the camera pose calculation in step 9?
The complete code is as follows:
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt

# IMPORTANT: Enter chessboard dimensions
chw = 9
chh = 6

# Defining draw function for the axis lines
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
    cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
    cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
    return img

# Load previously saved data
with np.load('camera_calib.npz') as X:
    mtx, dist, _, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs', 'imgpts')]

# Criteria, defining object points
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((chh*chw, 3), np.float32)
objp[:, :2] = np.mgrid[0:chw, 0:chh].T.reshape(-1, 2)

# Setting axis
axis = np.float32([[9, 0, 0], [0, 6, 0], [0, 0, -10]]).reshape(-1, 3)

cap = cv2.VideoCapture('Calibration\\video_chess2.MP4')
count = 0
fcount = 0
lim = 1000  # frame limit; not defined in the original excerpt, value assumed
camx, camy, camz = [], [], []  # camera position histories, missing from the original excerpt
while cap.isOpened():
    ret1, img = cap.read()
    if ret1 == False or count == lim:
        print('Video analysis complete.')
        break
    if count > 0:
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
        # Undistorting
        img2 = cv2.undistort(img, mtx, dist, None, newcameramtx)
        gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        ret2, corners = cv2.findChessboardCorners(gray, (chw, chh), None)
        if ret2 == True:
            fcount = fcount + 1
            # Refining corners
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            # Find the rotation and translation vectors
            rvecs_new, tvecs_new, inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)
            # Project 3D points to the image plane
            imgpts, jac = cv2.projectPoints(axis, rvecs_new, tvecs_new, mtx, dist)
            draw(img2, corners, imgpts)
            cv2.imshow('img', img2)
            cv2.waitKey(1)
            # Converting rotation vector to rotation matrix
            np_rodrigues = np.asarray(rvecs_new[:, :], np.float64)
            rmatrix = cv2.Rodrigues(np_rodrigues)[0]
            # Pose (according to https://stackoverflow.com/questions/16265714/camera-pose-estimation-opencv-pnp)
            cam_pos = -np.matrix(rmatrix).T * np.matrix(tvecs_new)
            camx.append(cam_pos.item(0))
            camy.append(cam_pos.item(1))
            camz.append(cam_pos.item(2))
        else:
            print('Board not found')
    count += 1
    print(count)
cv2.destroyAllWindows()
plt.plot(camx, camy)
plt.show()
