Matching a real-world picture with a 3D model: inaccuracy problem - python

Suppose that I have a fixed physical camera and static scene. I have a point cloud scan of the physical world, so I can use basic surfaces and cubes to perform a simple reconstruction of the real world.
Simple reconstruction in unity
Pointcloud scan
The next step is to calculate the real-world camera pose using a checkerboard and PnP. After the calculation, I used the resulting Tvec, Rvec, and projectPoints to draw a virtual cube in world units; it shows up perfectly, confirming that the camera pose is valid within the OpenCV framework.
Verify camera pose after PnP
However, when I put the resulting camera transformation into Unity, the camera translation seems to be off by half a meter compared to the physical-world estimate. Ideally, what I would like to achieve is pixel-perfect alignment between a real-world image and the Unity camera's view (a digital twin of the physical world).
Tvec Rvec
Thank you for your insights in advance!
Below is the code for calculatePnP:
import numpy as np
import cv2
import glob
import os
from scipy.spatial.transform import Rotation
from math import atan2, asin
# Used to draw standard axis
# https://numpy.org/doc/stable/reference/generated/numpy.ravel.html
def draw(img, corners, imgPoints):
    # Draw the X (blue), Y (green) and Z (red) axis lines from the first chessboard corner
    corner = tuple(int(c) for c in corners[0].ravel())
    for k, color in enumerate([(255, 0, 0), (0, 255, 0), (0, 0, 255)]):
        end = tuple(int(c) for c in imgPoints[k].ravel())
        img = cv2.line(img, corner, end, color, 5)
    return img
# used to draw a standard cube (1,1,1)
# opencv official
def drawCube(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # draw ground floor in green
    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)
    # draw pillars in blue
    for i, j in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255), 3)
    # draw top layer in red
    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    return img
# Load the camera calibration data
os.chdir('C:/Users/')
with np.load('opencvcalib.npz') as calibData:
    mtx, dist, rvecs, tvecs = [calibData[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
print("Previously calibrated dist:\n {0}".format(dist))
print("mtx:\n {0}".format(mtx))
# Define the chess board rows and columns
rows = 9
cols = 6
# Set the termination criteria for the corner sub-pixel algorithm
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 0.001)
# Prepare the object points: (0,0,0), (1,0,0), (2,0,0), ..., (8,5,0). They are the same for all images
objectPoints = np.zeros((rows * cols, 3), np.float32)
objectPoints[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)
print("objpts before divide:\n {0}".format(objectPoints))
# Scale object points to real-world meters: one square is 0.034 m,
# so divide by 1/0.034 = 29.41176... (equivalent to multiplying by 0.034)
objectPoints = objectPoints / 29.41176470588235
print("divided objpts:\n {0}".format(objectPoints))
# Create the axis points; the unit is meters, and the shortest axis (X) is 10 cm here.
axisPoints = np.float32([[0.1, 0, 0], [0, 0.2, 0], [0, 0, -0.3]]).reshape(-1, 3)
#this unit is per checkerboard square
#axisPoints = np.float32([[1, 0, 0], [0, 2, 0], [0, 0, -5]]).reshape(-1, 3)
# Load the extrinsic calibration image
os.chdir('C:/Users/Calibration/extrinsics')
img = cv2.imread("ext3.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners, and visualize
ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
imgchkbrd = cv2.drawChessboardCorners(img, (9,6), corners, ret)
cv2.imwrite('corners.jpg', imgchkbrd)
# Make sure the chess board pattern was found in the image
if ret == True:
    # Refine the corner positions
    corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    # Find the rotation and translation vectors
    val, rvecs, tvecs, inliers = cv2.solvePnPRansac(objectPoints, corners, mtx, dist)
    #success, rvecs, tvecs = cv2.solvePnP(objectPoints, corners, mtx, dist, flags=cv2.SOLVEPNP_ITERATIVE)
    #print("objpts:\n {0}".format(objectPoints))
    print("corners:\n {0}".format(corners))
    print("Rotation Vector:\n {0}".format(rvecs))
    print("Translation Vector:\n {0}".format(tvecs))
    # https://stackoverflow.com/questions/16265714/camera-pose-estimation-opencv-pnp
    R, _ = cv2.Rodrigues(rvecs)  # Rodrigues returns a (matrix, jacobian) tuple
    R = R.transpose()
    pos = -R @ tvecs  # pos is the position of the camera expressed in the global frame
    roll = atan2(-R[2][1], R[2][2])
    pitch = asin(R[2][0])
    yaw = atan2(-R[1][0], R[0][0])
    print("pos of camera:\n {0}".format(pos))
    # position of camera is -transpose(R) * t
    # quaternion
    r = Rotation.from_rotvec(rvecs.T)
    quaternion = r.as_quat()
    print("Quaternion1:\n {0}".format(quaternion))
    RotationMatrix, _ = cv2.Rodrigues(rvecs)
    print("RotationMatrix:\n {0}".format(RotationMatrix))
    # Project the 3D axis points to the image plane
    axisImgPoints, jac = cv2.projectPoints(axisPoints, rvecs, tvecs, mtx, dist)
    # Draw the axis lines
    img = draw(img, corners, axisImgPoints)
    # Render a cube whose edge is one checkerboard square (0.034 m)
    CubeAxis = np.float32([[0, 0, 0], [0, 0.034, 0], [0.034, 0.034, 0], [0.034, 0, 0],
                           [0, 0, -0.034], [0, 0.034, -0.034], [0.034, 0.034, -0.034], [0.034, 0, -0.034]])
    axisImgPoints, jac = cv2.projectPoints(CubeAxis, rvecs, tvecs, mtx, dist)
    img2 = drawCube(img, corners, axisImgPoints)
    # Display and save the result
    cv2.imshow('chess board', img2)
    cv2.imwrite('checkerboardpnp3.png', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
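For the Unity side, here is a minimal sketch (an editorial illustration, not part of the original post) of the coordinate flip that is typically needed when transferring the OpenCV pose above into Unity; it assumes Unity's left-handed, y-up convention and reuses rvecs/tvecs from solvePnPRansac:
# Editorial sketch: map the OpenCV pose into Unity's convention.
# OpenCV's world frame is right-handed (x right, y down, z forward);
# Unity's is left-handed (x right, y up, z forward), so the y axis flips.
R, _ = cv2.Rodrigues(rvecs)        # world-to-camera rotation
pos = -R.T @ tvecs                 # camera position in the world frame
flip = np.diag([1.0, -1.0, 1.0])   # y-axis flip between the two conventions
pos_unity = flip @ pos             # position to assign to the Unity camera
R_unity = flip @ R.T @ flip        # camera rotation expressed in Unity's frame
If a constant offset remains after the flip, one common culprit is the world origin: PnP places it at the first inner corner of the checkerboard, so the Unity scene origin must coincide with that exact corner.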

Related

Problem removing distortion from an image (after camera calibration)

The image is a single frame of a 90-minute video.
If one draws a line from the top left corner (see left-distorted) to the right (see right-distorted), the distortion is visible by looking at the top line (see middle-distorted).
The white line between the points on the left and the right should be straight but has a slight U-shape compared to the straight red line.
Performing a camera calibration with OpenCV (reference) using a sample of entire frames from calibration videos (for some examples see indoor, outdoor-1, and outdoor-2) results in this undistorted image.
Here, the line from the top left corner (see left-undistorted) to the right (see right-undistorted) overcorrects the distortion, as the difference between the red line and the white one has become an inverse U-shape (see middle-undistorted).
I took the recording with an iPhone 11, and I am using Python 3.8.8 and OpenCV 4.5.3.
I followed the advice I could find on Stack Overflow (and through the most popular search results), but no checkerboard variation (size, camera angle, distance, or setting) yields a correct distortion correction. I fail to understand why.
UPDATE:
Based on the conversation below, the calibration footage needs to
have a focus similar to the reference scenario (in my case, objects are far away), and
have the board cover a minimum of, say, 20% of the image.
Here is the video I used (focus fixed on objects far away) to extract a sample of 50 frames for the camera calibration:
SQUARE_SIZE = 0.012
CHECKERBOARD_WIDTH = 22
CHECKERBOARD_HEIGHT = 15
objpoints = []
imgpoints = []
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
val = cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE
SAMPLE_SIZE = 50
objp = np.zeros((CHECKERBOARD_HEIGHT * CHECKERBOARD_WIDTH, 3), np.float32)
objp[:, :2] = np.mgrid[0:CHECKERBOARD_WIDTH, 0:CHECKERBOARD_HEIGHT].T.reshape(-1, 2)
objp = objp * SQUARE_SIZE
# for loop going over the sample of images
img = cv2.imread('path to image')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (CHECKERBOARD_WIDTH, CHECKERBOARD_HEIGHT), val)
if ret == True:
    objpoints.append(objp)
    # refine pixel coordinates of the detected 2D corners
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    imgpoints.append(corners2)
# Calculate camera calibration
img = cv2.imread('path to reference image')
h, w = img.shape[:2]
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (w, h), None, None)
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
# Undistort
mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
cv2.imwrite('path to output image', dst)
The result is unfortunately not correct, as shown by the deviation between the red and white lines at the top of the pitch.
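One quick sanity check worth adding (an editorial sketch, not part of the original post): the first value returned by calibrateCamera is the RMS reprojection error in pixels, and values much above roughly one pixel usually point to bad corner detections or wrong board dimensions.
# Editorial sketch: report the RMS reprojection error of the calibration.
rms, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   (w, h), None, None)
print("RMS reprojection error: {:.3f} px".format(rms))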

Python OpenCV remap/undistort cuts the image

I am trying to implement camera calibration from the OpenCV Python tutorial. This is my code:
import numpy as np
import cv2 as cv
import glob
CHECKERBOARD = (8,6)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)  # [48][3]
# (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
objp[:, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
# images like *.jpg
lastImageWithPattern = None  # last image in which the pattern was recognized
images = glob.glob('*.jpg')
for fname in images:
    img = cv.imread(fname)  # read image
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # convert to grayscale
    # find corners
    # ret: whether corners were found, corners: corner coordinates
    ret, corners = cv.findChessboardCorners(gray, (CHECKERBOARD[0], CHECKERBOARD[1]), None)
    if ret == True:
        lastImageWithPattern = fname
        print("Found pattern in " + fname)
        # refine the corner coordinates
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # save the refined corner coordinates
        objpoints.append(objp)
        imgpoints.append(corners2)
if lastImageWithPattern is not None:
    # Camera calibration
    # returns: calibration OK flag, camera matrix, distortion, rotation vectors, translation vectors
    ret, matrix, distortion, r_vecs, t_vecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    img = cv.imread(lastImageWithPattern)
    h, w = img.shape[:2]  # image dimensions
    newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(matrix, distortion, (w, h), 1, (w, h))
    # fix distortion
    mapx, mapy = cv.initUndistortRectifyMap(matrix, distortion, None, newCameraMatrix, (w, h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
    # Crop image
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    cv.imwrite('calibResult.jpg', dst)
I use 10 photos; here I will show only one image as an example. The problem with the result is the same if you use only this image:
At the end of the script I expect an image with fixed distortion and maybe some missing pixels, but not almost all of them. In this case my result image (calibResult.jpg) is:
The result is the same if I use cv.undistort from the same tutorial.
I want to know why the image is so heavily cropped and perhaps even more distorted.
My code works OK when I use samples/data/left01.jpg – left14.jpg, so maybe something in my images is not OK, but I don't know what, or how to debug it.
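As an editorial aside (an assumption about the cause, not a confirmed fix): the aggressive cropping is often governed by the alpha parameter of getOptimalNewCameraMatrix when the estimated distortion is large. A sketch of the alternative setting:
# Editorial sketch: alpha=0 keeps only pixels that remain valid after
# undistortion; alpha=1 (used above) retains all source pixels, which can
# produce large black borders and a tiny ROI when distortion is overestimated.
newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(matrix, distortion,
                                                    (w, h), 0, (w, h))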

Chessboard Pattern Appears Rectangular After Intrinsic Calibration OpenCV

I am working on a project in which I plan to use a checkerboard to derive the pose of multiple images from a camera and then use those images to create a 3D model. Currently I am working on the intrinsic calibration of my camera.
I am following the tutorial specified here. I have included my code below, though, as there are some adjustments due to different grid sizes.
import numpy as np
import cv2 as cv
import glob
import pickle
import matplotlib.pyplot as plt
import pdb
# termination criteria
# Stops if the specified accuracy (epsilon) is met
# Stops if the max number of iterations is exceeded
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0), scaled by the square size
squareSZ = 23.876  # square edge length in mm
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)*squareSZ
print(objp.shape)
# Plot the object points
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(objp[:, 0], objp[:, 1], objp[:, 2])
ax.set_xlabel('x-axis')
ax.set_ylabel('y-axis')
ax.set_zlabel('z-axis')
for i in np.arange(0, objp.shape[0]):
    ax.text(objp[i, 0], objp[i, 1], objp[i, 2], i)
plt.show()
pdb.set_trace()
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
# Get all the JPG filenames in the goProCal3 directory
images = glob.glob('goProCal3/*.JPG')
print('Loaded Images')
for fname in images:
    print('Analyzing File: ', fname)
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # change from BGR color space to gray color space
    # Find the chess board corners
    ret, corners = cv.findChessboardCorners(gray, (9, 6), None)
    # If found, add object points and image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        # Iterate to find the sub-pixel-accurate locations of corners (radial saddle points)
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        cv.drawChessboardCorners(img, (9, 6), corners2, ret)
        cv.imshow('img', img)
        cv.waitKey(500)
cv.destroyAllWindows()
# Use object points and image points to determine the camera matrix, distortion
# coefficients, rotation and translation vectors, etc.
# ret: whether the calibration was successful
# mtx: the output 3x3 intrinsic camera matrix
# dist: vector of the distortion coefficients
# rvecs: for each image, the rotation from the object coordinate space to the camera coordinate space
# tvecs: for each image, the corresponding translation
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
rvecs = np.array(rvecs)
tvecs = np.array(tvecs)
print(rvecs.shape)
print(tvecs.shape)
pdb.set_trace()
# Test the calibration on a new image
img = cv.imread('GOPR01.jpg')
h, w = img.shape[:2]
newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
# Undistort the test image and save it
dst = cv.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv.imwrite('calibresult.png', dst)
# Determine the re-projection error using the arithmetic mean
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)
    mean_error += error
print("total error: {}".format(mean_error/len(objpoints)))
Once this is complete, some of the defects in the image appear fixed, but in the final calibrated image the squares of the chessboard pattern appear as rectangles. I find this confusing; is this what I should expect if the code is working correctly?
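One quick diagnostic to consider (an editorial sketch, not part of the original post): if the estimated focal lengths fx and fy differ noticeably, projected squares will indeed render as rectangles, so comparing them is a cheap first check.
# Editorial sketch: compare the two focal lengths in the estimated matrix.
fx, fy = mtx[0, 0], mtx[1, 1]
print("fx = {:.1f}, fy = {:.1f}, fx/fy = {:.4f}".format(fx, fy, fx / fy))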

Incomplete circle detection with OpenCV Python

Is it possible to get the coordinates of the incomplete circle? I am using OpenCV and Python, and I can find most of the circles.
But I have no clue how to detect the incomplete circle in the picture.
I am looking for a simple way to solve it.
import sys
import cv2 as cv
import numpy as np
## [load]
default_file = 'captcha2.png'
# Loads an image
src = cv.imread(cv.samples.findFile(default_file), cv.IMREAD_COLOR)
## [convert_to_gray]
# Convert it to gray
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
## [convert_to_gray]
## [reduce_noise]
# Reduce the noise to avoid false circle detection
gray = cv.medianBlur(gray, 3)
## [reduce_noise]
## [houghcircles]
#rows = gray.shape[0]
circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, 5,
                          param1=1, param2=35,
                          minRadius=1, maxRadius=30)
## [houghcircles]
## [draw]
if circles is not None:
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        center = (i[0], i[1])
        # circle center
        cv.circle(src, center, 1, (0, 100, 100), 3)
        # circle outline
        radius = i[2]
        cv.circle(src, center, radius, (255, 0, 255), 3)
## [draw]
## [display]
cv.imshow("detected circles", src)
cv.waitKey(0)
## [display]
Hi - here is another picture. I want the x and y coordinates of the incomplete circle, light blue on the lower left.
Here is the original picture:
You need to remove the colorful background of your image and display only circles.
One approach is:
Get the binary mask of the input image
Apply Hough Circle to detect the circles
Binary mask:
Using the binary mask, we will detect the circles:
Code:
# Load the libraries
import cv2
import numpy as np
# Load the image
img = cv2.imread("r5lcN.png")
# Copy the input image
out = img.copy()
# Convert to the HSV color space
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Get binary mask
msk = cv2.inRange(hsv, np.array([0, 0, 130]), np.array([179, 255, 255]))
# Detect circles in the image
crc = cv2.HoughCircles(msk, cv2.HOUGH_GRADIENT, 1, 10, param1=50, param2=25, minRadius=0, maxRadius=0)
# Ensure circles were found
if crc is not None:
    # Convert the coordinates and radius of the circles to integers
    crc = np.round(crc[0, :]).astype("int")
    # For each (x, y) coordinate and radius of the circles
    for (x, y, r) in crc:
        # Draw the circle
        cv2.circle(out, (x, y), r, (0, 255, 0), 4)
        # Print coordinates
        print("x:{}, y:{}".format(x, y))
# Display
cv2.imshow("out", np.hstack([img, out]))
cv2.waitKey(0)
Output:
x:178, y:60
x:128, y:22
x:248, y:20
x:378, y:52
x:280, y:60
x:294, y:46
x:250, y:44
x:150, y:62
Explanation
We had three options for thresholding:
Simple threshold result:
Adaptive threshold:
Binary mask:
As we can see, the third option gave us a suitable result. Of course, you could get the desired result with the other options, but it might take a long time to find suitable parameters. Then we applied Hough circles, played with the parameter values, and got the desired result.
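For reference, minimal sketches of the first two options (editorial illustrations with example parameter values, not the exact ones used for the figures above):
# Editorial sketch: the two threshold alternatives mentioned above.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 1) Simple (global) threshold: one cutoff value for the whole image
_, th_simple = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY)
# 2) Adaptive threshold: the cutoff is computed per pixel neighborhood
th_adapt = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, 11, 2)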
Update
For the second uploaded image, you can detect the semi-circle by reducing the first and second parameters of the Hough circle.
crc = cv2.HoughCircles(msk, cv2.HOUGH_GRADIENT, 1, 10, param1=10, param2=15, minRadius=0, maxRadius=0)
Replacing the above line in the main code will result in:
Console result
x:238, y:38
x:56, y:30
x:44, y:62
x:208, y:26

Camera calibration, reverse projection of pixel to direction

I am using OpenCV to estimate a webcam's intrinsic matrix from a series of chessboard images, as detailed in this tutorial, and to reverse-project a pixel to a direction (in terms of azimuth/elevation angles).
The final goal is to let the user select a point on the image, estimate the direction of this point in relation to the center of the webcam, and use this as the DOA for a beam-forming algorithm.
So once I have estimated the intrinsic matrix K, I reverse-project the user-selected pixel as inv(K) * [u, v, 1] and display the arctangent of the resulting ray's x and y components as azimuth/elevation angles (see code below).
result = [0, 0, 0]  # reverse-projected point, in homogeneous coordinates
while 1:
    _, img = cap.read()
    if flag:  # If the user has clicked somewhere
        result = np.dot(np.linalg.inv(mtx), [mouse_x, mouse_y, 1])
        result = np.arctan(result)  # convert to angles
        flag = False
    cv2.putText(img, '({},{})'.format(mouse_x, mouse_y), (20, 440), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(img, '({:.2f},{:.2f})'.format(180/np.pi*result[0], 180/np.pi*result[1]), (20, 460),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow('image', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
My problem is that I'm not sure whether my results are coherent. The major incoherence is that the point of the image corresponding to the {0,0} angle is noticeably off the image center, as seen below (the camera image has been replaced by a black background for privacy reasons):
I don't really see a simple yet efficient way of measuring the accuracy (the only method I could think of was to use a servo motor with a laser on it, just under the camera, and point it in the computed direction).
Here is the intrinsic matrix after calibration with 15 images:
I get an error of around 0.44 RMS, which seems satisfactory.
My calibration code :
nCalFrames = 12  # number of frames for calibration
nFrames = 0
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)  # termination criteria
objp = np.zeros((9*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:7].T.reshape(-1, 2)
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
cap = cv2.VideoCapture(0)
previousTime = 0
gray = 0
while 1:
    # Capture frame-by-frame
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 7), None)
    # If found, add object points and image points (after refining them)
    if ret:
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        if time.time() - previousTime > 2:
            previousTime = time.time()
            imgpoints.append(corners2)
            objpoints.append(objp)
            img = cv2.bitwise_not(img)
            nFrames = nFrames + 1
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9, 7), corners, ret)
    cv2.putText(img, '{}/{}'.format(nFrames, nCalFrames), (20, 460), cv2.FONT_HERSHEY_SIMPLEX,
                2, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(img, 'press \'q\' to exit...', (255, 15), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 255), 1, cv2.LINE_AA)
    # Display the resulting frame
    cv2.imshow('Webcam Calibration', img)
    if nFrames == nCalFrames:
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
RMS_error, mtx, disto_coef, _, _ = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
EDIT: another test method would be to use a whiteboard with points at known angles and estimate the error by comparing them with experimental results, but I don't know how to set up such a system.
Regarding your first concern, it is normal to have the principal point off the image center. The estimated point, which is the point of zero elevation and azimuth, is the one that minimizes the radial distortion coefficients, and for a low-cost wide-angle lens (e.g., that of a typical webcam) it can easily be off by a noticeable amount.
Your calibration should be OK up to the call to calibrateCamera. However, in your code snippet it seems you're ignoring the distortion coefficients. What is missing is initUndistortRectifyMap, which also lets you re-center the principal point if that matters.
h, w = img.shape[:2]
# compute new camera matrix with central principal point
new_mtx,roi = cv2.getOptimalNewCameraMatrix(mtx,disto_coef,(w,h),1,(w,h))
print(new_mtx)
# compute undistort maps
mapx,mapy = cv2.initUndistortRectifyMap(mtx,disto_coef,None,new_mtx,(w,h),5)
It essentially makes focal length equal in both dimensions and centers the principal point (see OpenCV python documentation for parameters).
Then, at each
_, img = cap.read()
you must undistort the image before rendering
# apply the remap
img = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)
# crop the image
x,y,w,h = roi
img = img[y:y+h, x:x+w]
Here, I set the background to green to emphasize the barrel distortion. The output could be something like this (camera image replaced by a checkerboard for privacy reasons):
If you do all this, if your calibration target is accurate, and if your calibration samples fill the entire image area, you should be quite confident of the computation. However, to validate the measured azimuth and elevation against the undistorted image's pixel readings, I'd suggest a tape measure from the lens's first principal point and a calibration plate placed at a normal angle right in front of the camera. There you can compute the expected angles and compare.
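As a lighter-weight alternative (an editorial sketch, not part of the original answer): instead of remapping every frame, you can undistort just the clicked pixel with cv2.undistortPoints, which returns normalized coordinates directly:
# Editorial sketch: undistort only the clicked pixel, then convert the
# normalized ray (x/z, y/z) to azimuth/elevation angles.
pt = np.array([[[mouse_x, mouse_y]]], np.float32)
norm = cv2.undistortPoints(pt, mtx, disto_coef).reshape(2)
azimuth = np.arctan(norm[0])
elevation = np.arctan(norm[1])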
Hope this helps.
