ret and corners are always False and None. I wrote img.py:
import numpy as np
import cv2
import glob
from PIL import Image
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = Image.open('cosme.jpg')
gray = cv2.cvtColor(np.array(images),cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
print(ret)
print(corners)
# If found, add object points, image points (after refining them)
if ret == True:
    objpoints.append(objp)
    corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
    imgpoints.append(corners2)
    # Draw and display the corners
    img = cv2.drawChessboardCorners(np.array(images), (7,6), corners2, ret) # draw on the image loaded with PIL
    cv2.imshow('img', img)
    cv2.waitKey(0)
cv2.destroyAllWindows()
ret is always False, so the image is never shown. print(ret) prints False and print(corners) prints None, so the program never enters the if statement.
I really cannot understand why ret is always False, because gray looks normal. I suspect the (7,6) pattern size is what causes this, but I don't know why. What is wrong with my code, and how should I fix it?
It would help if you posted the image itself. It should contain a calibration checkerboard with 7x6 inner corners visible in it; if the corners aren't found, the return value is False and the list of corners is None.
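If you are not sure how many inner corners the board actually has, one way to check (just a debugging sketch, reusing cv2 and the gray image from the script above; the candidate sizes are arbitrary) is to scan a few plausible pattern sizes and see which one detects:

# Debugging sketch: try several plausible inner-corner counts on the same gray image
for size in [(7, 6), (6, 7), (9, 6), (6, 9), (8, 6), (6, 8)]:
    found, _ = cv2.findChessboardCorners(gray, size, None)
    print(size, found)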
I am trying to implement camera calibration from the OpenCV Python tutorial. This is my code:
import numpy as np
import cv2 as cv
import glob
CHECKERBOARD = (8,6)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((CHECKERBOARD[0]*CHECKERBOARD[1],3), np.float32) #[56][3]
# (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
objp[:,:2] = np.mgrid[0:CHECKERBOARD[0],0:CHECKERBOARD[1]].T.reshape(-1,2)
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
# images like *.jpg
lastImageWithPattern = None # last image with pattern recognize
images = glob.glob('*.jpg')
for fname in images:
    img = cv.imread(fname) #read image
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) #black and white
    # find corners
    # ret: are there corners, corners: corners coordinates
    ret, corners = cv.findChessboardCorners(gray, (CHECKERBOARD[0], CHECKERBOARD[1]), None)
    if ret == True:
        lastImageWithPattern = fname
        print("Found pattern in " + fname)
        # fix corners coordinates
        corners2 = cv.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        # save corners coordinate
        objpoints.append(objp)
        imgpoints.append(corners2) # store the refined corners
if lastImageWithPattern is not None:
    # Camera calibration
    # returns: RMS re-projection error, camera matrix, distortion coefficients, rotation vectors, translation vectors
    ret, matrix, distortion, r_vecs, t_vecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    img = cv.imread(lastImageWithPattern)
    h, w = img.shape[:2] # image dimensions
    newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(matrix, distortion, (w,h), 1, (w,h))
    # fix distortion
    mapx, mapy = cv.initUndistortRectifyMap(matrix, distortion, None, newCameraMatrix, (w,h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
    # Crop image
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    cv.imwrite('calibResult.jpg', dst)
I use 10 photos. Here I will show only one of them; the problem with the result is the same if you use only this image:
At the end of the script I expect an image with the distortion corrected and maybe some missing pixels, but not almost all of them missing. In this case my result image (calibResult.jpg) is:
The result is the same if I use cv.undistort from the same tutorial.
I want to know why the image is cropped so heavily and looks even more distorted.
My code works fine when I use samples/data/left01.jpg – left14.jpg, so maybe something in my images is not OK, but I don't know what, or how to debug it.
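One way to narrow this down (a debugging sketch, not from the original post; it reuses objpoints, imgpoints, r_vecs, t_vecs, matrix and distortion from the script above) is to print the re-projection error per accepted view; views with a much larger error than the rest usually point at a mis-detected or problematic pattern image:

# Per-view re-projection error (sketch)
for i in range(len(objpoints)):
    projected, _ = cv.projectPoints(objpoints[i], r_vecs[i], t_vecs[i], matrix, distortion)
    err = cv.norm(imgpoints[i], projected, cv.NORM_L2) / len(projected)
    print('view', i, 're-projection error:', err)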
I am working on a project in which I plan to use a checkerboard to derive the pose of multiple images from a camera and then use those images to create a 3D model. Currently I am working on the intrinsic calibration of my camera.
I am following the tutorial specified here. I have included my code below, though, as there are some adjustments due to a different grid size.
import numpy as np
import cv2 as cv
import glob
import pickle
import matplotlib.pyplot as plt
import pdb
# termination criteria
# Stops if the specified accuracy (epsilon) is met
# Stops if the max number of iterations is exceeded
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points on the board plane, like (0,0,0), (1,0,0), (2,0,0), ..., (8,5,0), scaled by the square size
squareSZ = 23.876 #Square edge length in mm
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)*squareSZ
print(objp.shape)
# Plots the object points
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(objp[:,0],objp[:,1],objp[:,2])
ax.set_xlabel('x-axis')
ax.set_ylabel('y-axis')
ax.set_zlabel('z-axis')
for i in np.arange(0,objp.shape[0]):
    ax.text(objp[i,0], objp[i,1], objp[i,2], i)
plt.show()
pdb.set_trace()
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
#Gets all the JPG filenames in the goProCal3 directory
images = glob.glob('goProCal3/*.JPG')
print('Loaded Images')
for fname in images:
    print('Analyzing File: ', fname)
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) #Changes from BGR colorspace to Gray colorspace
    # Find the chess board corners
    ret, corners = cv.findChessboardCorners(gray, (9,6), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        #Iterates to find the sub-pixel accurate location of corners or radial saddle points
        corners2 = cv.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2) # store the refined corners
        # Draw and display the corners
        cv.drawChessboardCorners(img, (9,6), corners2, ret)
        cv.imshow('img', img)
        cv.waitKey(500)
cv.destroyAllWindows()
# Uses object points and image points to determine the camera matrix, distortion coefficients, rotation and translation vectors, etc.
# ret: the overall RMS re-projection error of the calibration
# mtx: the camera matrix; it contains the output 3x3 intrinsic camera matrix
# dist: vector of the distortion coefficients
# rvecs: for each image, the rotation to go from the object coordinate space to the camera coordinate space
# tvecs: for each image, the corresponding translation
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
rvecs = np.array(rvecs)
tvecs = np.array(tvecs)
print(rvecs.shape)
print(tvecs.shape)
pdb.set_trace()
#
# Tests the calibration on a new image
img = cv.imread('GOPR01.jpg')
h, w = img.shape[:2]
newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
# Undistort the test image and save it
dst = cv.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv.imwrite('calibresult.png', dst)
# Determine the error in the re-projection using arithmetic mean
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
    mean_error += error
print( "total error: {}".format(mean_error/len(objpoints)) )
Once this is complete, some of the defects in the image appear to be fixed, but in the final calibrated image the squares of the chessboard pattern appear as rectangles. I find this confusing: is this what I should expect if the code is working correctly?
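One quick diagnostic (just a sketch; it only inspects values already computed above) is to compare the horizontal and vertical focal terms of mtx and newcameramtx. If the two fx/fy ratios differ noticeably, the undistorted image has been rescaled more in one direction than the other, which would make the squares look stretched into rectangles:

# Compare the fx/fy ratios of the original and optimal new camera matrices
fx, fy = mtx[0, 0], mtx[1, 1]
nfx, nfy = newcameramtx[0, 0], newcameramtx[1, 1]
print('original fx/fy ratio:', fx / fy)
print('new fx/fy ratio:', nfx / nfy)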
I'm trying to do camera calibration; I have taken the code from the OpenCV documentation. Here is my code:
import numpy as np
import cv2
import glob
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
objpoints = []
imgpoints = []
images = glob.glob('/usr/local/share/OpenCV/samples/cpp/chess*.jpg')
img = cv2.imread("2.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret = False
ret, corners = cv2.findChessboardCorners(gray, (7, 6))
print (ret)
if ret == True:
    objpoints.append(objp)
    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria) # keep the refined corners
    imgpoints.append(corners)
    # Draw and display the corners
    cv2.drawChessboardCorners(img, (7,6), corners, ret)
    cv2.imshow('img', img)
    cv2.imwrite('Corners_detected.jpg', img, None)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
img = cv2.imread('2.jpg')
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
# undistort
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
cv2.imwrite('calibration_result.png',dst)
In this code, image 2.jpg is used for calibration.
This is the image considered for understanding the calibration:
My code detects corners only for this image. It does not work with other checkerboard images; it is not able to detect the corners there. Why is that?
Unfortunately, I do not have enough reputation to comment and clarify some points. However, I will try to answer anyway. Given that you have added the print(ret), I assume this is where your problem lies.
It looks like you are using the wrong checkerboard size in cv2.findChessboardCorners(gray, (7, 6)). I have found that this function returns False when given the wrong pattern dimensions.
This is also the case for the objp object.
Given the image you are showing, this should be (n-1, m-1), where n and m are the checkerboard dimensions in squares.
For your given image, this should be cv2.findChessboardCorners(gray, (9, 6)).
Notice that in the OpenCV calibration example the checkerboard is 8x7 squares, hence the given 7x6 input value.
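Concretely, a sketch of the corrections suggested above (assuming the board really has 9x6 inner corners, and reusing np, cv2 and gray from the question's script):

# Pattern size and object points kept consistent (assumes 9x6 inner corners)
pattern_size = (9, 6)
objp = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2)
ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)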
The thing about the camera calibration method is that it sometimes will not recognize a checkerboard grid that isn't the maximum size. You could most likely get away with (8, 6) or (9, 5) as the size; however, with (6, 7) there is too much of a difference, so the method won't recognize it.
I don't have any research sources, but I've tested this myself before.
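If detection is still unreliable even with the right size, it can sometimes help to pass the standard detector flags (a sketch reusing cv2 and gray from the script above; whether the flags help depends on the image):

# Same call with extra detection flags
flags = (cv2.CALIB_CB_ADAPTIVE_THRESH
         + cv2.CALIB_CB_NORMALIZE_IMAGE
         + cv2.CALIB_CB_FAST_CHECK)
ret, corners = cv2.findChessboardCorners(gray, (9, 6), flags=flags)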
I'm using a source code example from the OpenCV for Python documentation, as follows:
import numpy as np
import cv2
import glob
# termination criteria: 30 is the max number of iterations, 0.001 the minimum accuracy
# CV_TERMCRIT_ITER or CV_TERMCRIT_EPS tells the algorithm that we want to terminate either after some number of iterations or when the convergence metric reaches some small value (respectively).
# The next two arguments set the values at which one, the other, or both of these criteria should terminate the algorithm.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (8,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('*.jpg')
# fname= 'C:\\Users\\Bender\\Desktop\\fotospayloads\\'
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
cv2.destroyAllWindows()
rms, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
Unfortunately, when I run the source code I get the following error:
"NameError: name 'gray' is not defined" (line 50).
Any help would be very much appreciated.
Thanks
Isaac
There are no images in the folder where your script is located, which is why glob.glob('*.jpg') does not return any files and the gray object is never created.
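A quick way to confirm this (just a sketch; the absolute path is only an example) is to check what glob actually returns before running the calibration loop:

import os
import glob

images = glob.glob('*.jpg')  # or an absolute path, e.g. glob.glob('/home/pi/photos/*.jpg')
print('working directory:', os.getcwd())
print('found', len(images), 'images:', images)
if not images:
    raise SystemExit('No .jpg files found - check the path or pattern before calibrating')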
I'm running Python 3 on a Raspberry Pi 3 and have OpenCV installed. I took 10 images of a checkerboard; it detects all 10 images and displays them, but when it gets to the last line, it throws an error. Here are the images I used: https://imgur.com/gallery/IDfHH This is my code:
import numpy as np
import cv2
import glob
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('*.jpg')
for fname in images:
    print('test')
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (6,9), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        print('test2')
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (6,9), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
print('test3')
cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
The example assumes that you have a 6x7 chessboard image; I think you have a 6x9.
You have to prepare the objp variable for a 6x9 calibration image, so the code has to be like this:
objp = np.zeros((6*9,3), np.float32)
Thanks @Rui Sebastiao.
I was using a 14x10 board, so I changed the following lines and at least there is no error now :)
objp = np.zeros((14*10, 3), np.float32)
objp[:, :2] = np.mgrid[0:14, 0:10].T.reshape(-1, 2)
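More generally (a sketch mirroring the fixes above, reusing np, cv2, gray and img from the script; set pattern_size to whatever your board's inner-corner count actually is), it is safer to define the size once and reuse it everywhere, so objp, findChessboardCorners and drawChessboardCorners can never disagree:

# Define the inner-corner count once and reuse it everywhere
pattern_size = (6, 9)  # adjust to your board
objp = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2)
ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)
if ret:
    img = cv2.drawChessboardCorners(img, pattern_size, corners, ret)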