Projecting KITTI velodyne to image produces a narrow strip - python

I am trying to project the KITTI velodyne point cloud onto the left camera images. I followed the README in the KITTI devkit, but the result is off: the points are projected as a narrow band at the top of the image. The band still seems to have some structure, so I suspect I am doing something wrong with the calibration matrices, or maybe with PIL.ImageDraw.point.
The projection equation that I am using is per the KITTI devkit documentation:
x = P2 * R0_rect * Tr_velo_to_cam * y, where
y is a 4xN matrix of N points in XYZI format (I is the reflectance intensity),
Tr_velo_to_cam is the 3x4 velodyne-to-camera transformation matrix,
R0_rect is the 3x3 rectifying camera rotation matrix,
P2 is the 3x4 projection matrix of the left color camera.
Below are the code, its output, and the produced image.
test.py:
import numpy as np
import os
from PIL import Image, ImageDraw

DATASET_PATH = "<DATASET PATH HERE>"
vld_path = os.path.join(DATASET_PATH, "velodyne/{:06d}.bin")
img_path = os.path.join(DATASET_PATH, "image_2/{:06d}.png")
clb_path = os.path.join(DATASET_PATH, "calib/{:06d}.txt")
frame_num = 58

# Load files
img = Image.open(img_path.format(frame_num))
clb = {}
with open(clb_path.format(frame_num), 'r') as clb_f:
    for line in clb_f:
        calib_line = line.split(':')
        if len(calib_line) < 2:
            continue
        key = calib_line[0]
        value = np.array(list(map(float, calib_line[1].split())))
        value = value.reshape((3, -1))
        clb[key] = value
vld = np.fromfile(vld_path.format(frame_num), dtype=np.float32)
vld = vld.reshape((-1, 4)).T

print("img.shape:", np.shape(img))
print("P2.shape:", clb['P2'].shape)
print("R0_rect.shape:", clb['R0_rect'].shape)
print("Tr_velo_to_cam.shape:", clb['Tr_velo_to_cam'].shape)
print("vld.shape:", vld.shape)

# Reshape calibration matrices to 4x4 homogeneous form
P2 = clb['P2']
R0 = np.eye(4)
R0[:-1, :-1] = clb['R0_rect']
Tr = np.eye(4)
Tr[:-1, :] = clb['Tr_velo_to_cam']

# Prepare 3d points (homogeneous coordinates)
pts3d = vld[:, vld[-1, :] > 0].copy()
pts3d[-1, :] = 1

# Project 3d points
pts3d_cam = R0 @ Tr @ pts3d
mask = pts3d_cam[2, :] >= 0  # Z >= 0, keep points in front of the camera
pts2d_cam = P2 @ pts3d_cam[:, mask]
pts2d = (pts2d_cam / pts2d_cam[2, :])[:-1, :]
print("pts2d.shape:", pts2d.shape)

# Draw the points
img_draw = ImageDraw.Draw(img)
img_draw.point(pts2d, fill=(255, 0, 0))
img.show()
STDOUT:
$> python ./test.py
img.shape: (370, 1224, 3)
P2.shape: (3, 4)
R0_rect.shape: (3, 3)
Tr_velo_to_cam.shape: (3, 4)
vld.shape: (4, 115052)
pts2d.shape: (2, 53119)
Produced image:

Found the problem: notice that pts2d has shape (2, N), i.e. the points are stored column-wise. However, ImageDraw.point expects either an Nx2 sequence of (x, y) pairs or a flat sequence of 2N alternating x and y values. I could not get the point routine to accept the Nx2 NumPy array directly, so I transposed the points and drew them one by one in a loop, which worked.
# ...
pts2d = (pts2d_cam / pts2d_cam[2, :])[:-1, :].T
print("pts2d.shape:", pts2d.shape)
# Draw the points
img_draw = ImageDraw.Draw(img)
for point in pts2d:
    img_draw.point(point, fill=(255, 0, 0))
# ...
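As a side note, drawing ~50k points one by one can be slow. A minimal sketch of a vectorized alternative, assuming the transposed (N, 2) pts2d from the snippet above: flattening it into alternating x/y values lets ImageDraw.point draw everything in a single call.
# Alternative (sketch): flatten the (N, 2) array into [x0, y0, x1, y1, ...],
# which PIL's ImageDraw.point accepts as a single sequence.
pts_flat = pts2d.reshape(-1).tolist()
img_draw = ImageDraw.Draw(img)
img_draw.point(pts_flat, fill=(255, 0, 0))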

Related

Images after stereo rectification are not row aligned

What the problem is:
I have been following this tutorial (Link) as well as the openCV documentation but I cannot seem to get my stereo rectification right.
How my problem is different:
My setup is mechanically fixed, which was the issue in this post.
What I have done:
Computed camera intrinsics and stereo parameters in MATLAB’s Camera Calibrator. Then followed said tutorial to undistort and rectify both stereo images. MATLAB’s Stereo Calibrator produces this rectified image view (which looks perfectly row-aligned) that I want to reproduce in python for the same images.
I am using six corners of a chessboard in the left frame and connecting them with a line to the corresponding corners in the right frame.
My idea is that epipolar lines must be horizontal in an undistorted and rectified image, so these connecting lines must be horizontal (average slope very close to 0). Yet they are not!
How to get there:
left image
right image
import cv2 as cv
import numpy as np

# Intrinsics from MATLAB
distortionCoefficientsL = np.array([0.1112, -0.2270, 0.0014, 7.5801e-04, 0.0835])
cameraMatrixL = np.array([[1384.3, 0, 933.5327], [0, 1383.2, 532.1460], [0, 0, 1]])
newCameraMatrixL = cameraMatrixL
distortionCoefficientsR = np.array([0.0362, -0.1640, -2.2236e-04, 3.4982e-04, 0.1148])
cameraMatrixR = np.array([[1417.1, 0, 972.7481], [0, 1418.0, 542.9659], [0, 0, 1]])
newCameraMatrixR = cameraMatrixR

# Stereo params from MATLAB
Rot = np.array([[0.9999, 0.0109, 0.0068],[-0.0111, 0.9998, 0.0178],[-0.0066, -0.0179, 0.9998]])
Trns = np.array([[-96.5080], [-1.0640], [-0.8036]])
Emat = np.array([[0.0015, 0.7844, -1.0782],[-0.1459, 1.7298, 96.4957],[0.0084, -96.4985, 1.7210]])
Fmat = np.array([[7.8440e-10, 4.0019e-07, -9.7456e-04],[-7.4317e-08, 8.8188e-07, 0.0677],[4.5630e-05, -0.0706, 3.0555]])

# Rectification and undistortion
imgL = cv.imread('path to left image')
imgR = cv.imread('path to right image')
grayL = cv.cvtColor(imgL, cv.COLOR_BGR2GRAY)
grayR = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)
imgSize = grayL.shape[::-1]
R_L, R_R, proj_mat_l, proj_mat_r, Q, roiL, roiR = cv.stereoRectify(newCameraMatrixL, distortionCoefficientsL, newCameraMatrixR, distortionCoefficientsR, imgSize, Rot, Trns, flags=cv.CALIB_ZERO_DISPARITY, alpha=1)
leftMapX, leftMapY = cv.initUndistortRectifyMap(newCameraMatrixL, distortionCoefficientsL, R_L, proj_mat_l, imgSize, cv.CV_32FC1)
rightMapX, rightMapY = cv.initUndistortRectifyMap(newCameraMatrixR, distortionCoefficientsR, R_R, proj_mat_r, imgSize, cv.CV_32FC1)
Left_rectified = cv.remap(imgL, leftMapX, leftMapY, cv.INTER_LINEAR, borderMode=cv.BORDER_CONSTANT)
Right_rectified = cv.remap(imgR, rightMapX, rightMapY, cv.INTER_LINEAR, borderMode=cv.BORDER_CONSTANT)
grayL = cv.cvtColor(Left_rectified, cv.COLOR_BGR2GRAY)
grayR = cv.cvtColor(Right_rectified, cv.COLOR_BGR2GRAY)
font = cv.FONT_HERSHEY_PLAIN
fontScale = 4

# Find all chessboard corners at subpixel accuracy
boardSize = (6, 9)
subpix_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 10e-06)
winSize = (11, 11)
retL, cornersL = cv.findChessboardCorners(grayL, boardSize, flags=cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE)
retR, cornersR = cv.findChessboardCorners(grayR, boardSize, flags=cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE)
objp = np.zeros((1, boardSize[0]*boardSize[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:boardSize[0], 0:boardSize[1]].T.reshape(-1, 2)
objectPoints = []
imagePointsL = []
imagePointsR = []
slopes = []
if retR is True and retL is True:
    objectPoints.append(objp)
    cv.cornerSubPix(grayR, cornersR, (3, 3), (-1, -1), subpix_criteria)
    cv.cornerSubPix(grayL, cornersL, (3, 3), (-1, -1), subpix_criteria)
    imagePointsR.append(cornersR)
    imagePointsL.append(cornersL)

# Get points in 4th row (vertical centre) and display them
vis = np.concatenate((Left_rectified, Right_rectified), axis=1)
for i in range(24, 30):
    x_l = int(round(imagePointsL[0][i][0][0]))
    y_l = int(round(imagePointsL[0][i][0][1]))
    cv.circle(vis, (x_l, y_l), 7, (0, 255, 255), -1)
    x_r = int(round(imagePointsR[0][i][0][0] + Left_rectified.shape[1]))
    y_r = int(round(imagePointsR[0][i][0][1]))
    cv.circle(vis, (x_r, y_r), 7, (0, 255, 255), -1)
    slope = (y_l - y_r) / (x_r - x_l)
    slopes.append(slope)
    cv.line(vis, (x_l, y_l), (x_r, y_r), (0, 255, 255), 2)
avg = sum(slopes) / len(slopes)
cv.putText(vis, 'Average slope ' + str(avg), (vis.shape[1]//3, (vis.shape[0]//5)*4), font, fontScale, (0, 255, 255), 2, cv.LINE_AA)
cv.imshow('Rectification check - remapped images', vis)
cv.waitKey(0)
cv.destroyAllWindows()
This is the result, so it seems to me that something is wrong with the rotation of one or both of the images.
When I undistort both images individually and connect the six corresponding points, I seem to get a better row-aligned view than with the rectification process, so this might prove that cameraMatrix, newCamMatrix & distCoeffs are good?!
Any help appreciated!!
Edit:
This is another visual representation with horizontal lines instead of connecting lines between two corresponding points.
Edit2:
Proof that MATLAB detected all corners
Proof that OpenCV detected all corners
You probably messed up the rotation matrix. I ran some tests with my own code and obtained the same result (up to numerical precision).
It turns out that you most likely inverted the rotation matrix during calibration.
By replacing:
Rot = np.array([[0.9999, 0.0109, 0.0068],[-0.0111, 0.9998, 0.0178],[-0.0066, -0.0179, 0.9998]])
with its transpose (= inverse for rotations) as:
Rot = np.array([[0.9999, 0.0109, 0.0068],[-0.0111, 0.9998, 0.0178],[-0.0066, -0.0179, 0.9998]]).T
You obtain the expected result.
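A hedged note on why the transpose is needed (my assumption, not something stated above): MATLAB's Stereo Camera Calibrator reports RotationOfCamera2 in a row-vector (post-multiplication) convention, while cv.stereoRectify expects the column-vector convention, so the matrix has to be transposed when carried over. A minimal sanity check, with a hypothetical helper mean_vertical_error and chessboard corners detected in the original (unrectified) images, could look like this:
import numpy as np
import cv2 as cv

def mean_vertical_error(R_candidate, camL, distL, camR, distR, img_size, T, cornersL, cornersR):
    # Rectify with a candidate rotation and measure how far corresponding
    # chessboard corners are from being row-aligned (ideally close to 0 pixels).
    R1, R2, P1, P2, _, _, _ = cv.stereoRectify(camL, distL, camR, distR, img_size,
                                               R_candidate, T,
                                               flags=cv.CALIB_ZERO_DISPARITY, alpha=1)
    ptsL = cv.undistortPoints(cornersL, camL, distL, R=R1, P=P1).reshape(-1, 2)
    ptsR = cv.undistortPoints(cornersR, camR, distR, R=R2, P=P2).reshape(-1, 2)
    return np.mean(np.abs(ptsL[:, 1] - ptsR[:, 1]))

# Compare both conventions; the smaller error indicates the matrix OpenCV expects:
# err_given      = mean_vertical_error(Rot,   cameraMatrixL, distortionCoefficientsL,
#                                      cameraMatrixR, distortionCoefficientsR,
#                                      imgSize, Trns, cornersL_orig, cornersR_orig)
# err_transposed = mean_vertical_error(Rot.T, cameraMatrixL, distortionCoefficientsL,
#                                      cameraMatrixR, distortionCoefficientsR,
#                                      imgSize, Trns, cornersL_orig, cornersR_orig)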

Image-Processing: Converting normal pictures into FishEye images with intrinsic matrix

I need to synthesize many FishEye images with different intrinsic matrices based on normal pictures. I am following the method mentioned in this paper.
Ideally, if the algorithm is correct, the fisheye effect should look like this:
But when I use my algorithm to convert a picture, it looks like this:
So below is my code's flow:
1. First, I read the raw image with cv2
# imports used across the snippets below
import cv2
import numpy as np
from PIL import Image
from scipy import ndimage

def read_img(image):
    img = ndimage.imread(image)  # this returns an (h, w, 4) array: [R, G, B, 255] per pixel
    img_shape = img.shape
    print(img_shape)
    # get the pixel coordinates
    w = img_shape[1]  # the width
    h = img_shape[0]  # the height
    uv_coord = []
    for u in range(w):
        for v in range(h):
            uv_coord.append([float(u), float(v)])  # records the coords as [x1,y1], [x1,y2], [x1,y3], ...
    return np.array(uv_coord)
Then, based on the paper:
r(θ) = k1θ + k2θ^3 + k3θ^5 + k4θ^7, (1)
where the k's are the distortion coefficients.
Given pixel coordinates (x, y) in the pinhole projection image, the corresponding image coordinates (x', y') in the fisheye can be computed as:
x' = r(θ) cos(ϕ), y' = r(θ) sin(ϕ), (2)
where ϕ = arctan((y − y0)/(x − x0)), and (x0, y0) are the coordinates of the principal point in the pinhole projection image.
The image coordinates (x', y') are then converted into pixel coordinates (xf, yf):
xf = mu * x' + u0, yf = mv * y' + v0, (3)
where (u0, v0) are the coordinates of the principal point in the fisheye image, and mu, mv denote the number of pixels per unit distance in the horizontal and vertical directions. So I am guessing mu, mv are just fx, fy from the intrinsic matrix, and (u0, v0) are (cx, cy).
def add_distortion(sourceUV, dmatrix, Kmatrix):
    '''Computes the distorted (fisheye) pixel coords for the given original image coords.
    input arguments:
    dmatrix -- the distortion coefficients [k1, k2, k3, k4] for tweaking purposes
    Kmatrix -- [fx, fy, cx, cy, s]'''
    u = sourceUV[:, 0]  # width in x
    v = sourceUV[:, 1]  # height in y
    rho = np.sqrt(u**2 + v**2)
    # get theta
    theta = np.arctan(rho, np.full_like(u, 1))
    # rho_mat = np.array([rho, rho**3, rho**5, rho**7])
    rho_mat = np.array([theta, theta**3, theta**5, theta**7])
    # get: rho(theta) = k1*theta + k2*theta**3 + k3*theta**5 + k4*theta**7
    rho_d = dmatrix @ rho_mat
    # get phi
    phi = np.arctan2((v - Kmatrix[3]), (u - Kmatrix[2]))
    xd = rho_d * np.cos(phi)
    yd = rho_d * np.sin(phi)
    # converting the coords from the image plane back to pixel coords
    ud = Kmatrix[0] * (xd + Kmatrix[4] * yd) + Kmatrix[2]
    vd = Kmatrix[1] * yd + Kmatrix[3]
    return np.column_stack((ud, vd))
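For comparison with add_distortion above, here is a minimal sketch of my reading of equations (1)-(3). The function name and argument layout are hypothetical, and it assumes theta is measured from normalized pinhole coordinates (subtract the principal point, divide by fx, fy), which is one place where the code above seems to differ.
def pinhole_to_fisheye(uv, K_src, K_fish, dcoeffs):
    # uv:      (N, 2) pixel coords in the pinhole image (hypothetical layout)
    # K_src:   [fx, fy, cx, cy] of the pinhole image (assumed)
    # K_fish:  [mu, mv, u0, v0] of the target fisheye image (assumed)
    # dcoeffs: [k1, k2, k3, k4] of equation (1)
    fx, fy, cx, cy = K_src
    mu, mv, u0, v0 = K_fish
    k1, k2, k3, k4 = dcoeffs
    # normalized pinhole coordinates (assumption: theta is measured from these)
    x = (uv[:, 0] - cx) / fx
    y = (uv[:, 1] - cy) / fy
    r = np.sqrt(x**2 + y**2)
    theta = np.arctan(r)                                        # angle from the optical axis
    r_d = k1*theta + k2*theta**3 + k3*theta**5 + k4*theta**7    # equation (1)
    phi = np.arctan2(y, x)
    xd, yd = r_d * np.cos(phi), r_d * np.sin(phi)               # equation (2)
    return np.column_stack((mu * xd + u0, mv * yd + v0))        # equation (3)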
Then, after obtaining the distorted coordinates, I move the pixels in this way, which is where I think the problem might be:
def main():
    image_name = "original.png"
    img = cv2.imread(image_name)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # cv2 reads the image as BGR
    w = img.shape[1]
    h = img.shape[0]
    uv_coord = read_img(image_name)
    # for adding distortion
    dmatrix = [-0.391942708316175, 0.012746418822063, -0.001374061848026, 0.005349692659231]
    # the intrinsic matrix of the original picture
    Kmatrix = np.array([9.842439e+02, 9.808141e+02, 1392/2, 2.331966e+02, 0.000000e+00])
    # Kmatrix = np.array([2234.23470710156, 2223.78349134123, 947.511596277837, 647.103139639432, -3.20443253476976])  # the distorted intrinsics
    uv = add_distortion(uv_coord, dmatrix, Kmatrix)
    i = 0
    dstimg = np.zeros_like(img)
    for x in range(w):
        for y in range(h):
            if i > (512 * 1392 - 1):
                break
            xu = uv[i][0]
            yu = uv[i][1]
            i += 1
            # if the new pixel is in bounds, copy from source pixel to destination pixel
            if 0 <= xu and xu < img.shape[1] and 0 <= yu and yu < img.shape[0]:
                dstimg[int(yu)][int(xu)] = img[int(y)][int(x)]
    img = Image.fromarray(dstimg, 'RGB')
    img.save('my.png')
    img.show()
However, this code does not perform in the way I want. Could you guys please help me with debugging it? I spent 3 days but I still could not see any problem with it. Thanks!!

Inconsistency of left-right image point reprojections after reprojectImageTo3D

Hi everyone. I'm trying to triangulate some points (dense reconstruction) lying on a plane, in a setup which involves two cameras.
[Reference image]: https://imgur.com/gOps4vP and [The other image]: https://imgur.com/VIiH9Rv
First of all, I solve the relative pose problem using the 5-point algorithm on the undistorted points to estimate the essential matrix, then I recover the pose. I'm using RANSAC.
Then, I rectify the stereo pairs the usual way.
R1, R2, Pn1, Pn2, Q, _, _ = cv2.stereoRectify(K1, dcoeffs1, K2, dcoeffs2,
                                              img1.shape[::-1], R, t,
                                              flags=cv2.CALIB_ZERO_DISPARITY,
                                              alpha=-1)

# Compute the rigid transform that OpenCV applies to world points (USEFUL LATER)
# so that the rectified reference camera becomes K_new[I|0]
tn_1 = np.zeros((3, 1))  # cameras are never translated during rectification
G1_rect = np.block([[R1, tn_1], [np.zeros((1, 3)), 1.0]])

maps1 = cv2.initUndistortRectifyMap(K1, dcoeffs1, R1, Pn1, (1920, 1080), cv2.CV_32FC1)
maps2 = cv2.initUndistortRectifyMap(K2, dcoeffs2, R2, Pn2, (1920, 1080), cv2.CV_32FC1)
img1_remap = cv2.remap(img1, maps1[0], maps1[1], cv2.INTER_LANCZOS4)
img2_remap = cv2.remap(img2, maps2[0], maps2[1], cv2.INTER_LANCZOS4)
Result of the rectification:
[Rectified reference image] https://drive.google.com/open?id=10VfgXrXFO3_lYqtO9qJXr17Dc6F1PuXU
[The other one rectified] https://drive.google.com/open?id=13ZkeMiF5xEovGmX13LSQVaJ237hoJLX0
Now I call a function that recognizes a known object in the images (the target).
#Now call a function that recognize a known object in the images (target)
# Find target
target_corners, _ = dt.detectTarget(img_scene1, img_target, 0.5) # return 4 corners of the detected polygon
target_corners = target_corners[:,0,:]
# Compute mask for the target cutout:
target_mask = mp.maskPolygon(target_corners, img_scene1.shape[::-1]) # Output: mask of same dimension of the image
Target found (please note the highlighted corners):
[Target found] https://imgur.com/QjYV8tp
Then I compute the disparity map using StereoSGBM. I'm interested in the computation of the target disparity only (I'll mask all the other points).
With the disparity map obtained, and using the 4x4 reprojection matrix Q given by stereoRectify, I perform the 3D reprojection of the disparity map.
# Compute disparity map
# https://docs.opencv.org/3.3.1/d2/d85/classcv_1_1StereoSGBM.html
window_size = 5
min_disp = 16
max_disp = 1024
num_disp = max_disp - min_disp  # Must be divisible by 16!
stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                               numDisparities=num_disp,
                               blockSize=window_size,
                               P1=8*3*window_size**2,
                               P2=32*3*window_size**2,
                               disp12MaxDiff=1,
                               uniquenessRatio=10,
                               speckleWindowSize=150,
                               speckleRange=2
                               )
print('Computing SGBM disparity...')
disp = stereo.compute(img_scene1, img_scene2).astype(np.float32) / 16.0
target_disparity = target_mask*disp
points = cv2.reprojectImageTo3D(target_disparity, Q)

# DEBUG:
cv2.namedWindow('scene1', cv2.WINDOW_NORMAL)
cv2.resizeWindow('scene1', 800, 450)
cv2.imshow('scene1', img_scene1)
cv2.namedWindow('disparity', cv2.WINDOW_NORMAL)
cv2.resizeWindow('disparity', 800, 450)
cv2.imshow('disparity', (disp-min_disp)/num_disp)
cv2.namedWindow('target_disparity', cv2.WINDOW_NORMAL)
cv2.resizeWindow('target_disparity', 800, 450)
cv2.imshow('target_disparity', target_mask*(disp-min_disp)/num_disp)
cv2.waitKey()
cv2.destroyAllWindows()

# Obtain the matrix of target 3D points from the output of reprojectImageTo3D()
mask_disp = disp > disp.min()
mask_inf = ~(np.isinf(points[:,:,0]) | np.isinf(points[:,:,1]) | np.isinf(points[:,:,2]))
mask_nan = ~(np.isnan(points[:,:,0]) | np.isnan(points[:,:,1]) | np.isnan(points[:,:,2]))
mask = mask_disp & mask_inf & mask_nan
pts3D = points[mask]
Now I have a 3D reconstruction of the image region corresponding to the target. I noted that during rectification OpenCV applies a rigid transform to world points such that the original reference camera and the new (rectified) reference camera have the same extrinsics (R = eye(3) and t = [0,0,0]'). In fact, during rectification both cameras must be rotated, and I think OpenCV simply brings the new cameras back to a reference in which the rectified reference camera has the same extrinsics as the original one. But this implies that the reconstructed 3D points will be expressed in a world reference that is not the world reference of the original camera!
So, applying the inverse rigid transform to pts3D, we obtain a reconstruction in the original reference camera frame (see the code below).
target3Dpts_hom = cv2.convertPointsToHomogeneous(target3Dpts)[:,0,:].T
target3Dpts_hom = G.T @ target3Dpts_hom
new_target3Dpts = cv2.convertPointsFromHomogeneous(target3Dpts_hom.T[:,np.newaxis,:])[:,0,:]
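A small note on why G.T works here (my reading of the code above): G1_rect has a zero translation, so its inverse reduces to the transposed rotation block. For a general rigid transform the inverse would be computed as in this sketch:
import numpy as np

def inverse_rigid(G):
    # Inverse of a 4x4 rigid transform [R t; 0 1] is [R.T  -R.T @ t; 0 1]
    R, t = G[:3, :3], G[:3, 3:4]
    G_inv = np.eye(4)
    G_inv[:3, :3] = R.T
    G_inv[:3, 3:4] = -R.T @ t
    return G_inv

# Equivalent to G.T only when the translation part of G is zero, as with G1_rect above.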
Please NOTE that if I don't perform this operation, the pts3D reprojected onto the original cameras by means of their projection matrices do not correspond to the target points!
Check the reconstruction via reprojection. Now I can reproject the new_target3Dpts:
Let me introduce the projection function that I call:
def proj_dist(P, dcoeffs, M):
    import numpy as np
    import cv2
    K, R, t, _, _, _, _ = cv2.decomposeProjectionMatrix(P)
    rotv, _ = cv2.Rodrigues(R)
    # Projection. Returns an (N,2) shaped array
    m, _ = cv2.projectPoints(M, rotv, t[0:-1], K, dcoeffs)
    m = m.squeeze()
    return m
Finally, the reprojections:
# P_kin = K_kin [eye(3)|0]   # original projection matrices of the two cameras
# P_rpi = K_rpi [R|t]
m0 = proj.proj_dist(P_kin, dcoeffs_kin, new_points).astype('int32')
for (x, y) in m0:
    x = int(x)
    y = int(y)
    cv2.circle(img_kin, (x, y), 2, (255, 255, 0), 4)
cv2.namedWindow('frame1', cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame1', 800, 450)
cv2.imshow('frame1', img_kin)
cv2.waitKey(0)

m1 = proj.proj_dist(P_rpi, dcoeffs_rpi, new_points).astype('int32')
img_rpi1 = img_rpi.copy()
for (x, y) in m1:
    x = int(x)
    y = int(y)
    cv2.circle(img_rpi1, (x, y), 2, (255, 255, 0), 4)
cv2.namedWindow('frame2', cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame2', 800, 450)
cv2.imshow('frame2', img_rpi1)
cv2.waitKey(0)
But while the reprojected points on the original reference camera are correct, this is not true for the second one... the points are simply translated, and I can't explain why.
Results: [First frame repj] https://imgur.com/S4lo9Wz
[2nd frame repj. Error] https://imgur.com/y4igaEI
Any ideas? I will include all the code now.
Thank you.
SM
I solved the problem. It is not related to reprojectImageTo3D (that works fine), but to this piece of code that I wrote and used to reproject the points onto the original frames:
def proj_dist(P, dcoeffs, M):
    import numpy as np
    import cv2
    K, R, t, _, _, _, _ = cv2.decomposeProjectionMatrix(P)
    rotv, _ = cv2.Rodrigues(R)
    # Projection. Returns an (N,2) shaped array
    m, _ = cv2.projectPoints(M, rotv, t[0:-1], K, dcoeffs)
    m = m.squeeze()
    return m
I wrote my own function for point projection:
def proj(P, M, hom=0):
    # proj(): performs the perspective projection of the 3D points M, using the
    # projection matrix P, onto the 2D image plane of a pinhole camera.
    import numpy as np
    n = M.shape[1]
    M = np.concatenate((M, np.ones((1, n))))
    # Projection
    m = P @ M
    m = m / m[2, :]
    if hom != 1:
        # Convert to Cartesian coordinates
        m = m[0:2, :]
    return m
and the problem is solved!
My function does not take lens distortion into account. I'll investigate the problem related to OpenCV's projectPoints() function further.
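For what it's worth, a plausible cause of the translation error (a guess on my part, not verified against this exact code): cv2.decomposeProjectionMatrix returns the camera centre as a 4x1 homogeneous vector rather than a translation vector, while cv2.projectPoints expects tvec = -R*C, so passing t[0:-1] directly hands it the (possibly unnormalized) camera centre. A minimal sketch of the conversion, with a hypothetical name proj_dist_fixed:
import numpy as np
import cv2

def proj_dist_fixed(P, dcoeffs, M):
    # Hypothetical fix of proj_dist(): convert the homogeneous camera centre
    # returned by decomposeProjectionMatrix() into the translation vector.
    K, R, C_hom, _, _, _, _ = cv2.decomposeProjectionMatrix(P)
    C = (C_hom[:3] / C_hom[3]).reshape(3, 1)   # camera centre in world coordinates
    tvec = -R @ C                              # translation expected by projectPoints
    rvec, _ = cv2.Rodrigues(R)
    m, _ = cv2.projectPoints(M, rvec, tvec, K, dcoeffs)
    return m.squeeze()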

Writing robust (size invariant) circle detection (Watershed)

Edit: Quick summary so far: I use the watershed algorithm, but I probably have a problem with the threshold. It didn't detect the brighter circles.
New: a fast radial symmetry transform approach, which didn't quite work either (Edit 6).
I want to detect circles of different sizes. The use case is to detect coins in an image and to extract them individually -> get each coin as a single image file.
For this I used OpenCV's Hough Circle Transform:
(https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.html)
import sys
import cv2 as cv
import numpy as np

def main(argv):
    ## [load]
    default_file = "data/newcommon_1euro.jpg"
    filename = argv[0] if len(argv) > 0 else default_file
    # Loads an image
    src = cv.imread(filename, cv.IMREAD_COLOR)
    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        print('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n')
        return -1
    ## [load]

    ## [convert_to_gray]
    # Convert it to gray
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    ## [convert_to_gray]

    ## [reduce_noise]
    # Reduce the noise to avoid false circle detection
    gray = cv.medianBlur(gray, 5)
    ## [reduce_noise]

    ## [houghcircles]
    rows = gray.shape[0]
    circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8,
                              param1=100, param2=30,
                              minRadius=0, maxRadius=120)
    ## [houghcircles]

    ## [draw]
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center = (i[0], i[1])
            # circle center
            cv.circle(src, center, 1, (0, 100, 100), 3)
            # circle outline
            radius = i[2]
            cv.circle(src, center, radius, (255, 0, 255), 3)
    ## [draw]

    ## [display]
    cv.imshow("detected circles", src)
    cv.waitKey(0)
    ## [display]
    return 0

if __name__ == "__main__":
    main(sys.argv[1:])
I tried all parameters (rows, param1, param2, minRadius, and maxRadius) to optimize the results. This worked very well for one specific image but other images with different sized coins didn't work.
Examples:
Parameters
circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 16,
                          param1=100, param2=30,
                          minRadius=0, maxRadius=120)
With the same parameters:
Changed to rows/8
I also tried two other approaches of this thread: writing robust (color and size invariant) circle detection with opencv (based on Hough transform or other features)
The approach of fireant leads to this result:
The approach of fraxel didn't work either.
For the first approach: This happens with all different sizes and also the min and max radius.
How could I change the code so that the coin size does not matter, or so that it finds the parameters itself?
Thank you in advance for any help!
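One possible direction, sketched here under assumptions rather than tested on these images: estimate a plausible radius range from the image itself (e.g. from the blobs of an Otsu threshold) and feed that into HoughCircles, so minRadius/maxRadius no longer need hand-tuning per image.
import cv2 as cv
import numpy as np

def estimate_radius_range(gray):
    # Rough, assumption-laden estimate: threshold, find external contours,
    # and derive a radius range from the enclosing circles of the large blobs.
    blur = cv.medianBlur(gray, 5)
    _, thresh = cv.threshold(blur, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    # OpenCV 4.x: findContours returns (contours, hierarchy)
    contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    radii = [cv.minEnclosingCircle(c)[1] for c in contours if cv.contourArea(c) > 100]
    if not radii:
        return 0, 0  # fall back to "no limits"
    return int(0.6 * min(radii)), int(1.4 * max(radii))

# minR, maxR = estimate_radius_range(gray)
# circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, max(1, 2 * minR),
#                           param1=100, param2=30, minRadius=minR, maxRadius=maxR)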
Edit:
I tried the watershed algorithm of Open-cv, as suggested by Alexander Reynolds: https://docs.opencv.org/3.4/d3/db4/tutorial_py_watershed.html
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('data/P1190263.jpg')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv.watershed(img,markers)
img[markers == -1] = [255,0,0]
#Display:
cv.imshow("detected circles", img)
cv.waitKey(0)
It works very well on the test image of the open-cv website:
But it performs very badly on my own images:
I can't really think of a good reason why it's not working on my images.
Edit 2:
As suggested, I looked at the intermediate images. The thresh does not look good in my opinion. Next, there is no difference between opening and dist_transform. The corresponding sure_fg shows the detected regions.
thresh:
opening:
dist_transform:
sure_bg:
sure_fg:
Edit 3:
I tried all distanceTypes and maskSizes I could find, but the results were quite the same (https://www.tutorialspoint.com/opencv/opencv_distance_transformation.htm)
Edit 4:
Furthermore, I tried to change the (first) threshold function. I used different threshold values instead of the Otsu method. The best one was 160, but it was far from good:
In the tutorial it looks like this:
It seems like the coins are somehow too bright to be detected by this algorithm, but I don't know how to improve that.
Edit 5:
Changing the overall contrast and brightness of the image (with cv.convertScaleAbs) didn't improve the results. Increasing the contrast, however, should increase the "difference" between foreground and background, at least in the normal image. But it even got worse. The corresponding threshold image didn't improve (it didn't get more white pixels).
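Since the bright coins seem to get lost at the global threshold step, a hedged alternative worth trying is to threshold something other than plain grayscale intensity, e.g. an adaptive threshold or the HSV saturation channel (assuming metallic coins on a fairly neutral background), and feed that mask into the watershed pipeline above instead of thresh:
import cv2 as cv
import numpy as np

img = cv.imread('data/P1190263.jpg')  # same input image as above (assumed path)

# Option 1: adaptive threshold on the grayscale image (local instead of global)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
thresh_adaptive = cv.adaptiveThreshold(cv.medianBlur(gray, 5), 255,
                                       cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv.THRESH_BINARY_INV, 51, 2)

# Option 2: Otsu threshold on the HSV saturation channel, where coins may
# separate better from a neutral background (an assumption about the scene)
sat = cv.cvtColor(img, cv.COLOR_BGR2HSV)[:, :, 1]
_, thresh_sat = cv.threshold(sat, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

# Either mask can then replace `thresh` in the watershed code above.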
Edit 6: I tried another approach, the fast radial symmetry transform (from here https://github.com/ceilab/frst_python)
import cv2
import numpy as np

def gradx(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use hstack to add back in the columns that were dropped as zeros
    return np.hstack((np.zeros((rows, 1)), (img[:, 2:] - img[:, :-2]) / 2.0, np.zeros((rows, 1))))

def grady(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use vstack to add back the rows that were dropped as zeros
    return np.vstack((np.zeros((1, cols)), (img[2:, :] - img[:-2, :]) / 2.0, np.zeros((1, cols))))

# Performs fast radial symmetry transform
# img: input image, grayscale
# radii: integer value for radius size in pixels (n in the original paper); also used to size gaussian kernel
# alpha: Strictness of symmetry transform (higher=more strict; 2 is good place to start)
# beta: gradient threshold parameter, float in [0,1]
# stdFactor: Standard deviation factor for gaussian kernel
# mode: BRIGHT, DARK, or BOTH
def frst(img, radii, alpha, beta, stdFactor, mode='BOTH'):
    mode = mode.upper()
    assert mode in ['BRIGHT', 'DARK', 'BOTH']
    dark = (mode == 'DARK' or mode == 'BOTH')
    bright = (mode == 'BRIGHT' or mode == 'BOTH')
    workingDims = tuple((e + 2 * radii) for e in img.shape)
    # Set up output and M and O working matrices
    output = np.zeros(img.shape, np.uint8)
    O_n = np.zeros(workingDims, np.int16)
    M_n = np.zeros(workingDims, np.int16)
    # Calculate gradients
    gx = gradx(img)
    gy = grady(img)
    # Find gradient vector magnitude
    gnorms = np.sqrt(np.add(np.multiply(gx, gx), np.multiply(gy, gy)))
    # Use beta to set threshold - speeds up transform significantly
    gthresh = np.amax(gnorms) * beta
    # Find x/y distance to affected pixels
    gpx = np.multiply(np.divide(gx, gnorms, out=np.zeros(gx.shape), where=gnorms != 0),
                      radii).round().astype(int)
    gpy = np.multiply(np.divide(gy, gnorms, out=np.zeros(gy.shape), where=gnorms != 0),
                      radii).round().astype(int)
    # Iterate over all pixels (w/ gradient above threshold)
    for coords, gnorm in np.ndenumerate(gnorms):
        if gnorm > gthresh:
            i, j = coords
            # Positively affected pixel
            if bright:
                ppve = (i + gpx[i, j], j + gpy[i, j])
                O_n[ppve] += 1
                M_n[ppve] += gnorm
            # Negatively affected pixel
            if dark:
                pnve = (i - gpx[i, j], j - gpy[i, j])
                O_n[pnve] -= 1
                M_n[pnve] -= gnorm
    # Abs and normalize O matrix
    O_n = np.abs(O_n)
    O_n = O_n / float(np.amax(O_n))
    # Normalize M matrix
    M_max = float(np.amax(np.abs(M_n)))
    M_n = M_n / M_max
    # Elementwise multiplication
    F_n = np.multiply(np.power(O_n, alpha), M_n)
    # Gaussian blur
    kSize = int(np.ceil(radii / 2))
    kSize = kSize + 1 if kSize % 2 == 0 else kSize
    S = cv2.GaussianBlur(F_n, (kSize, kSize), int(radii * stdFactor))
    return S

img = cv2.imread('data/P1190263.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
result = frst(gray, 60, 2, 0, 1, mode='BOTH')
cv2.imshow("detected circles", result)
cv2.waitKey(0)
I only get this nearly black output (it has some very dark grey shadows). I don't know what to change and would be thankful for help!
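One thing worth checking (an observation, not a full answer): frst() returns a floating-point array whose values can be small and even negative (the DARK contribution subtracts), and cv2.imshow interprets floating-point images as values in [0, 1], which can easily look almost black. A minimal sketch of normalizing the result for display:
import cv2
import numpy as np

# `result` is the float output of frst() from the code above
vis = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow("frst (normalized for display)", vis)
cv2.waitKey(0)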

How to use PIL (Python Imaging Library) to rotate an image and make the black background transparent

I want to rotate a gray "test" image and paste it onto a blue background image. Right now I can only remove the black color after rotating my gray "test" image, but there is now a white section. How can I use Python to change the "white" section to blue?
Here is my code; can someone help me? I'd appreciate it.
dst_im = Image.new("RGBA", (196,283), "blue" )
im = src_im.convert('RGBA')
rot = im.rotate( angle, expand=1 ).resize(size)
f = Image.new( 'RGBA', rot.size, (255,)*4 )
im2 = Image.composite( rot, f, rot )
im2.convert(src_im.mode)
im2_width, im2_height = im2.size
cut_box = (0, 0, im2_width, im2_height )
paste_box = ( left, top, im2_width+left, im2_height+top )
region = im2.crop( cut_box )
dst_im.paste( region, paste_box )
dst_im.save("test.gif")
I have the impression that your code could be simplified as follows:
from PIL import Image
src_im = Image.open("winter3.jpg")
angle = 45
size = 100, 100
dst_im = Image.new("RGBA", (196,283), "blue" )
im = src_im.convert('RGBA')
rot = im.rotate( angle, expand=1 ).resize(size)
dst_im.paste( rot, (50, 50), rot )
dst_im.save("test.png")
This gives the following result:
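A side note, assuming a newer Pillow version (fillcolor was added to rotate() around Pillow 5.2): the corners exposed by expand=1 can be filled explicitly, e.g. with fully transparent pixels, so the rotated image carries a clean alpha channel that serves as its own paste mask. A small sketch under that assumption:
from PIL import Image

src_im = Image.open("winter3.jpg")
dst_im = Image.new("RGBA", (196, 283), "blue")
# Fill the corners created by expand=1 with fully transparent pixels
rot = src_im.convert("RGBA").rotate(45, expand=1, fillcolor=(0, 0, 0, 0)).resize((100, 100))
dst_im.paste(rot, (50, 50), rot)  # the alpha channel of rot is used as the mask
dst_im.save("test.png")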
The other answer using PIL is clearly more succinct. I had a similar problem and had the image in an ndarray. Yipes, mine came out way more complicated than user1202136's. I'm posting it only because it demonstrates another solution using numpy and array stacking, but user1202136's solution is much better.
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage

def rgba(rgb_img, alpha):
    '''
    ' takes an rgb ndarray r x c x 3 of dtype=uint8
    ' and adds an alpha 0-255 to each pixel
    '''
    rows = len(rgb_img)  # get image dimensions
    columns = len(rgb_img[0])
    rgb_flat = rgb_img.reshape([rows * columns, 3])  # list of rgb pixels
    a = np.zeros([rows * columns, 1], dtype=np.uint8)  # alpha for each pixel
    a.fill(alpha)
    rgba = np.column_stack([rgb_flat, a])  # place 4th column
    return rgba.reshape([rows, columns, 4])  # reform into r x c x 4

def pad_with_transparent_pixels(rgba_img):
    '''
    ' takes an rgba image r x c
    ' and places it within a buffer of [0 0 0 0] to become square,
    ' with sides = diagonal of img
    '''
    rows = len(rgba_img)  # get image dimensions
    columns = len(rgba_img[0])
    diag = (rows**2 + columns**2)**0.5
    diag = int(diag) + 1
    top_pad_height = (diag - rows) // 2 + 1
    left_pad_width = (diag - columns) // 2 + 1
    top_pad = np.zeros([top_pad_height, diag, 4], dtype=np.uint8)
    left_pad = np.zeros([rows, left_pad_width, 4], dtype=np.uint8)
    right_pad = np.zeros([rows,
                          # assures total width of top_pad for row_stack:
                          diag - left_pad_width - columns,
                          4],
                         dtype=np.uint8)
    center = np.column_stack([left_pad, rgba_img, right_pad])
    return np.row_stack([top_pad, center, top_pad])

def clean_rotate(rgba_img, angle):
    rows = len(rgba_img)
    columns = len(rgba_img[0])
    diag = (rows**2 + columns**2)**.5
    diag = int(diag)
    pad_img = pad_with_transparent_pixels(rgba_img)
    rot_img = scipy.ndimage.rotate(pad_img, angle)
    rot_img_rows = len(rot_img)
    rot_img_columns = len(rot_img[0])
    crop_side = max(1, (rot_img_columns - diag) // 2)  # max to avoid splicing [:0]
    crop_top = max(1, (rot_img_rows - diag) // 2)
    print(diag, crop_side, crop_top)
    return rot_img[crop_top:-crop_top, crop_side:-crop_side]

img = plt.imread('C:\\Users\\bbrown\\Desktop\\Maurine.jpg')  # read in a jpg
figure, axes = plt.subplots(1, 2)  # create 1x2 grid of axes
axes[0].imshow(img)  # place image on first axes
rgba_image = rgba(img, 255)  # create an opaque rgba image
rot_img = clean_rotate(rgba_image, 50)
# make a pattern of 10 images
for i in range(10):
    rot_img = clean_rotate(rgba_image, 5 * i)
    axes[1].imshow(rot_img)
plt.show()
