Python: Show cartesian image in polar plot

Description:
I have data represented in a Cartesian coordinate system with 256 columns and 640 rows. Each column represents an angle, theta, from -65 deg to 65 deg. Each row represents a range, r, from 0 to 20 m.
An example is shown below (image omitted).
With the following code I try to make a grid and transform each pixel location to the location it would have on a polar grid:
def polar_image(image, bearings):
    (h, w) = image.shape
    x_max = (np.ceil(np.sin(np.deg2rad(np.max(bearings)))*h)*2+1).astype(int)
    y_max = (np.ceil(np.cos(np.deg2rad(np.min(np.abs(bearings))))*h)+1).astype(int)
    blank = np.zeros((y_max, x_max, 1), np.uint8)
    for i in range(w):
        for j in range(h):
            X = (np.sin(np.deg2rad(bearings[i]))*j)
            Y = (-np.cos(np.deg2rad(bearings[i]))*j)
            blank[(Y+h).astype(int), (X+562).astype(int)] = image[h-1-j, w-1-i]
    return blank
This returns an image like the one below (image omitted):
Questions:
This is roughly what I want to achieve, except for two things:
1) There seem to be some artifacts in the new image, and the mapping seems a bit coarse. Does someone have a suggestion on how to interpolate to get rid of this?
2) The image remains in a Cartesian representation, meaning that I don't have any polar gridlines, nor can I visualize intervals of range/angle. Does anybody know how to visualize the polar grid with axis ticks in theta and range?

You can use pyplot.pcolormesh() to plot the converted mesh:
import numpy as np
import pylab as pl
img = pl.imread("c:/tmp/Wnov4.png")
angle_max = np.deg2rad(65)
h, w = img.shape
angle, r = np.mgrid[-angle_max:angle_max:h*1j, 0:20:w*1j]
x = r * np.sin(angle)
y = r * np.cos(angle)
fig, ax = pl.subplots()
ax.set_aspect("equal")
pl.pcolormesh(x, y, img, cmap="gray");
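To also get polar gridlines with theta/range ticks (question 2), a minimal sketch using matplotlib's polar projection on the same mesh (assuming the same grayscale file as above):
import numpy as np
import matplotlib.pyplot as plt

img = plt.imread("c:/tmp/Wnov4.png")
angle_max = np.deg2rad(65)
h, w = img.shape
# same mesh as above: angle varies along rows, range along columns
angle, r = np.mgrid[-angle_max:angle_max:h*1j, 0:20:w*1j]

fig, ax = plt.subplots(subplot_kw={"projection": "polar"})
ax.pcolormesh(angle, r, img, cmap="gray")
ax.set_theta_zero_location("N")  # put 0 deg at the top, like a sonar fan
ax.set_thetamin(-65)             # clip the wedge to the field of view
ax.set_thetamax(65)
plt.show()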
Or you can use OpenCV's remap() to convert it to a new image:
import cv2
import numpy as np
from PIL import Image
img = cv2.imread(r"c:/tmp/Wnov4.png", cv2.IMREAD_GRAYSCALE)
angle_max = np.deg2rad(65)
r_max = 20
x = np.linspace(-20, 20, 800)
y = np.linspace(20, 0, 400)
y, x = np.ix_(y, x)
r = np.hypot(x, y)
a = np.arctan2(x, y)
map_x = r / r_max * img.shape[1]
map_y = a / (2 * angle_max) * img.shape[0] + img.shape[0] * 0.5
img2 = cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32), cv2.INTER_CUBIC)
Image.fromarray(img2)
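Note that cv2.remap builds the output by sampling the source at each destination pixel, and cv2.INTER_CUBIC interpolates between source pixels, which also addresses the coarse mapping and artifacts from question 1.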

Related

Rotate polygons without cutting edges

I am writing augmentation code to rotate annotated polygons inside images. I wrote the code below, but it is not working correctly; just copy-paste it to reproduce the results. Thank you for helping me out.
Image: (omitted)
I need to rotate the image as well as the polygon by the given angle. Currently the image is rotated, but the polygon stays in place. I tried the code below; it rotates the polygon, but not into the right position.
import math
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from math import sin, cos, radians
import requests
from io import BytesIO

def rotatePolygon(polygon, degrees, height, width):
    """
    Description:
        Rotate polygon by the given angle about its center.
    Input:
        polygon (list of tuples) : list of (x, y) coordinate tuples,
            e.g. [(1,2), (2,3), (4,5)]
        degrees (int) : rotation angle in degrees
    Output:
        polygon (list of tuples) : polygon rotated by the given angle,
            e.g. [(1,2), (2,3), (4,5)]
    """
    # Convert angle to radians
    theta = radians(degrees)
    # Get sin and cos of theta
    cosang, sinang = cos(theta), sin(theta)
    # Unpack the polygon coordinates
    y, x = [i for i in zip(*polygon)]
    # Find the center point of the image to use as pivot
    cx = width / 2
    cy = height / 2
    # Rotate every point
    new_points = []
    for x, y in zip(x, y):
        tx, ty = x - cx, y - cy
        new_x = (tx * cosang + ty * sinang) + cx
        new_y = (-tx * sinang + ty * cosang) + cy
        new_points.append((new_y, new_x))
    return new_points

# Polygon
xy = [(85, 384), (943, 374), (969, 474), (967, 527), (12, 540), (7, 490)]
degrees = 270

# Getting the image from a URL (cached locally after the first download)
try:
    img = Image.open("polygon_image.png")
except:
    url = "https://github.com/SohaibAnwaar/Mask---RCNN-Polygons-/blob/main/2_image_augmentation/extras/problamatic_image.jpg?raw=true"
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    img.save("polygon_image.png")

# Rotating the image
rotated_image = img.rotate(degrees, expand=True)
h, w = img.size
print("NotRotated", xy)
rotated_xy = rotatePolygon(xy, 360 - degrees, h, w)

# Plotting the rotated image
img1 = ImageDraw.Draw(rotated_image)
img1.polygon(rotated_xy, fill="#FFF000", outline="blue")

# Plotting the straight image
img1 = ImageDraw.Draw(img)
img1.polygon(xy, fill="#FFF000", outline="blue")

plt.imshow(rotated_image)
plt.show()
plt.imshow(img)
plt.show()
The rotation equations are:
xnew = x * cos(theta) - y * sin(theta)
ynew = x * sin(theta) + y * cos(theta)
The only mistake you are making is here; the rotation should be:
new_x = (tx*cosang - ty*sinang) + cx
new_y = (tx*sinang + ty*cosang) + cy
Also, after rotating the image, cx and cy change, because the rotated image has different dimensions.
Your complete code is below:
import math
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from math import sin, cos, radians
import requests
from io import BytesIO

def rotatePolygon(polygon, degrees, height, width):
    """
    Description:
        Rotate polygon by the given angle about the image center.
    Input:
        polygon (list of tuples) : list of (x, y) coordinate tuples,
            e.g. [(1,2), (2,3), (4,5)]
        degrees (int) : rotation angle in degrees
        height (list) : [original height, rotated height]
        width (list) : [original width, rotated width]
    Output:
        polygon (list of tuples) : polygon rotated by the given angle,
            e.g. [(1,2), (2,3), (4,5)]
    """
    # Convert angle to radians
    theta = radians(degrees)
    # Get sin and cos of theta
    cosang, sinang = cos(theta), sin(theta)
    # Unpack the polygon coordinates
    y, x = [i for i in zip(*polygon)]
    # Pivot: center of the original image...
    cx1 = width[0] / 2
    cy1 = height[0] / 2
    # ...and center of the rotated image
    cx2 = width[1] / 2
    cy2 = height[1] / 2
    # Rotate every point about the original center, then shift to the new center
    new_points = []
    for x, y in zip(x, y):
        tx, ty = x - cx1, y - cy1
        new_x = (tx * cosang - ty * sinang) + cx2
        new_y = (tx * sinang + ty * cosang) + cy2
        new_points.append((new_y, new_x))
    return new_points

# Polygon
xy = [(85, 384), (943, 374), (969, 474), (967, 527), (12, 540), (7, 490)]
degrees = 270

# Getting the image from a URL (cached locally after the first download)
try:
    img = Image.open("polygon_image.png")
except:
    url = "https://github.com/SohaibAnwaar/Mask---RCNN-Polygons-/blob/main/2_image_augmentation/extras/problamatic_image.jpg?raw=true"
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    img.save("polygon_image.png")

# Rotating the image
rotated_image = img.rotate(degrees, expand=True)
h1, w1 = img.size
h2, w2 = rotated_image.size
print("NotRotated", xy)
rotated_xy = rotatePolygon(xy, degrees, [h1, h2], [w1, w2])

# Plotting the rotated image
img1 = ImageDraw.Draw(rotated_image)
img1.polygon(rotated_xy, fill="#FFF000", outline="blue")

# Plotting the straight image
img1 = ImageDraw.Draw(img)
img1.polygon(xy, fill="#FFF000", outline="blue")

plt.imshow(rotated_image)
plt.show()
plt.imshow(img)
plt.show()
For a rotation to the right by a right angle, use the following equations (assuming X/U left-to-right and Y/V top-down):
U = H - Y
V = X
where the image size is W x H.
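A quick numeric check of these equations (the helper name and the numbers are just for illustration):
def rotate_point_90cw(x, y, h):
    # rotation to the right by a right angle: U = H - Y, V = X
    return h - y, x

# in a 640 x 480 image (W x H), the point (X, Y) = (10, 20) maps to (U, V) = (460, 10)
print(rotate_point_90cw(10, 20, 480))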

How to draw an HSV color wheel using matplotlib

Is it possible to draw an HSV color wheel using matplotlib in Python? I want to get an HSV color wheel like the following image as a reference for visualizing optical flow. In this image, every HSV color can be expressed as a vector whose direction is encoded in hue and whose length is encoded in saturation, so that I can easily compare it with optical flow. I have searched online but could not find a satisfying solution. Any help will be appreciated!
The following code uses Colour for the HSV to RGB conversion:
import colour
import numpy as np

def colour_wheel(samples=1024, clip_circle=True, method='Colour'):
    xx, yy = np.meshgrid(
        np.linspace(-1, 1, samples), np.linspace(-1, 1, samples))

    S = np.sqrt(xx ** 2 + yy ** 2)
    H = (np.arctan2(xx, yy) + np.pi) / (np.pi * 2)

    HSV = colour.utilities.tstack([H, S, np.ones(H.shape)])
    RGB = colour.HSV_to_RGB(HSV)

    if clip_circle:
        RGB[S > 1] = 0
        A = np.where(S > 1, 0, 1)
    else:
        A = np.ones(S.shape)

    if method.lower() == 'matplotlib':
        RGB = colour.utilities.orient(RGB, '90 CW')
    elif method.lower() == 'nuke':
        RGB = colour.utilities.orient(RGB, 'Flip')
        RGB = colour.utilities.orient(RGB, '90 CW')

    R, G, B = colour.utilities.tsplit(RGB)

    return colour.utilities.tstack([R, G, B, A])
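A minimal usage sketch (assuming the colour package is installed and the function above is in scope):
import matplotlib.pyplot as plt

RGBA = colour_wheel(samples=512, method='matplotlib')
plt.imshow(RGBA)
plt.axis('off')
plt.show()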
We use it in an interactive Jupyter Notebook Matplotlib widget; the repository is available here: https://github.com/colour-science/gamut-mapping-ramblings
The answer by Kel Solaar is pretty good, but it doesn't actually use matplotlib.
Here's a similar result using matplotlib, numpy and colorsys.
import math
import colorsys
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='polar')

rho = np.linspace(0, 1, 100)  # radius of 1, distance from center to outer edge
phi = np.linspace(0, math.pi*2., 1000)  # in radians, one full circle

RHO, PHI = np.meshgrid(rho, phi)  # get every combination of rho and phi

h = (PHI - PHI.min()) / (PHI.max() - PHI.min())  # use angle to determine hue, normalized from 0-1
h = np.flip(h)
s = RHO  # saturation is set as a function of radius
v = np.ones_like(RHO)  # value is constant

# convert the np arrays to lists; this actually speeds up the colorsys calls
h, s, v = h.flatten().tolist(), s.flatten().tolist(), v.flatten().tolist()
c = [colorsys.hsv_to_rgb(*x) for x in zip(h, s, v)]
c = np.array(c)

ax.scatter(PHI, RHO, c=c)
_ = ax.axis('off')
Note that this results in a figure where blue/purple points upwards while red points towards the right. In my experience, that's the typical optical-flow hue plot. To achieve the image the OP posted, change the following, so the figure starts drawing on the left instead of the right:
phi = np.linspace(math.pi, math.pi*3.,1000 ) # in radians, one full circle

Image-Processing: Converting normal pictures into FishEye images with intrinsic matrix

I need to synthesize many FishEye images with different intrinsic matrices based on normal pictures. I am following the method mentioned in this paper.
Ideally, if the algorithm is correct, the fisheye effect should look like the reference image (omitted). But when I use my algorithm to convert a picture, the result looks wrong (images omitted).
So below is my code's flow:
1. First, I read the raw image and collect its pixel coordinates:
from scipy import ndimage
import numpy as np

def read_img(image):
    img = ndimage.imread(image)  # returns an H x W x 4 array: [R, G, B, 255]
    img_shape = img.shape
    print(img_shape)
    # get the pixel coordinates
    w = img_shape[1]  # the width
    h = img_shape[0]  # the height
    uv_coord = []
    for u in range(w):
        for v in range(h):
            uv_coord.append([float(u), float(v)])  # records the coords as [x1,y1], [x1,y2], [x1,y3], ...
    return np.array(uv_coord)
Then, based on the paper:
r(θ) = k1·θ + k2·θ^3 + k3·θ^5 + k4·θ^7,   (1)
where the k's are the distortion coefficients.
Given pixel coordinates (x, y) in the pinhole projection image, the corresponding image coordinates (x', y') in the fisheye can be computed as:
x' = r(θ)·cos(ϕ),  y' = r(θ)·sin(ϕ),   (2)
where ϕ = arctan((y − y0)/(x − x0)), and (x0, y0) are the coordinates of the principal point in the pinhole projection image.
The image coordinates (x', y') are then converted into pixel coordinates (xf, yf):
xf = mu·x' + u0,  yf = mv·y' + v0,   (3)
where (u0, v0) are the coordinates of the principal point in the fisheye, and mu, mv denote the number of pixels per unit distance in the horizontal and vertical directions. So I am guessing mu, mv are just [fx, fy] from the intrinsic matrix, and that u0, v0 are [cx, cy].
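For reference, a minimal sketch of equations (1)-(3) applied to a single point (the function and its arguments are hypothetical, and it assumes (x, y) are already normalized camera coordinates so that θ = arctan(ρ)):
import numpy as np

def fisheye_point(x, y, x0, y0, k, mu, mv, u0, v0):
    # theta: angle of the incoming ray, measured from the principal point
    theta = np.arctan(np.hypot(x - x0, y - y0))
    r = k[0]*theta + k[1]*theta**3 + k[2]*theta**5 + k[3]*theta**7   # eq. (1)
    phi = np.arctan2(y - y0, x - x0)
    xp, yp = r * np.cos(phi), r * np.sin(phi)                        # eq. (2)
    return mu * xp + u0, mv * yp + v0                                # eq. (3)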
def add_distortion(sourceUV, dmatrix, Kmatrix):
    '''Remap the pixels of the given original image coordinates.
    Input arguments:
        dmatrix -- the distortion coefficients [k1, k2, k3, k4] for tweaking purposes
        Kmatrix -- the intrinsics [fx, fy, cx, cy, s]'''
    u = sourceUV[:, 0]  # width in x
    v = sourceUV[:, 1]  # height in y

    rho = np.sqrt(u**2 + v**2)
    # get theta
    theta = np.arctan(rho, np.full_like(u, 1))

    # rho_mat = np.array([rho, rho**3, rho**5, rho**7])
    rho_mat = np.array([theta, theta**3, theta**5, theta**7])
    # get rho(theta) = k1*theta + k2*theta**3 + k3*theta**5 + k4*theta**7
    rho_d = dmatrix @ rho_mat
    # get phi
    phi = np.arctan2((v - Kmatrix[3]), (u - Kmatrix[2]))

    xd = rho_d * np.cos(phi)
    yd = rho_d * np.sin(phi)

    # convert the coords from the image plane back to pixel coords
    ud = Kmatrix[0] * (xd + Kmatrix[4] * yd) + Kmatrix[2]
    vd = Kmatrix[1] * yd + Kmatrix[3]
    return np.column_stack((ud, vd))
Then, after obtaining the distorted coordinates, I move the pixels as follows, which is where I think the problem might be:
import cv2
import numpy as np
from PIL import Image

def main():
    image_name = "original.png"
    img = cv2.imread(image_name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 reads the image as BGR
    w = img.shape[1]
    h = img.shape[0]
    uv_coord = read_img(image_name)

    # for adding distortion
    dmatrix = [-0.391942708316175, 0.012746418822063, -0.001374061848026, 0.005349692659231]
    # the intrinsic matrix of the original picture
    Kmatrix = np.array([9.842439e+02, 9.808141e+02, 1392/2, 2.331966e+02, 0.000000e+00])
    # Kmatrix = np.array([2234.23470710156, 2223.78349134123, 947.511596277837, 647.103139639432, -3.20443253476976])  # the distorted intrinsics

    uv = add_distortion(uv_coord, dmatrix, Kmatrix)

    i = 0
    dstimg = np.zeros_like(img)
    for x in range(w):
        for y in range(h):
            if i > (512 * 1392 - 1):
                break
            xu = uv[i][0]
            yu = uv[i][1]
            i += 1
            # if the new pixel is in bounds, copy from source pixel to destination pixel
            if 0 <= xu < img.shape[1] and 0 <= yu < img.shape[0]:
                dstimg[int(yu)][int(xu)] = img[int(y)][int(x)]

    img = Image.fromarray(dstimg, 'RGB')
    img.save('my.png')
    img.show()
However, this code does not perform the way I want. Could you please help me debug it? I have spent 3 days on it but still cannot see the problem. Thanks!

How to find a point after warpPerspective?

I have a set of coordinates/points that I found in the original image before warpPerspective. How do I get the corresponding points in the now cropped and perspective-corrected image?
For example:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
img = cv.imread('sudoku.png')
rows,cols,ch = img.shape
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
point = np.array([[10,10]])
M = cv.getPerspectiveTransform(pts1,pts2)
dst = cv.warpPerspective(img,M,(300,300))
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
How does the coordinate [10, 10] in img map to the dst image?
You have to perform the same transformation (mathematically) on the points as you did on the image. In this case that means using cv2.perspectiveTransform (note that the input needs to have 1 row per point, 1 column, and 2 channels -- the first being X, the second the Y coordinate).
This function transforms all the input points; it doesn't perform any cropping. You will need to post-process the transformed coordinates and discard those that fall outside the crop area. In your case you want to retain points where (0 <= x < 300) and (0 <= y < 300).
Sample code:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt

img = cv.imread('sudoku.png')
rows, cols, ch = img.shape

pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
points = np.float32([[[10, 10]], [[116, 128]], [[254, 261]]])

M = cv.getPerspectiveTransform(pts1, pts2)

dst = cv.warpPerspective(img, M, (300, 300))

# Transform the points
transformed = cv.perspectiveTransform(points, M)

# Perform the cropping -- filter out points that are outside the crop area
cropped = []
for pt in transformed:
    x, y = pt[0]
    if 0 <= x < dst.shape[1] and 0 <= y < dst.shape[0]:
        print("Valid point (%d, %d)" % (x, y))
        cropped.append([[x, y]])
    else:
        print("Out-of-bounds point (%d, %d)" % (x, y))

# Turn it back into a single numpy array
cropped = np.hstack(cropped)

# Visualize
plt.subplot(121)
plt.imshow(img)
for pt in points:
    x, y = pt[0]
    plt.scatter(x, y, s=100, c='red', marker='x')
plt.title('Input')
plt.subplot(122)
plt.imshow(dst)
for pt in transformed:
    x, y = pt[0]
    plt.scatter(x, y, s=100, c='red', marker='x')
plt.title('Output')
plt.show()
Console Output:
Out-of-bounds point (-53, -63)
Valid point (63, 67)
Valid point (192, 194)
Visualization: (plots omitted; the input and output images with the points marked)
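Going the other way (mapping a point in dst back to img) is the same operation with the inverse matrix -- a minimal sketch, assuming M is the matrix computed above:
import numpy as np
import cv2 as cv

M_inv = np.linalg.inv(M)           # invert the forward homography
pt_dst = np.float32([[[63, 67]]])  # a point in the warped image
pt_src = cv.perspectiveTransform(pt_dst, M_inv)
print(pt_src)                      # approximately its location in img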

Python Image distortion

I'm trying to apply a ripple effect to an image in Python.
I found Pillow's im.transform(im.size, Image.MESH, ...) -- is it possible with that?
Maybe I have to load the image with numpy and apply the algorithm myself.
I also found this: http://www.pygame.org/project-Water+Ripples-1239-.html
Another way would be to do it manually, but I don't know the algorithm. This is my start; it doesn't do anything yet...
#!/usr/bin/env python3
from PIL import Image
import sys
import numpy
import math

im = Image.open(sys.argv[1])
im.show()
matrix = numpy.asarray(im)
width = im.size[0]
height = im.size[1]

amplitude = ?  # parameters
frequency = ?

matrix_dest = numpy.zeros((im.size[0], im.size[1], 3))
for x in range(0, width):
    for y in range(0, height):
        pass  # ç_ç

im2 = Image.fromarray(numpy.uint8(matrix_dest))
im2.show()
EDIT:
I'd really like to keep this structure (using Pillow; I already use it extensively in my project, and if I can, I wouldn't add any other dependency), i.e. without including scipy or matplotlib.
With the following code I get the distortion I wanted, but the colors are screwed up.
Maybe I have to apply the distortion to the R, G, B planes separately and then compose the result into one image.
Or palettize the image and then apply the original palette.
(By the way, the image will be used as a texture to display moving water in a 3D environment.)
im = Image.open(sys.argv[1])
im.show()

m = numpy.asarray(im)
m2 = numpy.zeros((im.size[0], im.size[1], 3))

width = im.size[0]
height = im.size[1]

A = m.shape[0] / 3.0
w = 1.0 / m.shape[1]

shift = lambda x: A * numpy.sin(2.0*numpy.pi*x * w)

for i in range(m.shape[0]):
    print(int(shift(i)))
    m2[:, i] = numpy.roll(m[:, i], int(shift(i)))

im2 = Image.fromarray(numpy.uint8(m2))
im2.show()
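A likely cause of the scrambled colors (an observation, not from the original thread): numpy.roll without an axis argument flattens the array first, so for a color image each column slice m[:, i] has shape (height, 3) and the R, G, B values get rolled into one another. Rolling along axis 0 shifts whole pixels instead:
m2[:, i] = numpy.roll(m[:, i], int(shift(i)), axis=0)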
You could use np.roll to rotate each row or column according to some sine function.
from scipy.misc import lena
import numpy as np
import matplotlib.pyplot as plt

img = lena()
A = img.shape[0] / 3.0
w = 2.0 / img.shape[1]

shift = lambda x: A * np.sin(2.0*np.pi*x * w)

for i in range(img.shape[0]):
    img[:, i] = np.roll(img[:, i], int(shift(i)))

plt.imshow(img, cmap=plt.cm.gray)
plt.show()
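Note that scipy.misc.lena has been removed from recent SciPy releases; the same effect can be reproduced with a sample image from scikit-image (assuming scikit-image is installed):
import numpy as np
import matplotlib.pyplot as plt
from skimage import data

img = data.camera().copy()  # any 2-D grayscale image works here
A = img.shape[0] / 3.0
w = 2.0 / img.shape[1]
shift = lambda x: A * np.sin(2.0 * np.pi * x * w)
for i in range(img.shape[0]):
    img[:, i] = np.roll(img[:, i], int(shift(i)))
plt.imshow(img, cmap=plt.cm.gray)
plt.show()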
Why don't you try something like the following (assuming im is a 2-D grayscale numpy array and cols = im.shape[1]):
import numpy as np
import scipy.ndimage

for x in range(cols):
    column = im[:, x]
    y = int(np.floor(np.sin(x) * 10) + 10)
    kernel = np.zeros(20)
    kernel[y] = 1  # a shifted delta: convolving with it translates the column
    im[:, x] = scipy.ndimage.convolve(column, kernel, mode='nearest')
I threw this together just now, so you'll need to tweak it a bit. The frequency of the sine is definitely too high (check here), but I think overall this should work.
I had a similar problem where the colors sometimes appeared to be messed up (I was getting some weird red lines) after applying the sine when attempting the solutions proposed here, and I couldn't resolve it.
I understand the original poster doesn't want more dependencies if possible, but for those unrestricted, here is an alternative sample solution provided by the scikit-image docs:
http://scikit-image.org/docs/dev/auto_examples/transform/plot_piecewise_affine.html#sphx-glr-auto-examples-transform-plot-piecewise-affine-py
Copying from the doc above:
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.astronaut()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
out_rows = image.shape[0] - 1.5 * 50
out_cols = cols
out = warp(image, tform, output_shape=(out_rows, out_cols))
fig, ax = plt.subplots()
ax.imshow(out)
ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
ax.axis((0, out_cols, out_rows, 0))
plt.show()
