I'm drawing a bunch of lines with the Python Imaging Library's ImageDraw.line(), but they look horrid since I can't find a way to anti-alias them. How can I anti-alias lines in PIL?
This is a quickly hacked-together function to draw an anti-aliased line with PIL. I wrote it after googling for the same issue, seeing this post, failing to install aggdraw, and being on a tight deadline. It's an implementation of Xiaolin Wu's line algorithm. I hope it helps anyone googling for the same thing! :)
"""Library to draw an antialiased line."""
# http://stackoverflow.com/questions/3122049/drawing-an-anti-aliased-line-with-thepython-imaging-library
# https://en.wikipedia.org/wiki/Xiaolin_Wu%27s_line_algorithm
import math
def plot(draw, img, x, y, c, col, steep, dash_interval):
"""Draws an antiliased pixel on a line."""
if steep:
x, y = y, x
if x < img.size[0] and y < img.size[1] and x >= 0 and y >= 0:
c = c * (float(col[3]) / 255.0)
p = img.getpixel((x, y))
x = int(x)
y = int(y)
if dash_interval:
d = dash_interval - 1
if (x / dash_interval) % d == 0 and (y / dash_interval) % d == 0:
return
draw.point((x, y), fill=(
int((p[0] * (1 - c)) + col[0] * c),
int((p[1] * (1 - c)) + col[1] * c),
int((p[2] * (1 - c)) + col[2] * c), 255))
def iround(x):
"""Rounds x to the nearest integer."""
return ipart(x + 0.5)
def ipart(x):
"""Floors x."""
return math.floor(x)
def fpart(x):
"""Returns the fractional part of x."""
return x - math.floor(x)
def rfpart(x):
"""Returns the 1 minus the fractional part of x."""
return 1 - fpart(x)
def draw_line_antialiased(draw, img, x1, y1, x2, y2, col, dash_interval=None):
"""Draw an antialised line in the PIL ImageDraw.
Implements the Xialon Wu antialiasing algorithm.
col - color
"""
dx = x2 - x1
if not dx:
draw.line((x1, y1, x2, y2), fill=col, width=1)
return
dy = y2 - y1
steep = abs(dx) < abs(dy)
if steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
dx, dy = dy, dx
if x2 < x1:
x1, x2 = x2, x1
y1, y2 = y2, y1
gradient = float(dy) / float(dx)
# handle first endpoint
xend = round(x1)
yend = y1 + gradient * (xend - x1)
xgap = rfpart(x1 + 0.5)
xpxl1 = xend # this will be used in the main loop
ypxl1 = ipart(yend)
plot(draw, img, xpxl1, ypxl1, rfpart(yend) * xgap, col, steep,
dash_interval)
plot(draw, img, xpxl1, ypxl1 + 1, fpart(yend) * xgap, col, steep,
dash_interval)
intery = yend + gradient # first y-intersection for the main loop
# handle second endpoint
xend = round(x2)
yend = y2 + gradient * (xend - x2)
xgap = fpart(x2 + 0.5)
xpxl2 = xend # this will be used in the main loop
ypxl2 = ipart(yend)
plot(draw, img, xpxl2, ypxl2, rfpart(yend) * xgap, col, steep,
dash_interval)
plot(draw, img, xpxl2, ypxl2 + 1, fpart(yend) * xgap, col, steep,
dash_interval)
# main loop
for x in range(int(xpxl1 + 1), int(xpxl2)):
plot(draw, img, x, ipart(intery), rfpart(intery), col, steep,
dash_interval)
plot(draw, img, x, ipart(intery) + 1, fpart(intery), col, steep,
dash_interval)
intery = intery + gradient
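For completeness, here is a small usage sketch (not part of the original snippet; the canvas size, colour, and output file name are arbitrary):

from PIL import Image, ImageDraw

img = Image.new("RGBA", (200, 200), (255, 255, 255, 255))
draw = ImageDraw.Draw(img)
# Red antialiased line from (10, 20) to (180, 150); the colour needs an alpha channel.
draw_line_antialiased(draw, img, 10, 20, 180, 150, (255, 0, 0, 255))
img.save("antialiased_line.png")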
I had a similar problem: my lines had rough edges where they changed direction. I took a cue from how lines are drawn in iOS and came up with this code. It puts rounded line caps on the ends of the lines and really cleans things up. It's not exactly anti-aliasing, but I'm totally new to PIL and had such a hard time finding an answer that I figured I would share. It needs some tweaking and there is probably a better way, but it does what I need :)
from PIL import Image, ImageDraw


class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y


class DrawLines:
    def draw(self, points, color, imageName):
        img = Image.new("RGBA", [1440, 1080], (255, 255, 255, 0))
        draw = ImageDraw.Draw(img)
        linePoints = []
        for point in points:
            # Draw a filled circle at each vertex to act as a rounded line cap.
            draw.ellipse((point.x-7, point.y-7, point.x+7, point.y+7), fill=color)
            linePoints.append(point.x)
            linePoints.append(point.y)
        draw.line(linePoints, fill=color, width=14)
        img.save(imageName)


p1 = Point(100, 200)
p2 = Point(190, 250)
points = [p1, p2]
red = (255, 0, 0)
drawLines = DrawLines()
drawLines.draw(points, red, "C:\\test.png")
I am trying to detect the lines within an image using the Hough Transformation. Therefore I first create the accumulator like this:
from math import hypot, pi, cos, sin
from PIL import Image
import numpy as np
import cv2 as cv
import math
def hough(img):
    thetaAxisSize = 460  # width of the hough space image
    rAxisSize = 360      # height of the hough space image
    rAxisSize = int(rAxisSize/2)*2  # make sure that this number is even
    img = im.load()
    w, h = im.size
    houghed_img = Image.new("L", (thetaAxisSize, rAxisSize), 0)  # sets the image size
    pixel_houghed_img = houghed_img.load()

    max_radius = hypot(w, h)
    d_theta = pi / thetaAxisSize
    d_rho = max_radius / (rAxisSize/2)

    # Accumulator
    for x in range(0, w):
        for y in range(0, h):
            treshold = 255
            col = img[x, y]
            if col >= treshold:  # determines for each pixel at (x,y) if there is enough evidence of a straight line at that pixel
                for vx in range(0, thetaAxisSize):
                    theta = d_theta * vx  # angle between the x axis and the line connecting the origin with that closest point
                    rho = x*cos(theta) + y*sin(theta)  # distance from the origin to the closest point on the straight line
                    vy = rAxisSize/2 + int(rho/d_rho+0.5)  # compute the y value in the hough space image
                    pixel_houghed_img[vx, vy] += 1  # voting

    return houghed_img
And then call the function like this:
im = Image.open("img3.pgm").convert("L")
houghed_img = hough(im)
houghed_img.save("ho.bmp")
houghed_img.show()
The result seems to be okay:
So here comes the problem. I now want to find the top 3 highest values in the hough space and transform them back into 3 lines. The highest values should correspond to the strongest lines.
Therefore I first look for the highest values within the pixel array and take the X and Y values of the maxima I find. From my understanding, these X and Y values are my rho and theta. I find the maxima like this:
def find_maxima(houghed_img):
    w, h = houghed_img.size
    max_radius = hypot(w, h)
    pixel_houghed_img = houghed_img.load()

    max1, max2, max3 = 0, 0, 0
    x1position, x2position, x3position = 0, 0, 0
    y1position, y2position, y3position = 0, 0, 0
    rho1, rho2, rho3 = 0, 0, 0
    theta1, theta2, theta3 = 0, 0, 0

    for x in range(1, w):
        for y in range(1, h):
            value = pixel_houghed_img[x, y]
            if(value > max1):
                max1 = value
                x1position = x
                y1position = y
                rho1 = x
                theta1 = y
            elif(value > max2):
                max2 = value
                x2position = x
                y2position = y
                rho2 = x
                theta2 = y
            elif(value > max3):
                max3 = value
                x3position = x
                y3position = y
                rho3 = x
                theta3 = y

    print('max', max1, max2, max3)
    print('rho', rho1, rho2, rho3)
    print('theta', theta1, theta2, theta3)

    # Results of the print:
    # ('max', 255, 255, 255)
    # ('rho', 1, 1, 1)
    # ('theta', 183, 184, 186)

    return rho1, theta1, rho2, theta2, rho3, theta3
And now I want to use these rho and theta values to draw the detected lines. I am doing this with the following code:
img_copy = np.ones(im.size)
rho1, theta1, rho2, theta2, rho3, theta3 = find_maxima(houghed_img)
a1 = math.cos(theta1)
b1 = math.sin(theta1)
x01 = a1 * rho1
y01 = b1 * rho1
pt11 = (int(x01 + 1000*(-b1)), int(y01 + 1000*(a1)))
pt21 = (int(x01 - 1000*(-b1)), int(y01 - 1000*(a1)))
cv.line(img_copy, pt11, pt21, (0,0,255), 3, cv.LINE_AA)
a2 = math.cos(theta2)
b2 = math.sin(theta2)
x02 = a2 * rho2
y02 = b2 * rho2
pt12 = (int(x02 + 1000*(-b2)), int(y02 + 1000*(a2)))
pt22 = (int(x02 - 1000*(-b2)), int(y02 - 1000*(a2)))
cv.line(img_copy, pt12, pt22, (0,0,255), 3, cv.LINE_AA)
a3 = math.cos(theta3)
b3 = math.sin(theta3)
x03 = a3 * rho3
y03 = b3 * rho3
pt13 = (int(x03 + 1000*(-b3)), int(y03 + 1000*(a3)))
pt23 = (int(x03 - 1000*(-b3)), int(y03 - 1000*(a3)))
cv.line(img_copy, pt13, pt23, (0,0,255), 3, cv.LINE_AA)
cv.imshow('lines', img_copy)
cv.waitKey(0)
cv.destroyAllWindows()
However, the result seems to be wrong:
So my assumption is that I am either doing something wrong when I assign the rho and theta values in the find_maxima() function, meaning that something is wrong with this:
max1 = value
x1position = x
y1position = y
rho1 = x
theta1 = y
OR that I am doing something wrong when translating the rho and theta values back into a line.
I would be very thankful if someone can help me with that!
Edit1: As requested, please find below the original image in which I want to find the lines:
Edit2:
Thanks to the input of @Alessandro Jacopson and @Cris Luengo, I was able to make some changes that definitely give me some hope!
In my def hough(img): I was setting the threshold to 255, which means that I only voted for white pixels. That is wrong, since I want to look at the black pixels: those pixels indicate lines, not the white background of my image. So the calculation of the accumulator in def hough(img): now looks like this:
# Accumulator
for x in range(0, w):
    for y in range(0, h):
        treshold = 0
        col = img[x, y]
        if col <= treshold:  # determines for each pixel at (x,y) if there is enough evidence of a straight line at that pixel
            for vx in range(0, thetaAxisSize):
                theta = d_theta * vx  # angle between the x axis and the line connecting the origin with that closest point
                rho = x*cos(theta) + y*sin(theta)  # distance from the origin to the closest point on the straight line
                vy = rAxisSize/2 + int(rho/d_rho+0.5)  # compute the y value in the hough space image
                pixel_houghed_img[vx, vy] += 1  # voting

return houghed_img
This leads to the following accumulator and the following rho and theta values when using the find_maxima() function:
# Results of the prints: (now top 8 instead of top 3)
# ('max', 155, 144, 142, 119, 119, 104, 103, 98)
# ('rho', 120, 264, 157, 121, 119, 198, 197, 197)
# ('theta', 416, 31, 458, 414, 417, 288, 291, 292)
The lines that I can draw from these values look like this:
So these results are much better, but something still seems to be wrong. I have a strong suspicion that something is still wrong here:
for x in range(1, w):
    for y in range(1, h):
        value = pixel_houghed_img[x, y]
        if(value > max1):
            max1 = value
            x1position = x
            y1position = y
            rho1 = value
            theta1 = x
Here I am setting rho and theta to values in [0...w] and [0...h] respectively. I think this is wrong, since in the hough space the values of X and Y are not 0, 1, 2, 3... because we are in another space. So I assume that I have to multiply X and Y by something to bring them back into hough space. But this is just an assumption; maybe you guys can think of something else?
Again thank you very much to Alessandro and Cris for helping me out here!
Edit3: Working code, thanks to @Cris Luengo
from math import hypot, pi, cos, sin
from PIL import Image
import numpy as np
import cv2 as cv
import math
def hough(img):
    img = im.load()
    w, h = im.size

    thetaAxisSize = w  # width of the hough space image
    rAxisSize = h      # height of the hough space image
    rAxisSize = int(rAxisSize/2)*2  # make sure that this number is even

    houghed_img = Image.new("L", (thetaAxisSize, rAxisSize), 0)  # sets the image size
    pixel_houghed_img = houghed_img.load()

    max_radius = hypot(w, h)
    d_theta = pi / thetaAxisSize
    d_rho = max_radius / (rAxisSize/2)

    # Accumulator
    for x in range(0, w):
        for y in range(0, h):
            treshold = 0
            col = img[x, y]
            if col <= treshold:  # determines for each pixel at (x,y) if there is enough evidence of a straight line at that pixel
                for vx in range(0, thetaAxisSize):
                    theta = d_theta * vx  # angle between the x axis and the line connecting the origin with that closest point
                    rho = x*cos(theta) + y*sin(theta)  # distance from the origin to the closest point on the straight line
                    vy = rAxisSize/2 + int(rho/d_rho+0.5)  # compute the y value in the hough space image
                    pixel_houghed_img[vx, vy] += 1  # voting

    return houghed_img, rAxisSize, d_rho, d_theta
def find_maxima(houghed_img, rAxisSize, d_rho, d_theta):
    w, h = houghed_img.size
    pixel_houghed_img = houghed_img.load()

    maxNumbers = 9
    ignoreRadius = 10

    maxima = [0] * maxNumbers
    rhos = [0] * maxNumbers
    thetas = [0] * maxNumbers

    for u in range(0, maxNumbers):
        print('u:', u)
        value = 0
        xposition = 0
        yposition = 0

        # find maxima in the image
        for x in range(0, w):
            for y in range(0, h):
                if(pixel_houghed_img[x, y] > value):
                    value = pixel_houghed_img[x, y]
                    xposition = x
                    yposition = y

        # save maxima, rhos and thetas
        maxima[u] = value
        rhos[u] = (yposition - rAxisSize/2) * d_rho
        thetas[u] = xposition * d_theta
        pixel_houghed_img[xposition, yposition] = 0

        # delete the values around the found maxima
        radius = ignoreRadius
        for vx2 in range(-radius, radius):  # checks the values around the center
            for vy2 in range(-radius, radius):  # checks the values around the center
                x2 = xposition + vx2  # the inspected position, shifted by the offset
                y2 = yposition + vy2
                if not(x2 < 0 or x2 >= w):
                    if not(y2 < 0 or y2 >= h):
                        pixel_houghed_img[x2, y2] = 0
                        print(pixel_houghed_img[x2, y2])

    print('max', maxima)
    print('rho', rhos)
    print('theta', thetas)

    return maxima, rhos, thetas
im = Image.open("img5.pgm").convert("L")
houghed_img, rAxisSize, d_rho, d_theta = hough(im)
houghed_img.save("houghspace.bmp")
houghed_img.show()
img_copy = np.ones(im.size)
maxima, rhos, thetas = find_maxima(houghed_img, rAxisSize, d_rho, d_theta)
for t in range(0, len(maxima)):
    a = math.cos(thetas[t])
    b = math.sin(thetas[t])
    x = a * rhos[t]
    y = b * rhos[t]
    pt1 = (int(x + 1000*(-b)), int(y + 1000*(a)))
    pt2 = (int(x - 1000*(-b)), int(y - 1000*(a)))
    cv.line(img_copy, pt1, pt2, (0,0,255), 3, cv.LINE_AA)
cv.imshow('lines', img_copy)
cv.waitKey(0)
cv.destroyAllWindows()
Original Image:
Accumulator:
Successful Line Detection:
This part of your code doesn't seem right:
max1 = value
x1position = x
y1position = y
rho1 = value
theta1 = x
If x and y are the two coordinates in the parameter space, they will correspond to rho and theta. Setting rho equal to the value makes no sense. I also don't know why you store x1position and y1position, since you don't use these variables.
Next, you need to transform these coordinates back to actual rho and theta values, inverting the transform you do when writing:
theta = d_theta * vx  # angle between the x axis and the line connecting the origin with that closest point
rho = x*cos(theta) + y*sin(theta)  # distance from the origin to the closest point on the straight line
vy = rAxisSize/2 + int(rho/d_rho+0.5)  # compute the y value in the hough space image
The inverse would be:
rho = (y - rAxisSize/2) * d_rho
theta = x * d_theta
First of all, following How to create a Minimal, Complete, and Verifiable example, you should post or give a link to your image img3.pgm, if possible.
Then, you wrote that:
# Results of the print:
# ('max', 255, 255, 255)
# ('rho', 1, 1, 1)
# ('theta', 183, 184, 186)
so rho is the same for the three lines and theta does not differ much, varying between 183 and 186; hence the three lines are almost equal to each other, and this fact does not depend on the method you use to get the line equation and draw it.
According to the tutorial Hough Line Transform, it seems to me that your method for finding two points on a line is correct. That is what the tutorial suggests, and it seems to me equivalent to your code:
lines = cv2.HoughLines(edges, 1, np.pi/180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
I suspect the peak finding algorithm may not be correct.
Your peak finding algorithm finds the location of the largest peak and then the two locations very close to that maximum.
For the sake of simplicity, see what happens in just one dimension: a peak finding algorithm is expected to find three peak locations at x=-1, x=0 and x=1, and the peak values should be close to .25, .5 and 1.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-2, 2, 1000)
y = np.exp(-(x-1)**2/0.01)+.5*np.exp(-(x)**2/0.01)+.25*np.exp(-(x+1)**2/0.01)

max1, max2, max3 = 0, 0, 0
m1 = np.zeros(1000)
m2 = np.zeros(1000)
m3 = np.zeros(1000)
x1position, x2position, x3position = 0, 0, 0

for i in range(0, 1000):
    value = y[i]
    if(value > max1):
        max1 = value
        x1position = x[i]
    elif(value > max2):
        max2 = value
        x2position = x[i]
    elif(value > max3):
        max3 = value
        x3position = x[i]
    m1[i] = max1
    m2[i] = max2
    m3[i] = max3

print('xposition', x1position, x2position, x3position)
print('max', max1, max2, max3)

plt.figure()
plt.subplot(4,1,1)
plt.plot(x, y)
plt.ylabel('$y$')
plt.subplot(4,1,2)
plt.plot(x, m1)
plt.ylabel('$max_1$')
plt.subplot(4,1,3)
plt.plot(x, m2)
plt.ylabel('$max_2$')
plt.subplot(4,1,4)
plt.plot(x, m3)
plt.xlabel('$x$')
plt.ylabel('$max_3$')
plt.show()
the output is
('xposition', 0.99899899899899891, 1.0030030030030028, 1.0070070070070072)
('max', 0.99989980471948192, 0.99909860379824966, 0.99510221871862647)
and it is not what is expected.
Here you have a visual trace of the program:
To detect multiple peaks in a 2D field, you should have a look, for example, at Peak detection in a 2D array.
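For reference, a minimal sketch of that idea (not the asker's code; it assumes the accumulator is available as a NumPy array, e.g. np.array(houghed_img), and the neighborhood size and threshold are parameters you would tune):

import numpy as np
from scipy.ndimage import maximum_filter

def find_peaks_2d(acc, neighborhood_size=10, threshold=100):
    """Return (row, col) coordinates of local maxima in a 2D accumulator.

    A pixel counts as a peak if it equals the maximum of its neighborhood
    and exceeds the threshold; both parameters are assumptions to tune.
    """
    local_max = maximum_filter(acc, size=neighborhood_size) == acc
    peaks = local_max & (acc > threshold)
    return np.argwhere(peaks)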
I want to resize an image with bilinear interpolation. I found the new intensity value, but I do not know how to use it. The code I wrote is below.
import numpy as np
from PIL import Image


def resizeImageBI(im, width, height):
    temp = np.zeros((height, width), dtype=np.uint8)
    ratio_1 = float(im.size[0] - 1) / float(width - 1)
    ratio_0 = float(im.size[1] - 1) / float(height - 1)
    xx, yy = np.mgrid[:height, :width]
    xmap = np.around(xx * ratio_0)
    ymap = np.around(yy * ratio_1)
    for i in xrange(0, height):
        for j in xrange(0, width):
            temp[i][j] = im.getpixel((ymap[i][j], xmap[i][j])) * getNewIntensity(i, j, ratio_1, ratio_0)
    return Image.fromarray(temp)
First, get the image width ratio and height ratio; for example: lena.png 0.5 1
The original image is here
That is the output according to the code above
I just had to do this for a class and I haven't been graded yet, so you should check this before using it.
Basic Interpolation function
def interpolation(y0, x0, y1, x1, x):
    frac = (x - x0) / (x1 - x0)
    return y0*(1-frac) + y1 * frac
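For example (a tiny sketch with made-up values), interpolating halfway between two samples gives the midpoint value:

print(interpolation(0.0, 0.0, 10.0, 2.0, 1.0))  # 5.0, halfway between y0=0 and y1=10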
Step 1: Map the original coordinates to the newly resized image
def get_coords(im, W, H):
    h, w = im.shape
    x = np.arange(0, w+1, 1) * W/w
    y = np.arange(0, h+1, 1) * H/h
    return x, y
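For instance (a small sketch with a made-up 4x4 input mapped to 8x8), the original sample positions land on every other pixel of the new grid:

import numpy as np

im = np.zeros((4, 4))            # hypothetical 4x4 image
x, y = get_coords(im, W=8, H=8)
print(x)                         # [0. 2. 4. 6. 8.]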
Step 2: Create a function to interpolate in the x-direction on all rows.
def im_interp(im, H, W):
    X = np.zeros(shape=(W, H))
    x, y = get_coords(im, W, H)
    for i, v in enumerate(X):
        y0_idx = np.argmax(y > i) - 1
        for j, _ in enumerate(v):
            # subtracting 1 because this is the first val
            # that is greater than j, want the idx before that
            x0_idx = np.argmax(x > j) - 1
            x1_idx = np.argmax(j < x)
            x0 = x[x0_idx]
            x1 = x[x1_idx]
            y0 = im[y0_idx, x0_idx - 1]
            y1 = im[y0_idx, x1_idx - 1]
            X[i, j] = interpolation(y0, x0, y1, x1, j)
    return X
Step 3: Use the function from the step above to interpolate twice: first on the image in the x-direction, then on the transpose of the newly created image (y-direction)
def im_resize(im, H, W):
    X_lin = im_interp(im, H, W)
    X = im_interp(X_lin.T, H, W)
    return X_lin, X.T
I return both images just to look at the difference.
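A quick, hypothetical usage sketch (the test array and the 8x8 target size are made up, just to see the shapes):

import numpy as np

im = np.arange(16, dtype=float).reshape(4, 4)  # hypothetical 4x4 grayscale image
X_lin, X = im_resize(im, H=8, W=8)
print(X_lin.shape, X.shape)                    # (8, 8) (8, 8)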
I'm not sure if you want to do this manually as an exercise...
If not, there is scipy.misc.imresize that can do what you want.
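Note that scipy.misc.imresize has been removed from recent SciPy releases; if you only need a bilinear resize, Pillow's Image.resize can do it directly. A minimal sketch, assuming the lena.png from the question and the 0.5 / 1 scale factors (output file name is arbitrary):

from PIL import Image

im = Image.open("lena.png")
# Bilinear resize: half the width, same height.
resized = im.resize((im.width // 2, im.height), Image.BILINEAR)
resized.save("lena_bilinear.png")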
I have a travel-time map, and I want to get the integer points along the shortest path from source to receiver.
My present solution is that I do a Runge-Kutta integration from the receiver location and get a series of float points. Then I sample every 5 (or some other number of) points and assume a straight line between them, in order to use Bresenham's line algorithm. With this approach, I get the integer points.
However, it's not fast enough. Because I need to calculate the shortest paths for a lot of receivers, the total time becomes very large.
I used line_profiler to analyze the timing, which shows that the major part of the time is spent in the runge_kutta function and the get_velocity function it calls.
The code is below:
import numpy as np
from scipy.interpolate import RectBivariateSpline


def optimal_path_2d(gradx_interp,
                    grady_interp,
                    starting_point,
                    dx,
                    N=100):
    """
    Find the optimal path from starting_point to the zero contour
    of travel_time. dx is the grid spacing
    Solve the equation x_t = - grad t / | grad t |
    """
    def get_velocity(position):
        """ return normalized velocity at pos """
        x, y = position
        vel = np.array([gradx_interp(y, x)[0][0], grady_interp(y, x)[0][0]])
        return vel / np.linalg.norm(vel)

    def runge_kutta(pos, ds):
        """ Fourth order Runge Kutta point update """
        k1 = ds * get_velocity(pos)
        k2 = ds * get_velocity(pos - k1 / 2.0)
        k3 = ds * get_velocity(pos - k2 / 2.0)
        k4 = ds * get_velocity(pos - k3)
        return pos - (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

    x = runge_kutta(starting_point, dx)
    xl, yl = [], []
    for i in range(N):
        xl.append(x[0])
        yl.append(x[1])
        x = runge_kutta(x, dx)
        distance = ((x[0] - xl[-1])**2 +
                    (x[1] - yl[-1])**2)**0.5
        if distance < dx * 0.9:
            break
    return yl, xl
def get_curve(x_curve, y_curve, num_interval):
    """Curve Algorithm based on Bresenham's Line Algorithm
    Produces a list of tuples
    """
    num = len(x_curve)
    if num < num_interval:
        print("num_interval is too large.")
    ret_set = set()
    x0 = x_curve[0]
    y0 = y_curve[0]
    for i in range(num_interval, num, num_interval):
        x1 = x_curve[i]
        y1 = y_curve[i]
        points_on_line = get_line((x0, y0), (x1, y1))
        ret_set.update(points_on_line)
        x0 = x1
        y0 = y1
    if num % num_interval != 0:
        n = int(num/num_interval)*num_interval
        x0 = x_curve[n]
        y0 = y_curve[n]
        x1 = x_curve[-1]
        y1 = y_curve[-1]
        points_on_line = get_line((x0, y0), (x1, y1))
        ret_set.update(points_on_line)
    return list(ret_set)
def get_line(start, end):
    """Modified version of Bresenham's Line Algorithm
    Produces a list of tuples from start and end

    >>> points1 = get_line((0, 0), (3, 4))
    >>> points2 = get_line((3, 4), (0, 0))
    >>> assert(set(points1) == set(points2))
    >>> print points1
    [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]
    >>> print points2
    [(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)]
    """
    # Setup initial conditions
    x1, y1 = (int(x) for x in start)
    x2, y2 = (int(x) for x in end)
    dx = x2 - x1
    dy = y2 - y1

    # Determine how steep the line is
    is_steep = abs(dy) > abs(dx)

    # Rotate line
    if is_steep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2

    # Swap start and end points if necessary and store swap state
    swapped = False
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        swapped = True

    # Recalculate differentials
    dx = x2 - x1
    dy = y2 - y1

    # Calculate error
    error = int(dx / 2.0)
    ystep = 1 if y1 < y2 else -1

    # Iterate over bounding box generating points between start and end
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        coord = (y, x) if is_steep else (x, y)
        points.append(coord)
        error -= abs(dy)
        if error < 0:
            y += ystep
            error += dx

    # Reverse the list if the coordinates were swapped
    if swapped:
        points.reverse()
    return points
nx = 100
ny = 100
num_interval = 5
loc_src = (10, 10)
loc_rec = (70, 90)

coordx = np.arange(nx)
coordy = np.arange(ny)
X, Y = np.meshgrid(coordx, coordy)
travel_time = (X-loc_src[0])**2/5 + (Y-loc_src[1])**2/10  # for simplicity

dx = 1.0  # grid spacing (not defined in the original snippet; assumed to be 1.0 here)
grad_t_y, grad_t_x = np.gradient(travel_time, dx)
if isinstance(travel_time, np.ma.MaskedArray):
    grad_t_y[grad_t_y.mask] = 0.0
    grad_t_y = grad_t_y.data
    grad_t_x[grad_t_x.mask] = 0.0
    grad_t_x = grad_t_x.data

gradx_interp = RectBivariateSpline(coordy, coordx, grad_t_x)
grady_interp = RectBivariateSpline(coordy, coordx, grad_t_y)

yl, xl = optimal_path_2d(gradx_interp, grady_interp, loc_rec, dx)
grid_indx = get_curve(xl, yl, num_interval)
I hear that Cython can be faster, so I learned a little of it recently and tried it. The result is only about 2x faster than the code above, probably because I'm really new to Cython. The code below is incomplete; I just wrote it for testing.
import numpy as np
from numpy.core.umath_tests import inner1d


def func(X_interp, Y_interp):
    def get_velocity(double x, double y):
        """ return normalized velocity at pos """
        cdef double vel[2], norm
        a = X_interp(y, x)
        vel[0] = a[0][0]
        b = Y_interp(y, x)
        vel[1] = b[0][0]
        # norm = (vel[0]**2 + vel[1]**2)**0.5
        # vel[0] = vel[0]/norm
        # vel[1] = vel[1]/norm
        return vel

    def runge_kutta(double x, double y, double ds):
        """ Fourth order Runge Kutta point update """
        cdef double k1[2], k2[2], k3[2], k4[2], r[2], pos[2]
        pos[0] = x; pos[1] = y
        k1 = get_velocity(pos[0], pos[1])
        k2 = get_velocity(pos[0] - k1[0]/2.0*ds, pos[1] - k1[1]/2.0*ds)
        k3 = get_velocity(pos[0] - k2[0]/2.0*ds, pos[1] - k2[1]/2.0*ds)
        k4 = get_velocity(pos[0] - k3[0]/2.0*ds, pos[1] - k3[1]/2.0*ds)
        cdef size_t i
        for i in range(2):
            r[i] = pos[i] - ds * (k1[i] + 2*k2[i] + 2*k3[i] + k4[i])/6.0
        return r

    for i in range(50):
        runge_kutta(0, 0, 1.)
        # print(runge_kutta(0, 0, 1.))
So I'm trying to write a function, let's call it foo, that takes the path of a binary image, gets the Hough lines along it, and then returns the lines sorted by how many white pixels are along the Hough lines. This is the code I have so far, but it's crashing at the "if(image[(x1 + (i * stepx)), (y1 + (i * stepy))].any()):" line with an invalid index. Do you guys see what I can do to fix the bug, or know of a function built into OpenCV that does what I want?
def lineParams(line, length):
    (dist, angl) = line
    a = math.cos(angl)
    b = math.sin(angl)
    x0 = a * dist
    y0 = b * dist
    pt1 = (int(x0 - length * b), int(y0 + length * a))
    pt2 = (int(x0 + length * b), int(y0 - length * a))
    return (pt1, pt2)


def lineWhiteness(line, image):
    (pt1, pt2) = lineParams(line, len(image))
    count = 0
    (x1, y1) = pt1
    (x2, y2) = pt2
    stepx = (x2 - x1) / 100
    stepy = (y2 - y1) / 100
    for i in xrange(1, 100):
        #print image[(x1 + i * stepx), (y1 + i * stepy)]
        if(image[(x1 + (i * stepx)), (y1 + (i * stepy))].any()):
            count = count + 1
    return count


def foo(path, display):
    edges = CannyEdge(path, False)
    lines = cv2.HoughLines(edges, rho, theta, threshold)
    image = cv2.imread(path)
    lines = lines[0]
    lines = sorted(lines, key=lambda l: lineWhiteness(l, image))
    return lines
I ended up solving it by using OpenCV's line iterator as follows and I'm currently trying to rewrite my line params function to be better.
def lineWhiteness(line, image):
    (pt1, pt2) = lineParams(line, len(image))
    count = 0
    li = cv.InitLineIterator(cv.fromarray(image), pt1, pt2)
    for (r, g, b) in li:
        if (r or g or b):
            count += 1
    return count
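If you are on a newer OpenCV build where the old cv module (and InitLineIterator) is no longer available, a rough alternative sketch is to sample points along the segment with NumPy; the function name is made up and the sample count of 100 mirrors the original loop:

import numpy as np

def line_whiteness_np(image, pt1, pt2, num_samples=100):
    """Count sampled points along pt1 -> pt2 that hit a non-black pixel.

    image is a color array as returned by cv2.imread, indexed [row, col].
    """
    (x1, y1), (x2, y2) = pt1, pt2
    xs = np.linspace(x1, x2, num_samples).round().astype(int)
    ys = np.linspace(y1, y2, num_samples).round().astype(int)
    h, w = image.shape[:2]
    inside = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)  # drop samples outside the image
    hits = image[ys[inside], xs[inside]].any(axis=-1)     # True where any channel is non-zero
    return int(np.count_nonzero(hits))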
In Python, I have written a 3D rendering program. The Y rotation works fine, but the X rotation zooms in for some obscure reason. I couldn't spot it, so I put it up here.
def plotLine(W, H, (x, y, z), (x2, y2, z2), rotX, rotY, FOV=1.0):
    try:
        x = float(x)
        y = float(y)
        z = float(z)
        x2 = float(x2)
        y2 = float(y2)
        z2 = float(z2)
        if z == 0:
            z = 0.01
        if z2 == 0:
            z2 = 0.01
        x, y, z = rotateY((x, y, z), rotY)
        x, y, z = rotateX((x, y, z), rotX)
        x2, y2, z2 = rotateY((x2, y2, z2), rotY)
        x2, y2, z2 = rotateX((x2, y2, z2), rotX)
        scX = (x/z)*FOV
        scY = (y/z)*FOV
        scX *= min(W, H)
        scY *= min(W, H)
        scX += W/2
        scY += H/2
        scX2 = (x2/z2)*FOV
        scY2 = (y2/z2)*FOV
        scX2 *= min(W, H)
        scY2 *= min(W, H)
        scX2 += W/2
        scY2 += H/2
        pygame.draw.aaline(display, (0, 255, 0), (scX, scY), (scX2, scY2))
    except (OverflowError, ZeroDivisionError):
        return


def rotateY((x, y, z), degrees):  # Looking left and right.
    x, y, z = float(x), float(y), float(z)
    rads = math.radians(degrees)
    newX = (math.cos(rads)*x)+(math.sin(rads)*z)
    newY = y
    newZ = (-math.sin(rads)*x)+(math.cos(rads)*z)
    return (newX, newY, newZ)


def rotateX((x, y, z), degrees):
    x, y, z = float(x), float(y), float(z)
    rads = math.radians(degrees)
    newX = x
    newY = (math.cos(rads)*y)+(math.sin(rads)*z)
    newZ = (math.sin(rads)*y)+(math.cos(rads)*z)
    return (newX, newY, newZ)
Any help would be appreciated!
BTW, I have looked up the matrix rotations on Wikipedia. Either Wikipedia got the matrices wrong, or I multiplied the matrices wrong, which is not likely. I have looked over them several times.
I think you have an error in your rotateX function
newY = (math.cos(rads)*y)+(math.sin(rads)*z)
newZ = (math.sin(rads)*y)+(math.cos(rads)*z)
should be
newY = (math.cos(rads)*y)+(math.sin(rads)*z)
newZ = (-math.sin(rads)*y)+(math.cos(rads)*z)
       # ^ note the added negative sign
Without the negative sign you will not get a rotation. You have done this correctly in your rotateY function but not in your rotateX function.
If you look at the 2D submatrix of your currently coded 3D rotation you have
[cos(rads) sin(rads)]
[sin(rads) cos(rads)]
and the determinant of this is
cos(rads)*cos(rads) - sin(rads)*sin(rads)
= cos(2*rads)
This is not equal to 1 for all angles rads and hence is not a rotation for all values of rads.
Note also that this rotation angle would be in the opposite (negative) sense to what is usually associated with a rotation. You can see more information about this here on Wikipedia.
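A quick numerical check of this point (a sketch, not from the original answer): with the negative sign the matrix has determinant 1, without it the determinant is cos(2*rads):

import numpy as np

def rot_x(degrees):
    """Rotation about the X axis as in the corrected code (note the negative sign)."""
    r = np.radians(degrees)
    return np.array([[1, 0, 0],
                     [0, np.cos(r), np.sin(r)],
                     [0, -np.sin(r), np.cos(r)]])

def bad_rot_x(degrees):
    """The matrix as originally coded, without the negative sign."""
    r = np.radians(degrees)
    return np.array([[1, 0, 0],
                     [0, np.cos(r), np.sin(r)],
                     [0, np.sin(r), np.cos(r)]])

print(np.linalg.det(rot_x(30)))      # ~1.0, a proper rotation
print(np.linalg.det(bad_rot_x(30)))  # ~0.5 = cos(60 deg), not a rotation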