HSI image is equalized incorrectly in Python

I have implemented equalization for HSI color images using the numpy and math modules.
First, I convert an RGB image into HSI using these functions:
import math
import numpy as np

def rgb2hsi_px(px):
    eps = 0.00000001
    r, g, b = float(px[0]) / 255, float(px[1]) / 255, float(px[2]) / 255
    # Hue component
    numerator = 0.5 * ((r - g) + (r - b))
    denominator = math.sqrt((r - g) ** 2 + (r - b) * (g - b))
    theta = math.acos(numerator / (denominator + eps))
    h = theta
    if b > g:
        h = 2 * math.pi - h
    # Saturation component
    num = min(r, g, b)
    den = r + g + b
    if den == 0:
        den = eps
    s = 1 - 3 * num / den
    if s == 0:
        h = 0
    # Intensity component
    i = (r + g + b) / 3
    return h, s, i

def rgb2hsi(image):
    hsi_image = np.zeros_like(image).astype('float')
    height, width, _ = image.shape
    for x in range(height):
        for y in range(width):
            px = rgb2hsi_px(image[x, y])
            hsi_image[x, y] = px
    return np.array(hsi_image)
Then I equalize the intensity values of the converted image. The equalize function was implemented following this article:
import math
import numpy as np

def equalize(img):
    eps = 0.000000000001
    h, w, _ = img.shape
    num_of_pxs = h * w
    mean = 0.0
    new_img = np.array(img)
    while not abs(mean - 0.5) < eps:
        for i in range(h):
            for j in range(w):
                mean += new_img[i, j, 2]
        mean /= num_of_pxs
        if mean != 0.5:
            theta = math.log(0.5, math.e) / math.log(mean, math.e)
            for x in range(h):
                for y in range(w):
                    px = list(new_img[x, y])
                    px[2] = (px[2] ** theta)
                    new_img[x, y] = px
    return new_img
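(For reference, the mapping this code applies is a gamma correction of the intensity channel: I_new = I^theta with theta = log(0.5) / log(mean). The exponent is chosen so that the mean intensity maps exactly to 0.5, since mean^theta = 0.5.)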
Afterwards, I convert the HSI image back to RGB using the following code:
import math
import numpy as np

def hsi2rgb_px(px):
    h, s, i = float(px[0]), float(px[1]), float(px[2]) * 255
    if 0 <= h < 2 * math.pi / 3:
        b = i * (1 - s)
        r = i * (1 + (s * math.cos(h)) / math.cos(math.pi / 3 - h))
        g = 3 * i - (r + b)
    elif 2 * math.pi / 3 <= h < 4 * math.pi / 3:
        r = i * (1 - s)
        g = i * (1 + (s * math.cos(h - 2 * math.pi / 3) / math.cos(math.pi / 3 - (h - 2 * math.pi / 3))))
        b = 3 * i - (r + g)
    elif 4 * math.pi / 3 <= h <= 2 * math.pi:
        g = i * (1 - s)
        b = i * (1 + (s * math.cos(h - 4 * math.pi / 3) / math.cos(math.pi / 3 - (h - 4 * math.pi / 3))))
        r = 3 * i - (g + b)
    else:
        raise IndexError('h is out of range: {}'.format(h))
    return round(r), round(g), round(b)

def hsi2rgb(image):
    rgb_image = np.zeros_like(image).astype(np.uint8)
    height, width, _ = image.shape
    for x in range(height):
        for y in range(width):
            px = hsi2rgb_px(image[x, y])
            rgb_image[x, y] = px
    return np.array(rgb_image)
But the equalization gives an incorrect result. The equalized image file is larger (in megabytes) than the original one; I'm not sure whether that's normal, but if it is, please let me know. Another problem is that the output image has worse quality.
Here is the original image:
And the equalized image:
Can someone help me fix my code, or point me to a similar article or question?
[UPDATE]
Driver program to test the algorithm:
import matplotlib.image as mp_img
input_img = mp_img.imread('input.bmp')
hsi_img = rgb2hsi(input_img)
equalized_img = equalize(hsi_img)
out_img = hsi2rgb(equalized_img)
mp_img.imsave('out.bmp', out_img)
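A quick way to isolate the problem (a sketch I use for debugging; it bypasses equalize entirely to test the conversion round trip on its own):
import numpy as np
import matplotlib.image as mp_img
input_img = mp_img.imread('input.bmp')
round_trip = hsi2rgb(rgb2hsi(input_img))
# The maximum per-pixel difference should be small (rounding error only)
print(np.abs(input_img.astype(int) - round_trip.astype(int)).max())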

Related

Faster way to iterate through pixel using numpy with conditions?

import colorsys
import numpy as np
from PIL import Image

def colorize(im, h, s, l_adjust):
    result = Image.new('RGBA', im.size)
    pixin = np.copy(im)
    pixout = np.array(result)
    # >>>>>>>>>>>>>>>>> loop <<<<<<<<<<<<<<<<<
    for y in range(pixout.shape[1]):
        for x in range(pixout.shape[0]):
            # currentRGB is my custom luminance helper, expanded in Part 2 below
            lum = currentRGB(pixin[x, y][0], pixin[x, y][1], pixin[x, y][2])
            r, g, b = colorsys.hls_to_rgb(h, lum, s)
            r, g, b = int(r * 255.99), int(g * 255.99), int(b * 255.99)
            pixout[x, y] = (r, g, b, 255)
    # >>>>>>>>>>>>>>>>>>>>> Loop end <<<<<<<<<<<
    return result
I'm trying to find the per-pixel HSL values for a frame of input video, but it takes about 1.5 s per frame; I want to get that down to at most 0.3 s. Is there a faster way to do this without these two loops? I'm looking for something like a LUT (look-up table), vectorization, or some NumPy shortcut to avoid them. Thanks.
OR
Part 2:
If I inline the custom currentRGB() into the for loops, it looks like this:
def colorize(im, h, s, l_adjust):
    result = Image.new('RGBA', im.size)
    pixin = np.copy(im)
    pixout = np.array(result)
    for y in range(pixout.shape[1]):
        for x in range(pixout.shape[0]):
            currentR, currentG, currentB = pixin[x, y][0]/255, pixin[x, y][1]/255, pixin[x, y][2]/255
            # luminance
            lum = (currentR * 0.2126) + (currentG * 0.7152) + (currentB * 0.0722)
            if l_adjust > 0:
                lum = lum * (1 - l_adjust)
                lum = lum + (1.0 - (1.0 - l_adjust))
            else:
                lum = lum * (l_adjust + 1)
            l = lum
            r, g, b = colorsys.hls_to_rgb(h, l, s)
            r, g, b = int(r * 255.99), int(g * 255.99), int(b * 255.99)
            pixout[x, y] = (r, g, b, 255)
    return pixout
You can use Numba to drastically speed the computation up. Here is the implementation:
import numba as nb
import numpy as np
from PIL import Image

@nb.njit('float32(float32,float32,float32)')
def hue_to_rgb(p, q, t):
    if t < 0: t += 1
    if t > 1: t -= 1
    if t < 1./6: return p + (q - p) * 6 * t
    if t < 1./2: return q
    if t < 2./3: return p + (q - p) * (2./3 - t) * 6
    return p

@nb.njit('UniTuple(uint8,3)(float32,float32,float32)')
def hls_to_rgb(h, l, s):
    if s == 0:
        # achromatic
        r = g = b = l
    else:
        q = l * (1 + s) if l < 0.5 else l + s - l * s
        p = 2 * l - q
        r = hue_to_rgb(p, q, h + 1./3)
        g = hue_to_rgb(p, q, h)
        b = hue_to_rgb(p, q, h - 1./3)
    return (int(r * 255.99), int(g * 255.99), int(b * 255.99))

@nb.njit('void(uint8[:,:,::1],uint8[:,:,::1],float32,float32,float32)', parallel=True)
def colorize_numba(pixin, pixout, h, s, l_adjust):
    for x in nb.prange(pixout.shape[0]):
        for y in range(pixout.shape[1]):
            currentR, currentG, currentB = pixin[x, y, 0]/255, pixin[x, y, 1]/255, pixin[x, y, 2]/255
            # luminance
            lum = (currentR * 0.2126) + (currentG * 0.7152) + (currentB * 0.0722)
            if l_adjust > 0:
                lum = lum * (1 - l_adjust)
                lum = lum + (1.0 - (1.0 - l_adjust))
            else:
                lum = lum * (l_adjust + 1)
            l = lum
            r, g, b = hls_to_rgb(h, l, s)
            pixout[x, y, 0] = r
            pixout[x, y, 1] = g
            pixout[x, y, 2] = b
            pixout[x, y, 3] = 255

def colorize(im, h, s, l_adjust):
    result = Image.new('RGBA', im.size)
    pixin = np.copy(im)
    pixout = np.array(result)
    colorize_numba(pixin, pixout, h, s, l_adjust)
    return pixout
This optimized parallel implementation is about 2000 times faster than the original code on my 6-core machine (on 800x600 images). The hls_to_rgb implementation comes from this post. Note that the type strings in the @nb.njit decorators are not mandatory, but they enable Numba to compile the functions ahead of time instead of at the first call. For more information about the types, please read the Numba documentation.
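For completeness, here is a minimal driver for the function above (a sketch: the file name and the h/s/l_adjust values are arbitrary placeholders):
import numpy as np
from PIL import Image
# Load a frame, apply the Numba-accelerated colorize, and save the result
im = Image.open('input.png').convert('RGB')
out = colorize(im, 0.6, 0.5, 0.0)  # returns an H x W x 4 uint8 RGBA array
Image.fromarray(out).save('tinted.png')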

Notch Reject Filtering in Python

I'm trying to implement notch-reject filtering in Python for an assignment. I tried using the notch-reject filter formula from Rafael Gonzalez's book, and all I got was an edge-detected image. Then I tried ideal notch rejecting, and here are the results:
Input image -- output of my program -- expected output
Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def notch_reject_filter(shape, d0=9, u_k=0, v_k=0):
    P, Q = shape
    # Initialize filter with zeros
    H = np.zeros((P, Q))
    # Traverse through filter
    for u in range(0, P):
        for v in range(0, Q):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - P / 2 + u_k) ** 2 + (v - Q / 2 + v_k) ** 2)
            D_muv = np.sqrt((u - P / 2 - u_k) ** 2 + (v - Q / 2 - v_k) ** 2)
            if D_uv <= d0 or D_muv <= d0:
                H[u, v] = 0.0
            else:
                H[u, v] = 1.0
    return H

img = cv2.imread('input.png', 0)
img_shape = img.shape

original = np.fft.fft2(img)
center = np.fft.fftshift(original)

NotchRejectCenter = center * notch_reject_filter(img_shape, 32, 50, 50)
NotchReject = np.fft.ifftshift(NotchRejectCenter)
inverse_NotchReject = np.fft.ifft2(NotchReject)  # Compute the inverse DFT of the result

plot_image = np.concatenate((img, np.abs(inverse_NotchReject)), axis=1)
plt.imshow(plot_image, "gray"), plt.title("Notch Reject Filter")
plt.show()
"all I got was an edge-detected image": that is because your implementation is a high-pass filter (a black circle in the middle of the spectrum), which acts as an edge detector.
"Then I tried ideal notch rejecting": this is correct, if applied correctly.
The main concept is to filter out undesired noise in the frequency domain. The noise appears as white spots, and your job is to suppress those spots by multiplying them by black circles in the frequency domain (this is the notch-reject filtering).
To improve this result, add more notch filters (H5, H6, ...) to suppress the noise.
import cv2
import numpy as np
import matplotlib.pyplot as plt

# ------------------------------------------------------
def notch_reject_filter(shape, d0=9, u_k=0, v_k=0):
    P, Q = shape
    # Initialize filter with zeros
    H = np.zeros((P, Q))
    # Traverse through filter
    for u in range(0, P):
        for v in range(0, Q):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - P / 2 + u_k) ** 2 + (v - Q / 2 + v_k) ** 2)
            D_muv = np.sqrt((u - P / 2 - u_k) ** 2 + (v - Q / 2 - v_k) ** 2)
            if D_uv <= d0 or D_muv <= d0:
                H[u, v] = 0.0
            else:
                H[u, v] = 1.0
    return H
# -----------------------------------------------------

img = cv2.imread('input.png', 0)

f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
phase_spectrumR = np.angle(fshift)
magnitude_spectrum = 20 * np.log(np.abs(fshift))
img_shape = img.shape

H1 = notch_reject_filter(img_shape, 4, 38, 30)
H2 = notch_reject_filter(img_shape, 4, -42, 27)
H3 = notch_reject_filter(img_shape, 2, 80, 30)
H4 = notch_reject_filter(img_shape, 2, -82, 28)

NotchFilter = H1 * H2 * H3 * H4
NotchRejectCenter = fshift * NotchFilter
NotchReject = np.fft.ifftshift(NotchRejectCenter)
inverse_NotchReject = np.fft.ifft2(NotchReject)  # Compute the inverse DFT of the result

Result = np.abs(inverse_NotchReject)

plt.subplot(222)
plt.imshow(img, cmap='gray')
plt.title('Original')

plt.subplot(221)
plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('magnitude spectrum')

plt.subplot(223)
plt.imshow(magnitude_spectrum * NotchFilter, "gray")
plt.title("Notch Reject Filter")

plt.subplot(224)
plt.imshow(Result, "gray")
plt.title("Result")

plt.show()
Drive-by comment: using for-loops to generate the notch filter is very slow. That operation can be vectorized:
def notch_reject_filter_vec(shape: tuple[int, int], d0: int, u_k: int, v_k: int):
    (M, N) = shape
    H_0_u = np.repeat(np.arange(M), N).reshape((M, N))
    H_0_v = np.repeat(np.arange(N), M).reshape((N, M)).transpose()
    D_uv = np.sqrt((H_0_u - M / 2 + u_k) ** 2 + (H_0_v - N / 2 + v_k) ** 2)
    D_muv = np.sqrt((H_0_u - M / 2 - u_k) ** 2 + (H_0_v - N / 2 - v_k) ** 2)
    selector_1 = D_uv <= d0
    selector_2 = D_muv <= d0
    selector = np.logical_or(selector_1, selector_2)
    H = np.ones((M, N))
    H[selector] = 0
    return H
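As a quick sanity check (a sketch with an arbitrary shape and parameters), the vectorized filter can be compared against the loop version:
import numpy as np
shape = (256, 320)
H_loop = notch_reject_filter(shape, 9, 50, 40)
H_vec = notch_reject_filter_vec(shape, 9, 50, 40)
print(np.array_equal(H_loop, H_vec))  # True: identical masks, no Python loops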

The program does not print the result

I am working with the following program, but I can't get the image moments, centroid, etc. that I need.
Code:
import cv2
import numpy
from matplotlib.pyplot import imread
from numpy import mgrid, sum

image = imread('imagemoment.png')

def moments2e(image):
    assert len(image.shape) == 2  # only for grayscale images
    x, y = mgrid[:image.shape[0], :image.shape[1]]
    moments = {}
    moments['mean_x'] = sum(x * image) / sum(image)
    moments['mean_y'] = sum(y * image) / sum(image)

    # raw or spatial moments
    moments['m00'] = sum(image)
    moments['m01'] = sum(x * image)
    moments['m10'] = sum(y * image)
    moments['m11'] = sum(y * x * image)
    moments['m02'] = sum(x ** 2 * image)
    moments['m20'] = sum(y ** 2 * image)
    moments['m12'] = sum(x * y ** 2 * image)
    moments['m21'] = sum(x ** 2 * y * image)
    moments['m03'] = sum(x ** 3 * image)
    moments['m30'] = sum(y ** 3 * image)

    # central moments
    moments['mu01'] = sum((y - moments['mean_y']) * image)  # should be 0
    moments['mu10'] = sum((x - moments['mean_x']) * image)  # should be 0
    moments['mu11'] = sum((x - moments['mean_x']) * (y - moments['mean_y']) * image)
    moments['mu02'] = sum((y - moments['mean_y']) ** 2 * image)  # variance
    moments['mu20'] = sum((x - moments['mean_x']) ** 2 * image)  # variance
    moments['mu12'] = sum((x - moments['mean_x']) * (y - moments['mean_y']) ** 2 * image)
    moments['mu21'] = sum((x - moments['mean_x']) ** 2 * (y - moments['mean_y']) * image)
    moments['mu03'] = sum((y - moments['mean_y']) ** 3 * image)
    moments['mu30'] = sum((x - moments['mean_x']) ** 3 * image)

    # opencv versions
    # moments['mu02'] = sum(image*(x-m01/m00)**2)
    # moments['mu02'] = sum(image*(x-y)**2)

    # wiki variations
    # moments['mu02'] = m20 - mean_y*m10
    # moments['mu20'] = m02 - mean_x*m01

    # central standardized or normalized or scale invariant moments
    moments['nu11'] = moments['mu11'] / sum(image) ** (2 / 2 + 1)
    moments['nu12'] = moments['mu12'] / sum(image) ** (3 / 2 + 1)
    moments['nu21'] = moments['mu21'] / sum(image) ** (3 / 2 + 1)
    moments['nu20'] = moments['mu20'] / sum(image) ** (2 / 2 + 1)
    moments['nu03'] = moments['mu03'] / sum(image) ** (3 / 2 + 1)  # skewness
    moments['nu30'] = moments['mu30'] / sum(image) ** (3 / 2 + 1)  # skewness
    return moments
Can you help me solve this problem, please?
Thank you very much.
One issue I can see at first glance: you have defined the function moments2e(image) but never called it. You need to call moments2e(image) outside of the function definition.
import cv2
import numpy
from matplotlib.pyplot import imread
from numpy import mgrid, sum

image = imread('imagemoment.png')

def moments2e(image):
    assert len(image.shape) == 2  # only for grayscale images
    x, y = mgrid[:image.shape[0], :image.shape[1]]
    .
    .
    .
    return moments

moments = moments2e(image)
print(moments)
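One more caveat (an observation about the input, with a naive conversion as one possible sketch): matplotlib's imread returns an H x W x 3 (or H x W x 4) array for a color PNG, so the assert len(image.shape) == 2 will fail unless the image is converted to grayscale first:
if image.ndim == 3:
    image = image[..., :3].mean(axis=2)  # naive grayscale: average the RGB channels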

multithreaded mandelbrot set

Is it possible to change the formula of the Mandelbrot set (which is f(z) = z^2 + c by default) to a different one (f(z) = z^2 + c * e^(-z) is what I need) when using the escape time algorithm, and if so, how?
I'm currently using this code by FB36:
# Multi-threaded Mandelbrot Fractal (Do not run using IDLE!)
# FB - 201104306
import threading
from PIL import Image

w = 512  # image width
h = 512  # image height
image = Image.new("RGB", (w, h))
wh = w * h
maxIt = 256  # max number of iterations allowed

# drawing region (xa < xb & ya < yb)
xa = -2.0
xb = 1.0
ya = -1.5
yb = 1.5
xd = xb - xa
yd = yb - ya

numThr = 5  # number of threads to run
# lock = threading.Lock()

class ManFrThread(threading.Thread):
    def __init__(self, k):
        self.k = k
        threading.Thread.__init__(self)

    def run(self):
        # each thread only calculates its own share of pixels
        for i in range(self.k, wh, numThr):
            kx = i % w
            ky = int(i / w)
            a = xa + xd * kx / (w - 1.0)
            b = ya + yd * ky / (h - 1.0)
            x = a
            y = b
            for kc in range(maxIt):
                x0 = x * x - y * y + a
                y = 2.0 * x * y + b
                x = x0
                if x * x + y * y > 4:
                    # various color palettes can be created here
                    red = (kc % 8) * 32
                    green = (16 - kc % 16) * 16
                    blue = (kc % 16) * 16
                    # lock.acquire()
                    global image
                    image.putpixel((kx, ky), (red, green, blue))
                    # lock.release()
                    break

if __name__ == "__main__":
    tArr = []
    for k in range(numThr):  # create all threads
        tArr.append(ManFrThread(k))
    for k in range(numThr):  # start all threads
        tArr[k].start()
    for k in range(numThr):  # wait until all threads finished
        tArr[k].join()
    image.save("MandelbrotFractal.png", "PNG")
From the code I infer that z = x + y * i and c = a + b * i. That corresponds to f(z) = z^2 + c. You want f(z) = z^2 + c * e^(-z).
Recall that e^(-z) = e^(-(x + yi)) = e^(-x) * e^(-iy) = e^(-x)(cos(y) - i*sin(y)). Multiplying by c = a + b*i gives a real part of e^(-x)(a*cos(y) + b*sin(y)) and an imaginary part of e^(-x)(b*cos(y) - a*sin(y)). Thus you should update your lines to be the following (with from math import exp, cos, sin at the top of the file):
x0 = x * x - y * y + a * exp(-x) * cos(y) + b * exp(-x) * sin(y)
y = 2.0 * x * y + b * exp(-x) * cos(y) - a * exp(-x) * sin(y)
x = x0
You might need to adjust maxIt if you don't get the level of feature differentiation you're after (on average, it might now take more or fewer iterations to escape), but this should be the correct mathematical expression.
As pointed out in the comments, you might need to adjust the criterion itself and not just the maximum iterations in order to get the desired level of differentiation: changing the max doesn't help for ones that never escape.
You can try deriving a good escape condition or just try out some things and see what you get.
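For reference, the modified iteration is easy to prototype with Python's built-in complex type before touching the threaded code (a sketch; escape_time is a hypothetical helper, and it starts at z = c to match the code above):
import cmath
def escape_time(a, b, max_it=256):
    c = complex(a, b)
    z = c  # the threaded code starts with x = a, y = b
    for n in range(max_it):
        z = z * z + c * cmath.exp(-z)  # f(z) = z^2 + c * e^(-z)
        if abs(z) > 2:  # same escape test as x * x + y * y > 4
            return n
    return max_it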

Intersections between Geodesics (shortest distance paths) on the surface of a sphere

I've searched far and wide but have yet to find a suitable answer to this problem. Given two lines on a sphere, each defined by their start and end points, determine whether or not and where they intersect. I've found this site (http://mathforum.org/library/drmath/view/62205.html) which runs through a good algorithm for the intersections of two great circles, although I'm stuck on determining whether the given point lies along the finite section of the great circles.
I've found several sites which claim they've implemented this, including some questions here and on Stack Exchange, but they always seem to reduce back to the intersections of two great circles.
The Python class I'm writing is as follows, and it seems to almost work:
class Geodesic(Boundary):
    def _SecondaryInitialization(self):
        self.theta_1 = self.point1.theta
        self.theta_2 = self.point2.theta
        self.phi_1 = self.point1.phi
        self.phi_2 = self.point2.phi
        sines = math.sin(self.phi_1) * math.sin(self.phi_2)
        cosines = math.cos(self.phi_1) * math.cos(self.phi_2)
        self.d = math.acos(sines - cosines * math.cos(self.theta_2 - self.theta_1))
        self.x_1 = math.cos(self.theta_1) * math.cos(self.phi_1)
        self.x_2 = math.cos(self.theta_2) * math.cos(self.phi_2)
        self.y_1 = math.sin(self.theta_1) * math.cos(self.phi_1)
        self.y_2 = math.sin(self.theta_2) * math.cos(self.phi_2)
        self.z_1 = math.sin(self.phi_1)
        self.z_2 = math.sin(self.phi_2)
        self.theta_wraps = (self.theta_2 - self.theta_1 > PI)
        self.phi_wraps = ((self.phi_1 < self.GetParametrizedCoords(0.01).phi and
                           self.phi_2 < self.GetParametrizedCoords(0.99).phi) or
                          (self.phi_1 > self.GetParametrizedCoords(0.01).phi and
                           self.phi_2 > self.GetParametrizedCoords(0.99).phi))

    def Intersects(self, boundary):
        A = self.y_1 * self.z_2 - self.z_1 * self.y_2
        B = self.z_1 * self.x_2 - self.x_1 * self.z_2
        C = self.x_1 * self.y_2 - self.y_1 * self.x_2
        D = boundary.y_1 * boundary.z_2 - boundary.z_1 * boundary.y_2
        E = boundary.z_1 * boundary.x_2 - boundary.x_1 * boundary.z_2
        F = boundary.x_1 * boundary.y_2 - boundary.y_1 * boundary.x_2
        try:
            z = 1 / math.sqrt(((B * F - C * E) ** 2 / (A * E - B * D) ** 2)
                              + ((A * F - C * D) ** 2 / (B * D - A * E) ** 2) + 1)
        except ZeroDivisionError:
            return self._DealWithZeroZ(A, B, C, D, E, F, boundary)
        x = ((B * F - C * E) / (A * E - B * D)) * z
        y = ((A * F - C * D) / (B * D - A * E)) * z
        theta = math.atan2(y, x)
        phi = math.atan2(z, math.sqrt(x ** 2 + y ** 2))
        if self._Contains(theta, phi):
            return point.SPoint(theta, phi)
        theta = (theta + 2 * PI) % (2 * PI) - PI
        phi = -phi
        if self._Contains(theta, phi):
            return point.SPoint(theta, phi)
        return None

    def _Contains(self, theta, phi):
        contains_theta = False
        contains_phi = False
        if self.theta_wraps:
            contains_theta = theta > self.theta_2 or theta < self.theta_1
        else:
            contains_theta = theta > self.theta_1 and theta < self.theta_2
        phi_wrap_param = self._PhiWrapParam()
        if phi_wrap_param <= 1.0 and phi_wrap_param >= 0.0:
            extreme_phi = self.GetParametrizedCoords(phi_wrap_param).phi
            if extreme_phi < self.phi_1:
                contains_phi = (phi < max(self.phi_1, self.phi_2) and
                                phi > extreme_phi)
            else:
                contains_phi = (phi > min(self.phi_1, self.phi_2) and
                                phi < extreme_phi)
        else:
            contains_phi = (phi > min(self.phi_1, self.phi_2) and
                            phi < max(self.phi_1, self.phi_2))
        return contains_phi and contains_theta

    def _PhiWrapParam(self):
        a = math.sin(self.d)
        b = math.cos(self.d)
        c = math.sin(self.phi_2) / math.sin(self.phi_1)
        param = math.atan2(c - b, a) / self.d
        return param

    def _DealWithZeroZ(self, A, B, C, D, E, F, boundary):
        if (A - D) == 0:
            y = 0
            x = 1
        elif (E - B) == 0:
            y = 1
            x = 0
        else:
            y = 1 / math.sqrt(((E - B) / (A - D)) ** 2 + 1)
            x = ((E - B) / (A - D)) * y
        theta = (math.atan2(y, x) + PI) % (2 * PI) - PI
        return point.SPoint(theta, 0)

    def GetParametrizedCoords(self, param_value):
        A = math.sin((1 - param_value) * self.d) / math.sin(self.d)
        B = math.sin(param_value * self.d) / math.sin(self.d)
        x = A * math.cos(self.phi_1) * math.cos(self.theta_1) + (
            B * math.cos(self.phi_2) * math.cos(self.theta_2))
        y = A * math.cos(self.phi_1) * math.sin(self.theta_1) + (
            B * math.cos(self.phi_2) * math.sin(self.theta_2))
        z = A * math.sin(self.phi_1) + B * math.sin(self.phi_2)
        new_phi = math.atan2(z, math.sqrt(x**2 + y**2))
        new_theta = math.atan2(y, x)
        return point.SPoint(new_theta, new_phi)
EDIT: I forgot to specify that if two curves are determined to intersect, I then need to have the point of intersection.
A simpler approach is to express the problem in terms of geometric primitive operations like the dot product, the cross product, and the triple product. The sign of the determinant of u, v, and w tells you which side of the plane spanned by v and w contains u. This enables us to detect when two points are on opposite sides of a plane. That's equivalent to testing whether a great circle segment crosses another great circle. Performing this test twice tells us whether two great circle segments cross each other.
The implementation requires no trigonometric functions, no division, no comparisons with pi, and no special behavior around the poles!
class Vector:
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

def dot(v1, v2):
    return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z

def cross(v1, v2):
    return Vector(v1.y * v2.z - v1.z * v2.y,
                  v1.z * v2.x - v1.x * v2.z,
                  v1.x * v2.y - v1.y * v2.x)

def det(v1, v2, v3):
    return dot(v1, cross(v2, v3))

class Pair:
    def __init__(self, v1, v2):
        self.v1 = v1
        self.v2 = v2

# Returns True if the great circle segment determined by s
# straddles the great circle determined by l
def straddles(s, l):
    return det(s.v1, l.v1, l.v2) * det(s.v2, l.v1, l.v2) < 0

# Returns True if the great circle segments determined by a and b
# cross each other
def intersects(a, b):
    return straddles(a, b) and straddles(b, a)

# Test. Note that we don't need to normalize the vectors.
print(intersects(Pair(Vector(1, 0, 1), Vector(-1, 0, 1)),
                 Pair(Vector(0, 1, 1), Vector(0, -1, 1))))
If you want to initialize unit vectors in terms of angles theta and phi, you can do that, but I recommend immediately converting to Cartesian (x, y, z) coordinates to perform all subsequent calculations.
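For example, a small helper for that conversion (a sketch assuming theta is longitude and phi is latitude in radians, matching the x/y/z formulas in the question's code):
import math
def to_cartesian(theta, phi):
    # Unit vector on the sphere for longitude theta and latitude phi
    return Vector(math.cos(phi) * math.cos(theta),
                  math.cos(phi) * math.sin(theta),
                  math.sin(phi))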
Intersection using plane trig can be calculated using the code below in UBASIC.
5 'interx.ub adapted from code at
6 'https://rosettacode.org
7 '/wiki/Find_the_intersection_of_two_lines#Sinclair_ZX81_BASIC
8 'In UBASIC by yuji kida https://en.wikipedia.org/wiki/UBASIC
10 XA=48.7815144526:'669595.708
20 YA=-117.2847245001:'2495736.332
30 XB=48.7815093807:'669533.412
40 YB=-117.2901673467:'2494425.458
50 XC=48.7824947147:'669595.708
60 YC=-117.28751374:'2495736.332
70 XD=48.77996737:'669331.214
80 YD=-117.2922957:'2494260.804
90 print "THE TWO LINES ARE:"
100 print "YAB=";YA-XA*((YB-YA)/(XB-XA));"+X*";((YB-YA)/(XB-XA))
110 print "YCD=";YC-XC*((YD-YC)/(XD-XC));"+X*";((YD-YC)/(XD-XC))
120 X=((YC-XC*((YD-YC)/(XD-XC)))-(YA-XA*((YB-YA)/(XB-XA))))/(((YB-YA)/(XB-XA))-((YD-YC)/(XD-XC)))
130 print "Lat = ";X
140 Y=YA-XA*((YB-YA)/(XB-XA))+X*((YB-YA)/(XB-XA))
150 print "Lon = ";Y
160 'print "YCD=";YC-XC*((YD-YC)/(XD-XC))+X*((YD-YC)/(XD-XC))
