Notch Reject Filtering in Python

I'm trying to implement notch-reject filtering in Python for an assignment. I tried using the notch reject filter formula from Rafael Gonzalez's book and all I got was an edge-detected image. Then I tried ideal notch rejection, and here are the results:
Input image -- Output of my program -- Expected output
Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def notch_reject_filter(shape, d0=9, u_k=0, v_k=0):
    P, Q = shape
    # Initialize filter with zeros
    H = np.zeros((P, Q))

    # Traverse through filter
    for u in range(0, P):
        for v in range(0, Q):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - P / 2 + u_k) ** 2 + (v - Q / 2 + v_k) ** 2)
            D_muv = np.sqrt((u - P / 2 - u_k) ** 2 + (v - Q / 2 - v_k) ** 2)

            if D_uv <= d0 or D_muv <= d0:
                H[u, v] = 0.0
            else:
                H[u, v] = 1.0

    return H
img = cv2.imread('input.png', 0)
img_shape = img.shape
original = np.fft.fft2(img)
center = np.fft.fftshift(original)
NotchRejectCenter = center * notch_reject_filter(img_shape, 32, 50, 50)
NotchReject = np.fft.ifftshift(NotchRejectCenter)
inverse_NotchReject = np.fft.ifft2(NotchReject) # Compute the inverse DFT of the result
plot_image = np.concatenate((img, np.abs(inverse_NotchReject)),axis=1)
plt.imshow(plot_image, "gray"), plt.title("Notch Reject Filter")
plt.show()

You got an edge-detected image because your implementation was a high-pass filter (a black circle in the middle of the spectrum), and a high-pass filter acts as an edge detector.
The ideal notch reject approach is correct, if you apply it properly.
The main idea is to filter out the undesired noise in the frequency domain: the noise shows up as white spots in the spectrum, and your job is to suppress those spots by multiplying them by black circles in the frequency domain (that multiplication is the filtering).
To improve this result, add more notch filters (H5, H6, ...) to suppress the remaining noise.
import cv2
import numpy as np
import matplotlib.pyplot as plt
#------------------------------------------------------
def notch_reject_filter(shape, d0=9, u_k=0, v_k=0):
    P, Q = shape
    # Initialize filter with zeros
    H = np.zeros((P, Q))

    # Traverse through filter
    for u in range(0, P):
        for v in range(0, Q):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - P / 2 + u_k) ** 2 + (v - Q / 2 + v_k) ** 2)
            D_muv = np.sqrt((u - P / 2 - u_k) ** 2 + (v - Q / 2 - v_k) ** 2)

            if D_uv <= d0 or D_muv <= d0:
                H[u, v] = 0.0
            else:
                H[u, v] = 1.0

    return H
#-----------------------------------------------------
img = cv2.imread('input.png', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
phase_spectrumR = np.angle(fshift)
magnitude_spectrum = 20*np.log(np.abs(fshift))
img_shape = img.shape
H1 = notch_reject_filter(img_shape, 4, 38, 30)
H2 = notch_reject_filter(img_shape, 4, -42, 27)
H3 = notch_reject_filter(img_shape, 2, 80, 30)
H4 = notch_reject_filter(img_shape, 2, -82, 28)
NotchFilter = H1*H2*H3*H4
NotchRejectCenter = fshift * NotchFilter
NotchReject = np.fft.ifftshift(NotchRejectCenter)
inverse_NotchReject = np.fft.ifft2(NotchReject) # Compute the inverse DFT of the result
Result = np.abs(inverse_NotchReject)
plt.subplot(222)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.subplot(221)
plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('magnitude spectrum')
plt.subplot(223)
plt.imshow(magnitude_spectrum*NotchFilter, "gray")
plt.title("Notch Reject Filter")
plt.subplot(224)
plt.imshow(Result, "gray")
plt.title("Result")
plt.show()
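A possible follow-up (my own sketch, not part of the original answer): the notch centers (u_k, v_k) above were read off the magnitude spectrum by hand. If you would rather locate the bright spikes programmatically, something along these lines should work; find_spike_offsets and its parameters are hypothetical names, and spikes come in symmetric pairs, so each pair only needs one notch filter:

import numpy as np

def find_spike_offsets(magnitude, exclude_radius=20, num_peaks=8):
    # magnitude: the shifted magnitude spectrum, e.g. np.abs(fshift)
    P, Q = magnitude.shape
    u, v = np.indices((P, Q))
    dist_from_center = np.sqrt((u - P / 2) ** 2 + (v - Q / 2) ** 2)

    # ignore the low-frequency region around the DC term
    masked = np.where(dist_from_center > exclude_radius, magnitude, -np.inf)

    # take the brightest remaining points as candidate spikes
    flat_idx = np.argsort(masked.ravel())[-num_peaks:]
    peaks = np.column_stack(np.unravel_index(flat_idx, (P, Q)))

    # report offsets (u_k, v_k) from the spectrum center,
    # which is the convention notch_reject_filter() expects
    return [(int(pu - P / 2), int(pv - Q / 2)) for pu, pv in peaks]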

Drive-by comment: using the for loop for the notch filter generation is very slow. That operation can be vectorized:
def notch_reject_filter_vec(shape: tuple[int, int], d0: int, u_k: int, v_k: int):
    (M, N) = shape

    H_0_u = np.repeat(np.arange(M), N).reshape((M, N))
    H_0_v = np.repeat(np.arange(N), M).reshape((N, M)).transpose()

    D_uv = np.sqrt((H_0_u - M / 2 + u_k) ** 2 + (H_0_v - N / 2 + v_k) ** 2)
    D_muv = np.sqrt((H_0_u - M / 2 - u_k) ** 2 + (H_0_v - N / 2 - v_k) ** 2)

    selector_1 = D_uv <= d0
    selector_2 = D_muv <= d0
    selector = np.logical_or(selector_1, selector_2)

    H = np.ones((M, N))
    H[selector] = 0

    return H
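A quick sanity check (my addition) that the vectorized filter matches the loop-based one, assuming both functions above are in scope:

# the two implementations should produce the same mask
H_loop = notch_reject_filter((256, 256), d0=4, u_k=38, v_k=30)
H_vec = notch_reject_filter_vec((256, 256), d0=4, u_k=38, v_k=30)
print(np.array_equal(H_loop, H_vec))  # expected: True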

Related

how to accelerate the numpy for-loop, for coloring point-cloud by its intensity

I want to color a point-cloud by its intensity.
currently I use the following for-loop to apply a colormap function to the intensity(4th-dim) of points:
import numpy as np
points = np.random.random([128*1200,4])
points_colors = np.zeros([points.shape[0], 3])
for idx, p_c in enumerate(points[:, 3]):
    points_colors[idx, :] = color_map(p_c)
points_colors /= 255.0
an example of color mapping function:
def color_map(value, minimum=0, maximum=255):
    minimum, maximum = float(minimum), float(maximum)
    ratio = 2 * (value - minimum) / (maximum - minimum)
    b = int(max(0, 255 * (1 - ratio)))
    r = int(max(0, 255 * (ratio - 1)))
    g = 255 - b - r
    return r, g, b
Coloring the point cloud this way takes much more time than directly using Open3D's original colormap (i.e. coloring by the points' x, y, z position).
How can I accelerate the process of color-mapping a point cloud by its intensity?
Other solutions that do not convert the xyzi point cloud to an xyzrgb point cloud are also welcome.
P.S. The color_map I am actually using is a bit more complicated, but it has the same output:
def rainbow_color_map(
    val,
    minval=0,
    maxval=256,
    normalize=False,
    colors=[(1, 1, 255), (1, 255, 1), (255, 1, 1)] * 10,
):
    i_f = float(val - minval) / float(maxval - minval) * (len(colors) - 1)
    i, f = int(i_f // 1), i_f % 1  # Split into whole & fractional parts.
    (r1, g1, b1), (r2, g2, b2) = colors[i], colors[i + 1]
    if normalize:
        return (
            (r1 + f * (r2 - r1)) / maxval,
            (g1 + f * (g2 - g1)) / maxval,
            (b1 + f * (b2 - b1)) / maxval,
        )
    else:
        return r1 + f * (r2 - r1), g1 + f * (g2 - g1), b1 + f * (b2 - b1)
You can modify the function to calculate the array as a whole without using a loop:
def color_map(minimum, maximum, value):
    minimum, maximum = float(minimum), float(maximum)
    ratio = 2 * (value - minimum) / (maximum - minimum)

    b = 255 * (1 - ratio)
    b[b < 0] = 0
    b = b.astype(int)

    r = 255 * (ratio - 1)
    r[r < 0] = 0
    r = r.astype(int)

    g = 255 - b - r
    points_colors = np.c_[r, g, b]

    return points_colors
Then call the function like this:
import numpy as np
points = np.random.random([128*1200,4])
minimum, maximum = np.min(points[:, 3]), np.max(points[:, 3])
points_colors = color_map(minimum, maximum, points[:, 3])
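If, as in your original loop, you still need the colors scaled to [0, 1] (e.g. for Open3D), divide afterwards just as before:

points_colors = points_colors / 255.0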

How to simulate a heat diffusion on a rectangular ring with FiPy?

I am new to solving PDEs and am experimenting with heat diffusion on a copper body shaped like a rectangular ring, using FiPy.
Here is a plot of the simulation result at several points in time.
I am using the Grid2D() for a mesh and the CellVariable.constrain() to specify boundary conditions. The green dots are centers of exterior faces where T = 273.15 + 25 (K), and blue dots are centers of interior faces where T = 273.15 + 30 (K).
Obviously, I am doing something wrong, because the temperature goes down to 0K. How should I specify boundary conditions correctly?
Here is the code.
import numpy as np
import matplotlib.pyplot as plt
import fipy
def get_mask_of_rect(mesh, x, y, w, h):
    def left_id(i, j): return mesh.numberOfHorizontalFaces + i * mesh.numberOfVerticalColumns + j
    def right_id(i, j): return mesh.numberOfHorizontalFaces + i * mesh.numberOfVerticalColumns + j + 1
    def bottom_id(i, j): return i * mesh.nx + j
    def top_id(i, j): return (i + 1) * mesh.nx + j

    j0, i0 = np.floor(np.array([x, y]) / [mesh.dx, mesh.dy]).astype(int)
    n, m = np.round(np.array([w, h]) / [mesh.dx, mesh.dy]).astype(int)
    mask = np.zeros_like(mesh.exteriorFaces, dtype=bool)
    for i in range(i0, i0 + n):
        mask[left_id(i, j0)] = mask[right_id(i, j0 + m - 1)] = True
    for j in range(j0, j0 + m):
        mask[bottom_id(i0, j)] = mask[top_id(i0 + n - 1, j)] = True
    return mask
mesh = fipy.Grid2D(Lx = 1, Ly = 1, nx = 20, ny = 20) # Grid of size 1m x 1m
k_over_c_rho = 3.98E2 / (3.85E2 * 8.96E3) # The thermal conductivity, specific heat capacity, and density of Copper in MKS
dt = 0.1 * (mesh.dx**2 + mesh.dy**2) / (4*k_over_c_rho)
T0 = 273.15 # 0 degree Celsius in Kelvin
T = fipy.CellVariable(mesh, name='T', value=T0+25)
mask_e = mesh.exteriorFaces
T.constrain(T0+25., mask_e)
mask_i = get_mask_of_rect(mesh, 0.25, 0.25, 0.5, 0.5)
T.constrain(T0+30, mask_i)
eq = fipy.TransientTerm() == fipy.DiffusionTerm(coeff=k_over_c_rho)
viewer = fipy.MatplotlibViewer(vars=[T], datamin=0, datamax=400)
plt.ioff()
viewer._plot()
plt.plot(*mesh.faceCenters[:, mask_e], '.g')
plt.plot(*mesh.faceCenters[:, mask_i], '.b')
def update():
    for _ in range(10):
        eq.solve(var=T, dt=dt)
    viewer._plot()
    plt.draw()
timer = plt.gcf().canvas.new_timer(interval=50)
timer.add_callback(update)
timer.start()
plt.show()
.constrain() does not work for internal faces (see the warning at the end of Applying internal “boundary” conditions).
You can achieve an internal fixed value condition using sources, however. As a first cut, try
mask_i = get_mask_of_rect(mesh, 0.25, 0.25, 0.5, 0.5)
mask_i_cell = fipy.CellVariable(mesh, value=False)
mask_i_cell[mesh.faceCellIDs[..., mask_i]] = True
largeValue = 1e6
eq = (fipy.TransientTerm() == fipy.DiffusionTerm(coeff=k_over_c_rho)
      - fipy.ImplicitSourceTerm(mask_i_cell * largeValue)
      + mask_i_cell * largeValue * (T0 + 30))
This constrains the cells on either side of the faces identified by mask_i to be at T0+30.
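Why this works (my gloss, not part of the original answer): inside the masked cells the two added source terms dominate the discretized equation, so it reduces to approximately largeValue * (T0 + 30) - largeValue * T = 0, i.e. T ≈ T0 + 30, while in all other cells both terms are zero and the plain diffusion equation is solved.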
I am posting my own answer for future readers.
For boundary conditions on internal faces, you should use implicit and explicit source terms in the equation, as in jeguyer's answer.
By using source terms, you don't need to compute a mask over faces at all (the get_mask_of_rect() from my question isn't required); you can build a cell mask directly, like this:
T = fipy.CellVariable(mesh, name = 'T', value = T0 + 25)
mask_e = mesh.exteriorFaces
T.constrain(T0 + 25., mask_e)
mask_i_cell = (
    (0.25 < mesh.x) & (mesh.x < 0.25 + 0.5) &
    (0.25 < mesh.y) & (mesh.y < 0.25 + 0.5)
)
large_value = 1E6
eq = fipy.TransientTerm() == (
    fipy.DiffusionTerm(coeff=k_over_c_rho) -
    fipy.ImplicitSourceTerm(mask_i_cell * large_value) +
    mask_i_cell * large_value * (T0 + 30)  # explicit source
)
viewer = fipy.MatplotlibViewer(vars = [T], datamin = T0, datamax = T0+50)

Plotting Monte Carlo Simulations for option pricing in Python

I am trying to plot the Monte Carlo barrier option prices against the number of simulations on the x-axis. This is what I have tried so far, but I am getting the error: ValueError: x and y must have same first dimension, but have shapes (10,) and (5,).
I am new to Python and, as hard as I try, I cannot find the error.
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
def mc_single_barrier_do(S0, K, T, H, r, vol, N, M):
    # Constants
    dt = T / N  # change in time
    nudt = (r - 0.5 * vol ** 2) * dt  # deterministic component
    volsdt = vol * np.sqrt(dt)  # diffusion coefficient
    erdt = np.exp(r * dt)  # discount factor

    # Standard Error Placeholders
    sum_CT = 0
    sum_CT2 = 0

    # Monte Carlo Method
    for i in range(M):
        # Barrier Crossed Flag
        BARRIER = False
        St = S0

        for j in range(N):
            epsilon = np.random.normal()
            Stn = St * np.exp(nudt + volsdt * epsilon)
            St = Stn
            Ptn = np.exp(-2. * (H - St) * (H - Stn) / (St ** 2. * volsdt ** 2.))
            Pt = Ptn
            if Pt >= npr.uniform():
                BARRIER = True

        if np.amin(St) > H and BARRIER == False:
            CT = np.maximum(St - K, 0)
        else:
            CT = 0.

        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT * CT

    C0_MC = np.exp(-r * T) * sum_CT / M
    return C0_MC
def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
    assert (method in ['MC', 'AV', 'CV'])

    mean_payoffs = np.zeros(int(np.ceil(max_sample / 10)))

    if method == 'MC':
        for n_sample in range(10, max_sample + 1, 10):
            payoffs = mc_single_barrier_do(n_sample, S0, K, T, H, r, vol, N)
            mean_payoffs[int(n_sample / 10 - 1)] = np.mean(payoffs)

    return mean_payoffs
r = 0.1
vol = 0.2
T = 2
N = 20
dt = T / N
S0 = 50
K = 50
H = 45
max_sample = 100
MC_price_estimates = sim_iterator(S0, T, r, vol, K, H, max_sample, N, method='MC')
x_axis1 = range(10, max_sample + 1, 10)
plt.plot(x_axis1, MC_price_estimates)
plt.xlabel("No. of Simulations")
plt.ylabel("Estimated option price")
plt.title("Ordinary Monte Carlo Method")
plt.legend()
plt.show()
In your function definition you used:
def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
while when calling the function you used:
MC_price_estimates = sim_iterator(S0, T, r, vol, K, H, max_sample, N, method='MC')
Python uses positional arguments, which means arguments are matched by position, not by name: whatever you pass first is bound to the first parameter. So S0 in the call was bound to max_sample in the definition. Either fix the argument order, or use keyword arguments such as S0=S0:
MC_price_estimates = sim_iterator(S0=S0, T=T, r=r, vol=vol, K=K, H=H, max_sample=max_sample, N=N, method='MC')
This is what your code looks like with all arguments passed as keyword arguments:
def mc_single_barrier_do(S0, K, T, H, r, vol, N, M):
    # Constants
    dt = T / N  # change in time
    nudt = (r - 0.5 * vol ** 2) * dt  # deterministic component
    volsdt = vol * np.sqrt(dt)  # diffusion coefficient
    erdt = np.exp(r * dt)  # discount factor

    # Standard Error Placeholders
    sum_CT = 0
    sum_CT2 = 0

    # Monte Carlo Method
    for i in range(M):
        # Barrier Crossed Flag
        BARRIER = False
        St = S0

        for j in range(N):
            epsilon = np.random.normal()
            Stn = St * np.exp(nudt + volsdt * epsilon)
            St = Stn
            Ptn = np.exp(-2. * (H - St) * (H - Stn) / (St ** 2. * volsdt ** 2.))
            Pt = Ptn
            if Pt >= npr.uniform():
                BARRIER = True

        if np.amin(St) > H and BARRIER == False:
            CT = np.maximum(St - K, 0)
        else:
            CT = 0.

        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT * CT

    C0_MC = np.exp(-r * T) * sum_CT / M
    return C0_MC
def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
    assert (method in ['MC', 'AV', 'CV'])

    mean_payoffs = np.zeros(int(np.ceil(max_sample / 10)))

    if method == 'MC':
        for n_sample in range(10, max_sample + 1, 10):
            payoffs = mc_single_barrier_do(M=n_sample, S0=S0, K=K, T=T, H=H, r=r, vol=vol, N=N)
            mean_payoffs[int(n_sample / 10 - 1)] = np.mean(payoffs)

    return mean_payoffs
r = 0.1
vol = 0.2
T = 2
N = 20
dt = T / N
S0 = 50
K = 50
H = 45
max_sample = 100
MC_price_estimates = sim_iterator(S0=S0, T=T, r=r, vol=vol, K=K, H=H, max_sample=max_sample, N=N, method='MC')
x_axis1 = range(10, max_sample + 1, 10)
plt.plot(x_axis1, MC_price_estimates)
plt.xlabel("No. of Simulations")
plt.ylabel("Estimated option price")
plt.title("Ordinary Monte Carlo Method")
plt.legend()
plt.show()
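One small follow-up (my observation, not part of the argument fix above): plt.legend() shows nothing and emits a warning unless the plotted line has a label, so either pass one, e.g. plt.plot(x_axis1, MC_price_estimates, label='MC estimate'), or drop the legend() call.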

Efficient boolean disk packing mask

I need to make a mask of hexagonally packed disks. The code below does the job, but I don't feel like it's efficient. I'm learning Python as well, so I'd love to get some expert advice on how to do this more efficiently.
import numpy as np
import matplotlib.pyplot as plt

r = 0.01
X, Y = np.mgrid[0:1:1000j, 0:1:1000j]
mask = np.full(X.shape, False)

px, py = np.mgrid[r : 1 : 2 * r * np.sqrt(3), r : 1 + r + np.finfo(float).eps : 2 * r]
px = np.vstack((px, px + r * np.sqrt(3)))
py = np.vstack((py, py - r))

fig, ax = plt.subplots(figsize=(12, 12), dpi=50)
img = ax.imshow(mask * 1, cmap='gray', vmin=0, vmax=1, extent=[0, 1, 0, 1])

for i, _ in np.ndenumerate(px):  # is this loop dumb and inefficient?
    C = (X - px[i]) ** 2 + (Y - py[i]) ** 2
    mask = mask | (C < r ** 2)

img.set_data(mask * 1)
ax.set_aspect(1)
In particular, is there a way to vectorize the for loop?
Thanks
It may be efficient to create a single tile of the pattern, and then repeat it horizontally and vertically as needed:
Create a tile:
import numpy as np
import matplotlib.pyplot as plt
r = 0.01
sq3 = np.sqrt(3)
samples = 1000
X, Y = np.mgrid[1:(1 + 2 * sq3):int(sq3 * samples) * 1j, 0:2:samples * 1j]
XY = np.c_[X.flatten(), Y.flatten()]
# coordinates of centers of disks; suffices to take disks of radius 1 here
p = np.array([[1, 1], [(1 + sq3), 0], [(1 + sq3), 2], [(1 + 2 * sq3), 1]])
# compute the distance from each point of XY to each disk center
dist = (XY**2).sum(axis=1).reshape(-1, 1) + (p**2).sum(axis=1) - 2 * (XY @ p.T)
# mask points outside the disks
tile = (np.min(dist, axis=1) < 1).reshape(X.shape)
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_aspect(1)
plt.imshow(tile, extent=[0, 2 * r, r, (1 + 2 * sq3) * r]);
It gives:
Repeat the tile:
# the number of times to repeat the tile in the horizontal and vertical directions
h, w = 20, 30
# downsample the tile as needed and replicate
sample_rate = 10
mask = np.tile(tile[::sample_rate, ::sample_rate], (h, w))
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_aspect(1)
ax.imshow(mask, extent=[0, 2 * r * w, 0, 2 * sq3 * r * h]);
This gives:
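To answer the literal question about vectorizing the original loop: another option (a sketch of mine, assuming SciPy is available) keeps the original grid and disk centers but replaces the per-disk loop with a nearest-neighbour query, so each pixel is only tested against its closest center:

import numpy as np
from scipy.spatial import cKDTree

r = 0.01
X, Y = np.mgrid[0:1:1000j, 0:1:1000j]

# disk centers, exactly as in the question
px, py = np.mgrid[r : 1 : 2 * r * np.sqrt(3), r : 1 + r + np.finfo(float).eps : 2 * r]
px = np.vstack((px, px + r * np.sqrt(3)))
py = np.vstack((py, py - r))
centers = np.c_[px.ravel(), py.ravel()]

# distance from every grid point to its nearest disk center
tree = cKDTree(centers)
dist, _ = tree.query(np.c_[X.ravel(), Y.ravel()], k=1)

mask = (dist < r).reshape(X.shape)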

HSI image is equalized incorrectly in Python

I have implemented equalization for HSI (hue, saturation, intensity) images, using the numpy and math modules.
First, I convert the RGB image to HSI using these functions:
import math
import numpy as np
def rgb2hsi_px(px):
    eps = 0.00000001
    r, g, b = float(px[0]) / 255, float(px[1]) / 255, float(px[2]) / 255

    # Hue component
    numerator = 0.5 * ((r - g) + (r - b))
    denominator = math.sqrt((r - g) ** 2 + (r - b) * (g - b))
    theta = math.acos(numerator / (denominator + eps))
    h = theta
    if b > g:
        h = 2 * math.pi - h

    # Saturation component
    num = min(r, g, b)
    den = r + g + b
    if den == 0:
        den = eps
    s = 1 - 3 * num / den
    if s == 0:
        h = 0

    # Intensity component
    i = (r + g + b) / 3

    return h, s, i

def rgb2hsi(image):
    hsi_image = np.zeros_like(image).astype('float')
    height, width, _ = image.shape
    for x in range(height):
        for y in range(width):
            px = rgb2hsi_px(image[x, y])
            hsi_image[x, y] = px
    return np.array(hsi_image)
Then I equalize the intensity channel of the converted image. The equalize function was implemented following this article:
import math
import numpy as np
def equalize(img):
    eps = 0.000000000001
    h, w, _ = img.shape
    num_of_pxs = h * w
    mean = 0.0
    new_img = np.array(img)

    while not abs(mean - 0.5) < eps:
        for i in range(h):
            for j in range(w):
                mean += new_img[i, j, 2]

        mean /= num_of_pxs

        if mean != 0.5:
            theta = math.log(0.5, math.e) / math.log(mean, math.e)
            for x in range(h):
                for y in range(w):
                    px = list(new_img[x, y])
                    px[2] = (px[2] ** theta)
                    new_img[x, y] = px

    return new_img
Afterwards, I convert the HSI image back to RGB with the following code:
import math
import numpy as np
def hsi2rgb_px(px):
    h, s, i = float(px[0]), float(px[1]), float(px[2]) * 255

    if 0 <= h < 2 * math.pi / 3:
        b = i * (1 - s)
        r = i * (1 + (s * math.cos(h)) / math.cos(math.pi / 3 - h))
        g = 3 * i - (r + b)
    elif 2 * math.pi / 3 <= h < 4 * math.pi / 3:
        r = i * (1 - s)
        g = i * (1 + (s * math.cos(h - 2 * math.pi / 3) / math.cos(math.pi / 3 - (h - 2 * math.pi / 3))))
        b = 3 * i - (r + g)
    elif 4 * math.pi / 3 <= h <= 2 * math.pi:
        g = i * (1 - s)
        b = i * (1 + (s * math.cos(h - 4 * math.pi / 3) / math.cos(math.pi / 3 - (h - 4 * math.pi / 3))))
        r = 3 * i - (g + b)
    else:
        raise IndexError('h is out of range: {}'.format(h))

    return round(r), round(g), round(b)

def hsi2rgb(image):
    rgb_image = np.zeros_like(image).astype(np.uint8)
    height, width, _ = image.shape
    for x in range(height):
        for y in range(width):
            px = hsi2rgb_px(image[x, y])
            rgb_image[x, y] = px
    return np.array(rgb_image)
But the equalization gives an incorrect result. The file size (in megabytes) of the equalized image is larger than that of the original; I'm not sure whether that is normal, but if it is, please let me know. The other problem is that the output image has worse quality.
Here is an original image:
And the equalized image:
Can someone help me fix my code, or point me to a similar article or question?
[UPDATE]
Driver program to test the algorithm:
import matplotlib.image as mp_img
input_img = mp_img.imread('input.bmp')
hsi_img = rgb2hsi(input_img)
equalized_img = equalize(hsi_img)
out_img = hsi2rgb(equalized_img)
mp_img.imsave('out.bmp', out_img)
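One way to narrow the problem down (my suggestion, not a confirmed fix): run the round trip without the equalization step and check whether the RGB -> HSI -> RGB conversion alone already degrades the image; if it does, the problem is in the conversion functions rather than in equalize(). A minimal sketch using the functions above:

# round-trip sanity check, skipping the equalization step
import numpy as np
import matplotlib.image as mp_img

input_img = mp_img.imread('input.bmp')
round_trip = hsi2rgb(rgb2hsi(input_img))
mp_img.imsave('round_trip.bmp', round_trip)

# how far apart are the original and the round-tripped pixels?
print(np.abs(input_img.astype(float) - round_trip.astype(float)).max())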
