I'm testing a TOF (time-of-flight) camera from Broadcom.
It has hexagonal pixels.
I want to represent the histogram in 3D, as in the manufacturer's utility software.
I tried the vedo library, but I can't set the Z values, reorder the cells, or anchor the bars to the ground plane.
from vedo import *
from vedo.pyplot import histogram
import numpy as np
N = 2000
x = np.random.randn(N) * 1.0
y = np.random.randn(N) * 1.5
# hexagonal binned histogram:
histo = histogram(x, y,
                  bins=100,
                  mode='hexbin',
                  xtitle="\sigma_x =1.0",
                  ytitle="\sigma_y =1.5",
                  ztitle="counts",
                  fill=True,
                  cmap='terrain',
                  )
# add a formula:
f = r'f(x, y)=A \exp \left(-\left(\frac{\left(x-x_{o}\right)^{2}}'
f+= r'{2 \sigma_{x}^{2}}+\frac{\left(y-y_{o}\right)^{2}}'
f+= r'{2 \sigma_{y}^{2}}\right)\right)'
formula = Latex(f, c='k', s=1.5).rotateX(90).rotateZ(90).pos(1.5,-2,1)
show(histo, formula, axes=1, viewup='z')
You can easily create it yourself, e.g.:
from vedo import *
import numpy as np
settings.defaultFont = "Theemim"
vals = np.abs(np.random.randn(8*4)) # heights
cols = colorMap(vals, "RdYlBu")
items = []
k = 0
for i in range(8):
    for j in range(4):
        val = vals[k]
        col = cols[k]
        x, y, z = [i + j%2/2, j - j%2/6, val + 0.01]  # stagger alternate rows
        hexa = Circle([x, y], r=0.55, res=6)          # res=6 turns the circle into a hexagon
        hbar = hexa.extrude(val)                      # create the hex bar
        hbar.lighting("default").flat().c(col)
        txt = Text3D(precision(val, 3), [x, y, z], s=.12, justify='center', c='k')
        items += [hbar, txt]
        k += 1
show(items, axes=dict(xtitle="x-cell"))
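A note on the design of the snippet above: res=6 turns each Circle into a hexagon, and the i + j%2/2 and j - j%2/6 offsets stagger alternate rows into a honeycomb packing. To drive it from real measurements, the random vals array would simply be replaced by the flattened per-pixel counts from the camera (my reading of the snippet, not something stated in the original answer).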
I am working with a projected coordinate dataset that contains x, y, z data (a 432-line CSV with X, Y, Z headers, not attached). I want to import this dataset, compute a new grid based on user input, and then run some statistics on the points that fall within each grid cell. I've gotten to the point where I have two lists (raw_lst with 431 (x, y, z) tuples and grid_lst with 16 (x, y) tuples, which I call (n, e)), but when I iterate through them to calculate the average and density for the new grid, it all falls apart. I am trying to output a final list that contains the grid_lst x and y values along with the calculated average z and density values.
I searched the numpy and scipy libraries, thinking they might already have something for this, but was unable to find anything. Let me know if any of you have any thoughts.
(Image: sample x, y, z points; the red dots mark the new grid.)
import pandas as pd
import math
df=pd.read_csv("Sample_xyz.csv")
N=df["X"]
E=df["Y"]
Z=df["Z"]
#grid = int(input("Specify grid value "))
grid = float(0.5) #for quick testing the grid value is set to 0.5
#max and total calculate the input area extents
max_N = math.ceil(max(N))
max_E = math.ceil(max(E))
min_E = math.floor(min(E))
min_N = math.floor(min(N))
total_N = max_N - min_N
total_E = max_E - min_E
total_N = int(total_N/grid)
total_E = int(total_E/grid)
#N_lst and E_lst calculate the mid points based on the input file extents and the specified grid value
N_lst = []
n = float(max_N) - (0.5*grid)
for x in range(total_N):
    N_lst.append(n)
    n = n - grid
E_lst = []
e = float(max_E) - (0.5*grid)
for x in range(total_E):
    E_lst.append(e)
    e = e - grid
grid_lst = []
for n in N_lst:
    for e in E_lst:
        grid_lst.append((n,e))
#converts the imported dataframe to a list
raw_lst = df.to_records(index=False)
raw_lst = list(raw_lst)
#print(grid_lst) # grid_lst is a list of 16 (n,e) tuples for the new grid coordinates.
#print(raw_lst) # raw_lst is a list of 441 (n,e,z) tuples from the imported file - calling these x,y,z.
#The calculation where it all falls apart.
t = []
average_lst = []
for n, e in grid_lst:
    for x, y, z in raw_lst:
        if n >= x-(grid/2) and n <= x+(grid/2) and e >= y-(grid/2) and e <= y+(grid/2):
            t.append(z)
    average = sum(t)/len(t)
    density = len(t)/grid
    average_lst = (n,e,average,density)
    print(average_lst)
# print("The length of this list is " + str(len(average_lst)))
# print("The length of t is " + str(len(t)))
SAMPLE CODE FOR RUNNING
import random
grid=5
raw_lst = [(random.randrange(0,10), random.randrange(0,10), random.randrange(0,2))for i in range(100)]
grid_lst = [(2.5,2.5),(2.5,7.5),(7.5,2.5),(7.5,7.5)]
t = []
average_lst = []
for n, e in grid_lst:
    for x, y, z in raw_lst:
        if n >= x-(grid/2) and n <= x+(grid/2) and e >= y-(grid/2) and e <= y+(grid/2):
            t.append(z)
    average = sum(t)/len(t)
    density = len(t)/grid
    average_lst = (n,e,average,density)
    print(average_lst)
Some advice:
when working with arrays, use numpy; it has many more capabilities.
when working with grids, it is often handier to use the x-coords and y-coords as separate arrays.
Comments on the solution:
obviously you have a grid, or rather a box, grid_lst. We generate it as a numpy meshgrid (gx, gy).
you have a number of points, raw_lst. We generate each element of it as a 1-dimensional numpy array.
you want to select the r-points that are in the g-box. We use the percentage formula for that: tx = (rx - gxMin)/(gxMax - gxMin).
if tx and ty are within [0, 1], we store the index.
as an intermediate result we get all indices of raw_lst that lie within the g-box.
with those indices you can extract the elements of raw_lst that lie within the g-box and do some statistics on them.
note that I have omitted the z-coordinate; you will have to extend this solution, e.g. with the per-cell statistics sketched after the code below.
--
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.colors as mclr
from matplotlib import cm

f10 = 'C://gcg//picStack_10.jpg'   # output file name
f20 = 'C://gcg//picStack_20.jpg'   # output file name

def plot_grid(gx, gy, rx, ry, Rx, Ry, fOut):
    fig = plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111)
    myCmap = mclr.ListedColormap(['blue','lightgreen'])
    ax.pcolormesh(gx, gy, gx, edgecolors='b', cmap=myCmap, lw=1, alpha=0.3)
    ax.scatter(rx, ry, s=150, c='r', alpha=0.7)
    ax.scatter(Rx, Ry, marker='s', s=150, c='gold', alpha=0.5)
    ax.set_aspect('equal')
    plt.savefig(fOut)
    plt.show()

def get_g_grid(nx, ny):
    ix = 2.5 + 5*np.linspace(0,1,nx)
    iy = 2.5 + 5*np.linspace(0,1,ny)
    gx, gy = np.meshgrid(ix, iy, indexing='ij')
    return gx, gy

def get_raw_points(N):
    rx, ry = np.random.randint(0,10,N), np.random.randint(0,10,N)
    rz, rv = np.random.randint(0,2,N), np.random.uniform(low=0.0, high=1.0, size=N)
    return rx, ry, rz, rv

N = 100
nx, ny = 2, 2
gx, gy = get_g_grid(nx, ny)
rx, ry, rz, rv = get_raw_points(N)
plot_grid(gx, gy, rx, ry, 0, 0, f10)

def get_the_points_inside(gx, gy, rx, ry):
    #----- run through the g-grid -------------------------------
    nx, ny = gx.shape
    N = len(rx)
    index = []
    for jx in range(0, nx-1):
        for jy in range(0, ny-1):
            #--- run through the r-points
            for jr in range(N):
                test_x = (rx[jr]-gx[jx,jy]) / (gx[jx+1,jy] - gx[jx,jy])
                test_y = (ry[jr]-gy[jx,jy]) / (gy[jx,jy+1] - gy[jx,jy])
                if (0.0 <= test_x <= 1.0) and (0.0 <= test_y <= 1.0):
                    index.append(jr)
    return index

index = get_the_points_inside(gx, gy, rx, ry)
Rx, Ry, Rz, Rv = rx[index], ry[index], rz[index], rv[index]
plot_grid(gx, gy, rx, ry, Rx, Ry, f20)
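An aside from me (not part of the answer above): scipy actually ships a ready-made routine for exactly the per-cell statistics the question asks about, scipy.stats.binned_statistic_2d. A minimal sketch, with random stand-ins for the real CSV columns:
import numpy as np
from scipy import stats
# hypothetical stand-ins for the question's X, Y, Z columns
rng = np.random.default_rng(0)
rx, ry = rng.uniform(0, 10, 100), rng.uniform(0, 10, 100)
rz = rng.uniform(0, 2, 100)
grid = 5.0
edges = np.arange(0, 10 + grid, grid)   # cell edges (not midpoints)
# per-cell mean of z and per-cell point count, one call each
mean_z, xe, ye, _ = stats.binned_statistic_2d(rx, ry, rz, statistic='mean', bins=[edges, edges])
count, _, _, _ = stats.binned_statistic_2d(rx, ry, rz, statistic='count', bins=[edges, edges])
density = count / grid**2               # points per unit area
mean_z[i, j] is then the average z of the points in cell (i, j); empty cells come back as NaN.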
I have plotted ABC field contours, and now I want to add vector arrows as in Plotting Glyphs (Vectors), but I don't know how to make a mesh for 3D data.
import pyvista as pv
import numpy as np
from numpy import mgrid
xmin = -800.
xmax = 800.
Lx = xmax-xmin
B0 = 1
k = 1
alpha = 2.0*np.pi*k/Lx
x, y, z = Lx*mgrid[0:1:51j, 0:1:51j, 0:1:51j]
Bx = B0*(np.sin(alpha*z) + np.cos(alpha*y))
By = B0*(np.sin(alpha*x) + np.cos(alpha*z))
Bz = B0*(np.sin(alpha*y) + np.cos(alpha*x))
B = Bx+By+Bz
grid = pv.StructuredGrid(x, y, z)
grid["ABC field 3D isocontour"] = B.flatten()
contours = grid.contour()
pv.set_plot_theme('document')
p = pv.Plotter()
p.add_mesh(contours)
#contours.plot(show_edges=True, show_grid=True, cpos="xy")
contours.plot(show_grid=True,screenshot='abc3d.png')
#p.show(screenshot='abc3d.png')
Result:
Thanks to the developer's response, I now understand better how the mesh is defined. The data sets above need np.column_stack(): PyVista expects point-data vectors as a single (N, 3) array, so the three scalar component grids are raveled and stacked column-wise.
import pyvista as pv
import numpy as np
from numpy import mgrid
import matplotlib.pyplot as plt
print('initializing domain')
xmin = -800.
xmax = 800.
Lx = xmax-xmin
B0 = 1
k = 1
alpha = 2.0*np.pi*k/Lx
x, y, z = Lx*mgrid[0:1:51j, 0:1:51j, 0:1:51j]
print('initializing 3D B field')
Bx = B0*(np.sin(alpha*z) + np.cos(alpha*y))
By = B0*(np.sin(alpha*x) + np.cos(alpha*z))
Bz = B0*(np.sin(alpha*y) + np.cos(alpha*x))
B = np.column_stack((Bx.ravel(), By.ravel(), Bz.ravel()))
grid = pv.StructuredGrid(x, y, z)
grid["ABC field magnitude"] = np.linalg.norm(B, axis=1)
grid["ABC field vectors"] = B
grid.set_active_vectors("ABC field vectors")
contours = grid.contour(8, scalars="ABC field magnitude")
arrows = contours.glyph(orient="ABC field vectors", factor=200.0)
arrows_grid = grid.glyph(orient="ABC field vectors", factor=50.0)
print('plotting')
pv.set_plot_theme('document')
p = pv.Plotter(notebook=0, shape=(2,2))
cmap = plt.cm.get_cmap("viridis", 4)
#p.background_color='white'
#p.window_size
p.add_mesh(grid, cmap=cmap)
p.add_mesh(arrows_grid)
p.subplot(0,1)
slices = grid.slice_orthogonal(x=20, y=20, z=30)
#slices = grid.slice_orthogonal()
p.add_mesh(slices, cmap=cmap)
p.subplot(1,0)
p.add_mesh(contours, opacity=1)
p.show_grid()
p.subplot(1,1)
p.add_mesh(arrows)
p.show_grid()
p.link_views()
p.view_isometric()
p.show(screenshot='abc3d.png')
Result
Arnold–Beltrami–Childress Magnetic Field
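One addendum from me (not in the original post): glyphing every node of the 51x51x51 grid creates over 130,000 arrows, which can be heavy to render. pyvista's glyph filter accepts a tolerance argument that subsamples the points before orienting the arrows, e.g.
arrows_grid = grid.glyph(orient="ABC field vectors", factor=50.0, tolerance=0.05)
where 0.05 (a fraction of the bounding-box length used to merge nearby points) is just an illustrative value, not a tuned one.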
I am plotting 2D images of energy and density distributions. There is always a slight misalignment in the mapping: the very first "columns" seem to wrap around to the last columns of the plot.
I have attached a link to a test data file.
Data files
Here is the plot:
Is there any way to prevent this?
The relevant part of the plotting code is as follows:
import numpy as np
import matplotlib.pyplot as plt
import pylab as pyl
import scipy.stats as ss
import matplotlib.ticker as ticker
import matplotlib.transforms as tr
#%matplotlib inline
pi = 3.1415
n = 5e24 # density plasma
m = 9.109e-31
eps = 8.85e-12
e = 1.6021725e-19
c = 3e8
wp=np.sqrt(n*e*e/(m*eps))
kp = np.sqrt(n*e*e/(m*eps))/c #plasma wavenumber
case=400
## decide on the target range of analysis for multiples
start= 20500
end = 21500
gap = 1000
## Multiple plots
def target_range(start, end, gap):
    while start <= end:
        yield start
        start += gap

for step in target_range(start, end, gap):
    fdata = np.genfromtxt('./beam_{}'.format(step)).reshape(-1,6)
    ## dimensions, dt, and superparticle
    xBoxsize = 50e-6   # window size
    yBoxsize = 80e-6   # window size
    xbind = 10
    ybind = 1
    dx = 4e-8    # cell size
    dy = 4e-7    # cell size
    dz = 1e-6    # assumed to be the same as dy
    dt = 1.3209965456e-16
    sptcl = 1.6e10
    xsub = 0e-6
    xmax = dt*step*c
    xmin = xmax - xBoxsize
    ysub = 1e-7
    ymin = ysub               # to make our view window
    ymax = yBoxsize - ysub
    xbins = int((xmax - xmin)/(dx*xbind))
    ybins = int((ymax - ymin)/(dy*ybind))
    #zbins = int((zmax - zmin)/dz)   # option for 3D
    # define "data_arr" as a pair of 2D arrays of size xbins x ybins
    data_arr = np.zeros((2, xbins, ybins), dtype=float)
    for line in fdata:
        x = int((line[0]-xmin)/(dx*xbind))
        y = int((line[1]-ymin)/(dy*ybind))
        #z = int((line[2]-zmin)/dz)
        if x >= xbins: x = xbins - 1
        if y >= ybins: y = ybins - 1
        #if z >= zbins: z = zbins - 1
        data_arr[0, x, y] = data_arr[0, x, y] + 1   # cumulatively add up the number of particles
        energy_total = np.sqrt(1 + line[2]*line[2]/(c*c) + line[3]*line[3]/(c*c))/0.511
        data_arr[1, x, y] += energy_total
        # array 1 holds the energy while array 0 holds the particle counts
    ## make the average energy: total energy / particle number
    with np.errstate(divide='ignore', invalid='ignore'):
        en_arr = np.true_divide(data_arr[1], data_arr[0])   # total energy / number of particles
    en_arr[en_arr == np.inf] = 0
    en_arr = np.nan_to_num(en_arr)
    en_arr = en_arr.T
    ## this part is the real density of the distribution
    data_arr[0] = data_arr[0] * sptcl/dx/dy   # in m-3
    d = data_arr[0].T
    ## plot and save density and energy distribution figures
    den_dist = plt.figure(1)
    plt.imshow(d, origin='lower', aspect='auto', cmap=plt.get_cmap('gnuplot'),
               extent=(xmin/1e-3, xmax/1e-3, ymin/1e-6, ymax/1e-6))
    plt.title('Density_dist [m-3]_{}'.format(step))
    plt.xlabel('distance [mm]')
    plt.ylabel('y [um]')
    plt.colorbar()
    plt.show()
    den_dist.savefig("./Qen_distribution_{}.png".format(step), format='png')
    # note -- cmap options: rainbow, hot, jet, gnuplot, plasma
    energy_dist = plt.figure(2)
    plt.imshow(en_arr, origin='lower', aspect='auto', cmap=plt.get_cmap('jet'),
               extent=(xmin/1e-3, xmax/1e-3, ymin/1e-6, ymax/1e-6))
    plt.title('Energy_dist [MeV]_{}'.format(step))
    plt.xlabel('distance [mm]')
    plt.ylabel('y [um]')
    plt.colorbar()
    plt.show()
    energy_dist.savefig("./Qenergy_distribution_{}.png".format(step), format='png')
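(An editorial aside, not part of the original post: one plausible cause of the wrap-around is that x = int((line[0]-xmin)/(dx*xbind)) can become negative for particles lying just below xmin, and a negative index in numpy wraps to the end of the axis, i.e. into the last columns. The loop guards the upper bound but not the lower one; a symmetric guard would rule this out:
if x < 0: x = 0
if y < 0: y = 0
placed right next to the existing x >= xbins and y >= ybins checks.)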
My data looks like this.
I want to interpolate it onto a 4-cell grid. Each cell should just hold the average values of all the points lying inside it.
The output should then look like this.
Thus we have converted the entire data set into a 2x2 matrix. Each cell of this matrix holds the average x coordinate and average y coordinate of all the points lying inside it.
A1 = (3, -3) ; A2 = (3.5, 1.5)
A3 = (-1, -3) ; A4 = (-2, 1)
===== WHAT I'VE TRIED =====
avg = [[
(
( mat[row][col][0]
+ mat[row][col+1][0]
+ mat[row+1][col][0]
+ mat[row+1][col+1][0] ) / 4.0
,
( mat[row][col][1]
+ mat[row][col+1][1]
+ mat[row+1][col][1]
+ mat[row+1][col+1][1] ) / 4.0
)
for col in range(0, len(mat[0]), 2) ]
for row in range(0, len(mat), 2)
]
I'm not that good with numpy/scipy; I think this could be vastly improved in terms of elegance and efficiency, but it works:
-> jupyter notebook with intermediate plots
Final code:
import numpy as np
import matplotlib.pyplot as plt
import math
data = np.random.uniform(low=-2.0, high=2.0, size=(2,100))
dataX = data[0]
dataY = data[1]
#plot the data
plt.plot(data[0], data[1], 'b+')
gridSize = 1.0
# grid coordinates are lower left point of grid rectangles
gridMaxX = math.floor(max(dataX) / gridSize)
gridMaxY = math.floor(max(dataY) / gridSize)
gridMinX = math.floor(min(dataX) / gridSize)
gridMinY = math.floor(min(dataY) / gridSize)
gridX = np.arange(gridMinX,gridMaxX + gridSize, gridSize)
gridY = np.arange(gridMinY,gridMaxY + gridSize, gridSize)
#plot the grid
for ix, x in enumerate(gridX):
    plt.axvline(x=x)
for iy, y in enumerate(gridY):
    plt.axhline(y=y)
#iterate the grid
for gridPosX in gridX:
    for gridPosY in gridY:
        inCell = lambda x, y: (gridPosX < x and x < gridPosX + gridSize
                               and gridPosY < y and y < gridPosY + gridSize)
        pointsInCell = [(x, y) for (x, y) in zip(dataX, dataY) if inCell(x, y)]
        if len(pointsInCell) > 0:
            xPos, yPos = zip(*pointsInCell)
            plt.plot(np.mean(xPos), np.mean(yPos), 'ro')
plt.show()
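If the points already sit in a regular 2D matrix, as in the question's own attempt, the 2x2 block average can also be written as a single numpy reshape. A sketch under that assumption (mat, with even side lengths, stands in for the question's matrix of (x, y) pairs):
import numpy as np
# hypothetical input: a 4x4 grid of (x, y) points
mat = np.random.uniform(-4.0, 4.0, size=(4, 4, 2))
rows, cols = mat.shape[:2]
# group into 2x2 blocks, then average within each block
avg = mat.reshape(rows // 2, 2, cols // 2, 2, 2).mean(axis=(1, 3))
# avg[i, j] holds the (mean x, mean y) of block (i, j)
This avoids the explicit index arithmetic of the list comprehension and works for any even-sized matrix.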
Main problem: how can the scipy.signal.cwt() function be inverted?
I have seen that Matlab has an inverse continuous wavelet transform function which returns the original form of the data from the wavelet transform, with the option of filtering out the slices you don't want.
MATLAB inverse cwt function
Since scipy doesn't appear to have an equivalent function, I have been trying to figure out how to get the data back into its original form, while removing the noise and background.
How do I do this?
I tried squaring the transform to remove negative values, but this gives me values that are way too large and not quite right.
Here is what I have been trying:
# Compute the wavelet transform
widths = range(1, 11)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data, then square?
WT_to_original_data = (xy['y'] * cwtmatr)**2
And here is a fully runnable short script to show the kind of data I am working with and what I have so far:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# Make some random data with peaks and noise
def make_peaks(x):
    bkg_peaks = np.zeros(len(x))
    desired_peaks = np.zeros(len(x))
    # Make peaks which contain the data desired
    # (mid range/frequency peaks)
    for i in range(0, 10):
        center = x[-1] * np.random.random() - x[0]
        amp = 60 * np.random.random() + 10
        width = 10 * np.random.random() + 5
        desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    # Also make background peaks (not desired)
    for i in range(0, 3):
        center = x[-1] * np.random.random() - x[0]
        amp = 40 * np.random.random() + 10
        width = 100 * np.random.random() + 100
        bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    return bkg_peaks, desired_peaks

x = np.array(range(0, 1000))
bkg_peaks, desired_peaks = make_peaks(x)
y_noise = np.random.normal(loc=30, scale=10, size=len(x))
y = bkg_peaks + desired_peaks + y_noise
xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])

# Compute the wavelet transform
# I can't figure out what the widths are or do
# (they are the scales, in samples, at which the ricker wavelet is evaluated)
widths = range(1, 11)
# Ricker is the 2nd derivative of a Gaussian
# (*close* to what *most* of the features are in my data)
# (they're actually Lorentzians and Breit-Wigner-Fano lines)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data, then square?
WT = (xy['y'] * cwtmatr)**2

# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(4, 3, 1)
ax = {}
for i in range(0, 11):
    ax[i] = fig.add_subplot(4, 3, i+2)
ax_desired_transformed_data = fig.add_subplot(4, 3, 12)
ax_raw_data.plot(xy['x'], xy['y'], 'g-')
for i in range(0, 10):
    ax[i].plot(xy['x'], WT[i])
ax_desired_transformed_data.plot(xy['x'], desired_peaks, 'k-')
fig.tight_layout()
plt.show()
This script will output this image:
Where the first plot is the raw data, the middle plots are the wavelet transforms and the last plot is what I want to get out as the processed (background and noise removed) data.
Does anyone have any suggestions? Thank you so much for the help.
I ended up finding a package called mlpy which provides an inverse wavelet transform function, mlpy.wavelet.iuwt (paired with the forward transform mlpy.wavelet.uwt). This is the runnable script I ended up with, which may interest people trying to do noise or background removal:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mlpy.wavelet as wave

# Make some random data with peaks and noise
############################################################
def gen_data():
    def make_peaks(x):
        bkg_peaks = np.zeros(len(x))
        desired_peaks = np.zeros(len(x))
        # Make peaks which contain the data desired
        # (mid range/frequency peaks)
        for i in range(0, 10):
            center = x[-1] * np.random.random() - x[0]
            amp = 100 * np.random.random() + 10
            width = 10 * np.random.random() + 5
            desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        # Also make background peaks (not desired)
        for i in range(0, 3):
            center = x[-1] * np.random.random() - x[0]
            amp = 80 * np.random.random() + 10
            width = 100 * np.random.random() + 100
            bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        return bkg_peaks, desired_peaks

    # make x axis
    x = np.array(range(0, 1000))
    bkg_peaks, desired_peaks = make_peaks(x)
    avg_noise_level = 30
    std_dev_noise = 10
    size = len(x)
    scattering_noise_amp = 100
    scat_center = 100
    scat_width = 15
    scat_std_dev_noise = 100
    y_scattering_noise = (np.random.normal(scattering_noise_amp, scat_std_dev_noise, size)
                          * np.e**(-(x-scat_center)**2/(2*scat_width**2)))
    y_noise = np.random.normal(avg_noise_level, std_dev_noise, size) + y_scattering_noise
    y = bkg_peaks + desired_peaks + y_noise
    xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])
    return xy

# Random data generated
#############################################################
xy = gen_data()

# Pad to a 2**n amount of data
new_y, bool_y = wave.pad(xy['y'])
orig_mask = np.where(bool_y == True)

# wavelet transform parameters
levels = 8
wf = 'h'
k = 2

# Remove noise first
# Wavelet transform
wt = wave.uwt(new_y, wf, k, levels)
# Matrix of the difference between each wavelet level and the original data
diff_array = np.array([(wave.iuwt(wt[i:i+1], wf, k) - new_y) for i in range(len(wt))])
# Index of the level which is most similar to the original data (to obtain smoothed data)
indx = np.argmin(np.sum(diff_array**2, axis=1))
# Use the wavelet levels around this region
noise_wt = wt[indx:indx+1]
# smoothed data in 2^n length
new_y = wave.iuwt(noise_wt, wf, k)

# Background removal
error = 10000
errdiff = 100
i = -1
iter_y_dict = {0: np.copy(new_y)}
bkg_approx_dict = {0: np.array([])}
while abs(errdiff) >= 1e-24:
    i += 1
    # Wavelet transform
    wt = wave.uwt(iter_y_dict[i], wf, k, levels)
    # Assume the last slices are the lowest frequencies (background approximation)
    bkg_wt = wt[-3:-1]
    bkg_approx_dict[i] = wave.iuwt(bkg_wt, wf, k)
    # Get the error
    errdiff = error - sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    error = sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    # Make every peak higher than bkg_wt
    diff = (new_y - bkg_approx_dict[i])
    peak_idxs_to_remove = np.where(diff > 0.)[0]
    iter_y_dict[i+1] = np.copy(new_y)
    iter_y_dict[i+1][peak_idxs_to_remove] = np.copy(bkg_approx_dict[i])[peak_idxs_to_remove]

# new data without noise and background
new_y = new_y[orig_mask]
bkg_approx = bkg_approx_dict[len(bkg_approx_dict.keys()) - 1][orig_mask]
new_data = diff[orig_mask]

##############################################################
# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(121)
ax_WT = fig.add_subplot(122)
ax_raw_data.plot(xy['x'], xy['y'], 'g')
for bkg in bkg_approx_dict.values():
    ax_raw_data.plot(xy['x'], bkg[orig_mask], 'k')
ax_WT.plot(xy['x'], new_data, 'y')
fig.tight_layout()
plt.show()
And here is the output I am getting now:
As you can see, there is still a problem with the background removal (it shifts to the right after each iteration), but that is a different question, which I will address here.
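For readers who cannot install mlpy (it is no longer actively maintained), a similar smooth-then-reconstruct round trip can be sketched with PyWavelets (pywt). This is my substitution, not the method used above; the wavelet name and level count are illustrative assumptions:
import numpy as np
import pywt   # PyWavelets, a maintained alternative to mlpy (my suggestion)
rng = np.random.default_rng(0)
x = np.arange(1024)
# toy signal: one peak plus offset noise, standing in for the spectra above
y = 80 * np.exp(-(x - 500)**2 / (2 * 20.0**2)) + rng.normal(30, 10, x.size)
# decompose, zero the two finest detail bands (noise), reconstruct
coeffs = pywt.wavedec(y, 'sym8', level=6)
coeffs[-1] = np.zeros_like(coeffs[-1])
coeffs[-2] = np.zeros_like(coeffs[-2])
y_smooth = pywt.waverec(coeffs, 'sym8')[:y.size]
The same idea as in the mlpy script applies: dropping coefficient bands before the inverse transform is what removes the unwanted frequency content.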