I have a Python project where I need to redraw a line many times, with the points in random places but keeping the line's shape and point count roughly the same. The final output will use polygonal points, not Bezier paths (though I wouldn't be opposed to using Bezier as an intermediary step).
The animation below demonstrates how the points could move along the line to different positions while maintaining the general shape.
I also have a working example below where I move along the line and pick random new points between existing points (the red line below). It works okay, but I'd love to hear other approaches if someone knows a better one.
Though this code uses matplotlib to demonstrate the line, the final program will not.
import numpy as np
from matplotlib import pyplot as plt
from random import randint, uniform

def move_along_line(p1, p2, scalar):
    # Linear interpolation between p1 and p2: scalar=0 gives p1, scalar=1 gives p2.
    distX = p2[0] - p1[0]
    distY = p2[1] - p1[1]
    modX = (distX * scalar) + p1[0]
    modY = (distY * scalar) + p1[1]
    return [modX, modY]
x_coords = [213.5500031,234.3809357,255.211853,276.0427856,296.8737183,317.7046204,340.1997681,364.3751221,388.5505066,414.8896484,444.5192261,478.5549622,514.5779419,545.4779053,570.3830566,588.0241699,598.2469482,599.772583,596.758728,593.7449341,590.7310791,593.373291,610.0373535,642.1326294,677.4451904,710.0697021,737.6887817,764.4020386,791.1152954,817.8284912,844.541687,871.2550049,897.9682007,924.6813965,951.3945923,978.1078491,1009.909546,1042.689941,1068.179199,1089.543091]
y_coords = [487.3099976,456.8832703,426.4565125,396.0297852,365.6030273,335.1763,306.0349426,278.1913452,250.3477478,224.7166748,203.0908051,191.2358704,197.6810608,217.504303,244.4946136,276.7698364,312.0551453,348.6885986,385.4395447,422.1904297,458.9414063,495.5985413,527.0128479,537.1477661,527.6642456,510.959259,486.6988525,461.2799683,435.8611145,410.4422913,385.023468,359.6045532,334.18573,308.7669067,283.3480835,257.929184,239.4429474,253.6099091,280.1803284,310.158783]
plt.plot(x_coords,y_coords,color='b')
plt.scatter(x_coords,y_coords,s=2)
new_line_x = []
new_line_y = []
for tgt in range(len(x_coords) - 1):
    #tgt = randint(0, len(x_coords)-1)
    next_pt = tgt + 1
    # Pick a random point on the segment between this point and the next.
    new_pt = move_along_line([x_coords[tgt], y_coords[tgt]],
                             [x_coords[next_pt], y_coords[next_pt]],
                             uniform(0, 1))
    new_line_x.append(new_pt[0])
    new_line_y.append(new_pt[1])
plt.plot(new_line_x, new_line_y, color='r')
plt.scatter(new_line_x, new_line_y, s=10)
ax = plt.gca()
ax.set_aspect('equal')
plt.show()
Thank you very much!
I'm not sure this is the most optimal way to do it, but essentially you want to follow these steps:
1. Calculate the length of the entire path and the distance between consecutive points, then for each point tally the cumulative distance along the path up to that point.
2. Generate a new set of random distances along the path, starting with 0: each one is a random value between 0 and 1 multiplied by the total path length.
3. Sort these distances from smallest to largest.
4. For each random distance, loop over the cumulative distances to find the index i where the random distance is greater than distance i and less than distance i+1, then interpolate new x and y values from those two points.
from matplotlib import pyplot as plt
import random
import math

x_coords = [195.21,212.53,237.39,270.91,314.21,368.43,434.69,514.1,607.8,692.69,746.98,773.8,776.25,757.45,720.52,668.55,604.68,545.37,505.79,487.05,490.27,516.58,567.09,642.93,745.2,851.5,939.53,1010.54,1065.8,1106.58,1134.15,1149.75,1154.68]
y_coords = [195.34,272.27,356.59,438.98,510.14,560.76,581.52,563.13,496.27,404.39,318.83,242.15,176.92,125.69,91.02,75.48,81.62,113.49,168.57,239.59,319.29,400.38,475.6,537.67,579.32,586.78,558.32,504.7,436.69,365.05,300.55,253.95,236.03]

n_points = 100

# Cumulative distance along the path up to each point.
tallied_distances = [0]
tallied_distance = 0
for i in range(0, len(x_coords) - 1):
    xi = x_coords[i]
    xf = x_coords[i + 1]
    yi = y_coords[i]
    yf = y_coords[i + 1]
    d = math.sqrt((xf - xi)**2 + (yf - yi)**2)
    tallied_distance += d
    tallied_distances.append(tallied_distance)

# Random distances along the path, sorted so the new points stay in order.
random_distances_along_line = [0]
for i in range(0, n_points - 2):
    random_distances_along_line.append(random.random() * tallied_distance)
random_distances_along_line.sort()

new_x_points = [x_coords[0]]
new_y_points = [y_coords[0]]
for i in range(0, len(random_distances_along_line)):
    dt = random_distances_along_line[i]
    # Find the segment that contains this distance and interpolate within it.
    for j in range(0, len(tallied_distances) - 1):
        di = tallied_distances[j]
        df = tallied_distances[j + 1]
        if di < dt and dt < df:
            difference = dt - di
            xi = x_coords[j]
            xf = x_coords[j + 1]
            yi = y_coords[j]
            yf = y_coords[j + 1]
            xt = xi + (xf - xi) * difference / (df - di)
            yt = yi + (yf - yi) * difference / (df - di)
            new_x_points.append(xt)
            new_y_points.append(yt)

new_x_points.append(x_coords[-1])
new_y_points.append(y_coords[-1])

plt.plot(new_x_points, new_y_points)
plt.scatter(new_x_points, new_y_points, s=2)
ax = plt.gca()
ax.set_aspect('equal')
plt.show()
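For comparison, here is a more compact sketch of the same resampling using numpy.interp against cumulative arc length (assuming the same x_coords and y_coords lists as above):

import numpy as np

# A minimal sketch of the same idea with numpy.interp.
xs = np.asarray(x_coords)
ys = np.asarray(y_coords)

# Cumulative arc length along the polyline, starting at 0.
seg_lengths = np.hypot(np.diff(xs), np.diff(ys))
arc = np.concatenate([[0], np.cumsum(seg_lengths)])

# Random sorted distances along the path, keeping the endpoints fixed.
n_points = 100
t = np.sort(np.random.uniform(0, arc[-1], n_points - 2))
t = np.concatenate([[0], t, [arc[-1]]])

# Interpolate x and y independently against arc length.
new_x = np.interp(t, arc, xs)
new_y = np.interp(t, arc, ys)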
Given a set of circles with random centers and radii, I would like to be able to prune this set so that, when circles overlap, only the largest circle is retained. This is similar to the question answered here, but the problem listed there seeks to retain the maximum number of non-overlapping circles, from what I understand. I'd like to be able to adapt the ILP solution given there to my needs, if possible, although a brute-force "search and remove"-type approach would be fine too. The latter is what I've tried so far, but I couldn't get it working.
import matplotlib.pyplot as plt
from numpy.random import rand, seed
seed(1)
N = 25 # number of circles
L = 10 # domain size
Rmin = 0.5 # min radius
Rmax = 1 # max radius
cx = rand(N)*(L-2*Rmax) + Rmax
cy = rand(N)*(L-2*Rmax) + Rmax
r = rand(N)*(Rmax-Rmin) + Rmin
# Plotting
for i in range(N):
    plt.gca().add_artist(plt.Circle((cx[i], cy[i]), r[i], ec='black', fc='white'))
plt.axis('image')
plt.xlim(0,L)
plt.ylim(0,L)
plt.show()
Desired Result:
It got a bit messy, but this creates the output you wanted.
import matplotlib.pyplot as plt
from numpy.random import rand, seed
import math
import numpy as np
import pandas as pd
def find_larger(df_circles_2, idx):
    # Decide the fate of circle idx: discard it if a larger, kept circle
    # overlaps it; keep it if no larger circle overlaps it; otherwise
    # leave it undecided for the next pass.
    found_greater = False
    for i, row in df_circles_2.iterrows():
        if i != idx:
            distance = math.sqrt((row['x'] - df_circles_2['x'][idx])**2
                                 + (row['y'] - df_circles_2['y'][idx])**2)
            # Two circles overlap when the distance between their centers
            # is smaller than the sum of their radii.
            if distance < (row['r'] + df_circles_2['r'][idx]):
                if row['r'] > df_circles_2['r'][idx] and (row['keep'] != "discard"):
                    if df_circles_2['keep'][i] == "keep":
                        return "discard"
                    found_greater = True
    if found_greater:
        return "undecided"
    else:
        return "keep"
seed(1)
N = 25 # number of circles
L = 10 # domain size
Rmin = 0.5 # min radius
Rmax = 1 # max radius
cx = rand(N)*(L-2*Rmax) + Rmax
cy = rand(N)*(L-2*Rmax) + Rmax
r = rand(N)*(Rmax-Rmin) + Rmin
# Plotting
for i in range(N):
    plt.gca().add_artist(plt.Circle((cx[i], cy[i]), r[i], ec='black', fc='white'))
    plt.gca().add_artist(plt.Text(cx[i], cy[i], text=str(i)))
plt.axis('image')
plt.xlim(0,L)
plt.ylim(0,L)
plt.show()
# Searching:
df_circles = pd.DataFrame(np.array([cx, cy, r]).T, columns = ['x', 'y', 'r'])
df_circles['keep'] = "undecided"
while df_circles['keep'].str.contains('undecided').any():
    for i, row in df_circles.iterrows():
        if row['keep'] == "undecided":
            df_circles.at[i, 'keep'] = find_larger(df_circles, i)
# Plotting 2
plt.figure(2)
for i in range(N):
    if df_circles['keep'][i] == "keep":
        plt.gca().add_artist(plt.Circle((cx[i], cy[i]), r[i], ec='black', fc='black'))
    else:
        plt.gca().add_artist(plt.Circle((cx[i], cy[i]), r[i], ec='black', fc='white'))
plt.axis('image')
plt.xlim(0,L)
plt.ylim(0,L)
plt.show()
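As an aside, the same set of kept circles can be computed in a single greedy pass, a sketch assuming the same cx, cy and r arrays: process circles from largest to smallest radius and keep one only if it overlaps no circle kept so far. Every kept circle that could knock it out is at least as large, which is exactly the fixed point the "undecided" loop converges to (up to ties in radius).

import numpy as np

# Greedy sketch: largest-first, keep a circle only if it does not
# overlap any circle that was already kept.
order = np.argsort(r)[::-1]
kept = []
for i in order:
    overlaps_kept = any(
        np.hypot(cx[i] - cx[j], cy[i] - cy[j]) < r[i] + r[j] for j in kept
    )
    if not overlaps_kept:
        kept.append(i)
print(sorted(kept))  # indices of the retained circles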
I am plotting 2D images of energy and density distribution. There is always a slight misalignment in the mapping, where the very first "columns" seem to end up in the last columns of the plot.
I have attached a link to the test data files.
Data files
Here is the plot:
Is there any way to prevent this?
The relevant part of the plotting code is as follows:
import numpy as np
import matplotlib.pyplot as plt
import pylab as pyl
import scipy.stats as ss
import matplotlib.ticker as ticker
import matplotlib.transforms as tr
#%matplotlib inline
pi = 3.1415
n = 5e24 # density plasma
m = 9.109e-31
eps = 8.85e-12
e = 1.6021725e-19
c = 3e8
wp=np.sqrt(n*e*e/(m*eps))
kp = np.sqrt(n*e*e/(m*eps))/c #plasma wavenumber
case=400
## decide on the target range of analysis for multiples
start= 20500
end = 21500
gap = 1000
## Multiples plots
def target_range(start, end, gap):
    while start <= end:
        yield start
        start += gap
for step in target_range(start, end, gap):
    fdata = np.genfromtxt('./beam_{}'.format(step)).reshape(-1, 6)
    ## dimensions, dt, and superparticle
    xBoxsize = 50e-6  # window size
    yBoxsize = 80e-6  # window size
    xbind = 10
    ybind = 1
    dx = 4e-8  # cell size
    dy = 4e-7  # cell size
    dz = 1e-6  # assumed to be the same as dy
    dt = 1.3209965456e-16
    sptcl = 1.6e10
    xsub = 0e-6
    xmax = dt * step * c
    xmin = xmax - xBoxsize
    ysub = 1e-7
    ymin = ysub  # to make our view window
    ymax = yBoxsize - ysub
    xbins = int((xmax - xmin) / (dx * xbind))
    ybins = int((ymax - ymin) / (dy * ybind))
    #zbins = int((zmax - zmin)/dz)  # option for 3D
    # Define "data_arr" as two 2D arrays of size xbins x ybins
    data_arr = np.zeros((2, xbins, ybins), dtype=float)
    for line in fdata:
        x = int((line[0] - xmin) / (dx * xbind))
        y = int((line[1] - ymin) / (dy * ybind))
        #z = int((line[2]-zmin)/dz)
        # Note: only the upper ends are clipped here; a negative x or y
        # (a point just outside xmin/ymin) wraps to the end of the array.
        if x >= xbins: x = xbins - 1
        if y >= ybins: y = ybins - 1
        #if z >= zbins: z = zbins - 1
        data_arr[0, x, y] = data_arr[0, x, y] + 1  # cumulatively add up the number of particles
        energy_total = np.sqrt(1 + line[2] * line[2] / (c * c) + line[3] * line[3] / (c * c)) / 0.511
        data_arr[1, x, y] += energy_total
        # array 1 holds the energy while array 0 holds the particle counts
    ## make average energy: total energy / particle number
    with np.errstate(divide='ignore', invalid='ignore'):
        en_arr = np.true_divide(data_arr[1], data_arr[0])  # total energy / number of particles
    en_arr[en_arr == np.inf] = 0
    en_arr = np.nan_to_num(en_arr)
    en_arr = en_arr.T
    ## This part is the real density of the distribution
    data_arr[0] = data_arr[0] * sptcl / dx / dy  # in m-3
    d = data_arr[0].T
    ## Plot and save density and energy distribution figures
    den_dist = plt.figure(1)
    plt.imshow(d, origin='lower', aspect='auto', cmap=plt.get_cmap('gnuplot'),
               extent=(xmin / 1e-3, xmax / 1e-3, ymin / 1e-6, ymax / 1e-6))
    plt.title('Density_dist [m-3]_{}'.format(step))
    plt.xlabel('distance [mm]')
    plt.ylabel('y [um]')
    plt.colorbar()
    plt.show()
    den_dist.savefig("./Qen_distribution_{}.png".format(step), format='png')
    # note: cmap options: rainbow, hot, jet, gnuplot, plasma
    energy_dist = plt.figure(2)
    plt.imshow(en_arr, origin='lower', aspect='auto', cmap=plt.get_cmap('jet'),
               extent=(xmin / 1e-3, xmax / 1e-3, ymin / 1e-6, ymax / 1e-6))
    plt.title('Energy_dist [MeV]_{}'.format(step))
    plt.xlabel('distance [mm]')
    plt.ylabel('y [um]')
    plt.colorbar()
    plt.show()
    energy_dist.savefig("./Qenergy_distribution_{}.png".format(step), format='png')
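One thing worth checking for the misalignment described above: the binning loop clips x >= xbins and y >= ybins but never clips negative indices, and in NumPy a negative index counts from the end of the axis, so a particle falling just left of xmin lands in the last column. A minimal sketch of a two-sided guard (an assumption about the cause, not verified against the data files; to_bin is a hypothetical helper):

import numpy as np

# Hypothetical helper (not in the original code): clip a bin index into
# [0, nbins - 1] on both sides so out-of-window points cannot wrap around.
def to_bin(value, vmin, cell, nbins):
    idx = int((value - vmin) / cell)
    return int(np.clip(idx, 0, nbins - 1))

# e.g. x = to_bin(line[0], xmin, dx * xbind, xbins)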
I have been trying to plot a radial profile of a FITS image using a modified script I found online. I always get y-axis units which are completely different from what's expected, and I'm not even sure what the y-axis units are. I have attached the FITS file, the profile I keep getting, and the correct radial profile I plotted using another program.
I am very new to Python, so I have no idea why this keeps happening. Any help fixing this will be greatly appreciated.
This is the code I've been using:
import numpy as np
import pyfits
import matplotlib.pyplot as plt

def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    if not center:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    r = np.hypot(x - center[0], y - center[1])

    # Get sorted radii
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of pixels in each radius bin

    # Cumulative sum to figure out sums for each radius bin
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    radial_prof = tbin / nr
    print(center)
    print(i_sorted)
    print(radial_prof)
    return radial_prof

# read in image
hdulist = pyfits.open('cit6ndf2fitsexample.fits')
scidata = np.array(hdulist[0].data)[0, :, :]

center = None
radi = 10
rad = azimuthalAverage(scidata, center)

plt.xlabel('radius (pixels?)', fontsize=12)
plt.ylabel('image intensity', fontsize=12)
plt.xlim(0, 10)
plt.ylim(0, 3.2)
plt.plot(rad[radi:])
plt.savefig('testfig1.png')
plt.show()
Profile with the wrong y-axis units.
Profile with the expected correct units, created using the Celtech Aperture Photometry Tool.
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator

minorLocator = AutoMinorLocator()

def radial_profile(data, center):
    # Distance of every pixel from the center, truncated to integer bins.
    x, y = np.indices((data.shape))
    r = np.sqrt((x - center[0])**2 + (y - center[1])**2)
    r = r.astype(int)
    # Sum of pixel values per radial bin divided by the pixel count per bin
    # gives the azimuthal average.
    tbin = np.bincount(r.ravel(), data.ravel())
    nr = np.bincount(r.ravel())
    radialprofile = tbin / nr
    return radialprofile

fitsFile = fits.open('testfig.fits')
img = fitsFile[0].data[0]
img[np.isnan(img)] = 0

#center = np.unravel_index(img.argmax(), img.shape)
center = (-fitsFile[0].header['LBOUND2']+1, -fitsFile[0].header['LBOUND1']+1)
rad_profile = radial_profile(img, center)

fig, ax = plt.subplots()
plt.plot(rad_profile[0:22], 'x-')
ax.xaxis.set_minor_locator(minorLocator)
plt.tick_params(which='both', width=2)
plt.tick_params(which='major', length=7)
plt.tick_params(which='minor', length=4, color='r')
plt.grid()
ax.set_ylabel(fitsFile[0].header['Label'] + " (" + fitsFile[0].header['BUNIT'] + ")")
ax.set_xlabel("Pixels")
plt.grid(which="minor")
plt.show()
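The heart of radial_profile is the np.bincount weights trick: np.bincount(r, weights=data) sums the pixel values falling in each integer radius, np.bincount(r) counts the pixels per radius, and their ratio is the azimuthal mean. A tiny self-contained illustration:

import numpy as np

# Three pixels at radii 0, 1, 1 with values 5, 2, 4:
r = np.array([0, 1, 1])
vals = np.array([5.0, 2.0, 4.0])
sums = np.bincount(r, weights=vals)   # [5., 6.]
counts = np.bincount(r)               # [1, 2]
print(sums / counts)                  # [5., 3.] -> mean value per radius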
EDIT:
I added a commented-out line for retrieving the center from the headers, but you would have to test more FITS files before choosing whether to use argmax or the header info to find the center.
First part of the header info:
SIMPLE = T / file does conform to FITS standard
BITPIX = -64 / number of bits per data pixel
NAXIS = 3 / number of data axes
NAXIS1 = 259 / length of data axis 1
NAXIS2 = 261 / length of data axis 2
NAXIS3 = 1 / length of data axis 3
EXTEND = T / FITS dataset may contain extensions
COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy
COMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H
LBOUND1 = -133 / Pixel origin along axis 1
LBOUND2 = -128 / Pixel origin along axis 2
LBOUND3 = 1 / Pixel origin along axis 3
OBJECT = 'CIT 6 ' / Title of the dataset
LABEL = 'Flux Density' / Label of the primary array
BUNIT = 'mJy/arcsec**2' / Units of the primary array
DATE = '2015-12-18T06:45:40' / file creation date (YYYY-MM-DDThh:mm:ss UT)
ORIGIN = 'East Asian Observatory' / Origin of file
BSCALE = 1.0 / True_value = BSCALE * FITS_value + BZERO
BZERO = 0.0 / True_value = BSCALE * FITS_value + BZERO
HDUCLAS1= 'NDF ' / Starlink NDF (hierarchical n-dim format)
HDUCLAS2= 'DATA ' / Array component subclass
HDSTYPE = 'NDF ' / HDS data type of the component
TELESCOP= 'JCMT ' / Name of Telescope
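As a worked example of the center line above: with LBOUND1 = -133 and LBOUND2 = -128, center = (-(-128) + 1, -(-133) + 1) = (129, 134), i.e. the array index at which the dataset's pixel origin falls (my reading of the Starlink LBOUND convention, so worth double-checking against your files).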
Main Problem: How can the scipy.signal.cwt() function be inverted?
I have seen that MATLAB has an inverse continuous wavelet transform function which will return the original form of the data from the wavelet transform, while letting you filter out the slices you don't want.
MATLAB inverse cwt function
Since scipy doesn't appear to have the same function, I have been trying to figure out how to get the data back into its original form, while removing the noise and background.
How do I do this?
I tried squaring the transform to remove negative values, but this gives me values that are way too large and not quite right.
Here is what I have been trying:
# Compute the wavelet transform
widths = range(1, 11)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data? and square?
WT_to_original_data = (xy['y'] * cwtmatr)**2
And here is a fully runnable short script that shows the type of data I am trying to get, what I have, etc.:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# Make some random data with peaks and noise
def make_peaks(x):
    bkg_peaks = np.zeros(len(x))
    desired_peaks = np.zeros(len(x))
    # Make peaks which contain the data desired
    # (Mid range/frequency peaks)
    for i in range(0, 10):
        center = x[-1] * np.random.random() - x[0]
        amp = 60 * np.random.random() + 10
        width = 10 * np.random.random() + 5
        desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    # Also make background peaks (not desired)
    for i in range(0, 3):
        center = x[-1] * np.random.random() - x[0]
        amp = 40 * np.random.random() + 10
        width = 100 * np.random.random() + 100
        bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    return bkg_peaks, desired_peaks

x = np.array(range(0, 1000))
bkg_peaks, desired_peaks = make_peaks(x)
y_noise = np.random.normal(loc=30, scale=10, size=len(x))
y = bkg_peaks + desired_peaks + y_noise
xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])

# Compute the wavelet transform
# I can't figure out what the width is or does?
widths = range(1, 11)
# Ricker is the 2nd derivative of a Gaussian
# (*close* to what *most* of the features are in my data)
# (They're actually Lorentzians and Breit-Wigner-Fano lines)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data? and square?
WT = (xy['y'] * cwtmatr)**2

# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(4, 3, 1)
ax = {}
for i in range(0, 10):
    ax[i] = fig.add_subplot(4, 3, i + 2)
ax_desired_transformed_data = fig.add_subplot(4, 3, 12)

ax_raw_data.plot(xy['x'], xy['y'], 'g-')
for i in range(0, 10):
    ax[i].plot(xy['x'], WT[i])
ax_desired_transformed_data.plot(xy['x'], desired_peaks, 'k-')
fig.tight_layout()
plt.show()
This script outputs the image below, where the first plot is the raw data, the middle plots are the wavelet transforms, and the last plot is what I want to get out as the processed (background and noise removed) data.
Does anyone have any suggestions? Thank you so much for the help.
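One crude approximation worth noting, since scipy has no built-in icwt: the "delta function" reconstruction (Torrence & Compo, 1998) sums the CWT coefficients across scales, weighting each scale by 1/sqrt(scale), and recovers the signal only up to a normalization constant. A minimal sketch against scipy.signal.cwt output, with that constant replaced by an empirical rescale:

import numpy as np
from scipy import signal

# Hedged sketch: approximate inverse via scale-weighted summation
# (delta reconstruction), correct only up to a constant factor.
def icwt_approx(cwtmatr, widths):
    widths = np.asarray(widths, dtype=float)
    # Sum coefficients over scales, weighting each row by 1/sqrt(scale).
    return (cwtmatr / np.sqrt(widths)[:, None]).sum(axis=0)

# Example: transform and roughly reconstruct a toy signal.
t = np.linspace(0, 1, 512)
sig = np.cos(2 * np.pi * 7 * t)
widths = np.arange(1, 31)
cwtmatr = signal.cwt(sig, signal.ricker, widths)
recon = icwt_approx(cwtmatr, widths)
# Empirically rescale, since the normalization constant is omitted here.
recon *= sig.std() / recon.std()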
I ended up finding a package called mlpy which provides an inverse wavelet transform: the forward undecimated transform is mlpy.wavelet.uwt and its inverse is mlpy.wavelet.iuwt. This is the runnable script I ended up with, which may interest people trying to do noise or background removal:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mlpy.wavelet as wave

# Make some random data with peaks and noise
############################################################
def gen_data():
    def make_peaks(x):
        bkg_peaks = np.zeros(len(x))
        desired_peaks = np.zeros(len(x))
        # Make peaks which contain the data desired
        # (Mid range/frequency peaks)
        for i in range(0, 10):
            center = x[-1] * np.random.random() - x[0]
            amp = 100 * np.random.random() + 10
            width = 10 * np.random.random() + 5
            desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        # Also make background peaks (not desired)
        for i in range(0, 3):
            center = x[-1] * np.random.random() - x[0]
            amp = 80 * np.random.random() + 10
            width = 100 * np.random.random() + 100
            bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        return bkg_peaks, desired_peaks

    # make x axis
    x = np.array(range(0, 1000))
    bkg_peaks, desired_peaks = make_peaks(x)
    avg_noise_level = 30
    std_dev_noise = 10
    size = len(x)
    scattering_noise_amp = 100
    scat_center = 100
    scat_width = 15
    scat_std_dev_noise = 100
    y_scattering_noise = np.random.normal(scattering_noise_amp, scat_std_dev_noise, size) * np.e**(-(x-scat_center)**2/(2*scat_width**2))
    y_noise = np.random.normal(avg_noise_level, std_dev_noise, size) + y_scattering_noise
    y = bkg_peaks + desired_peaks + y_noise
    xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])
    return xy

# Random data generated
#############################################################
xy = gen_data()

# Pad to a 2**n amount of data
new_y, bool_y = wave.pad(xy['y'])
orig_mask = np.where(bool_y == True)

# wavelet transform parameters
levels = 8
wf = 'h'
k = 2

# Remove noise first
# Wavelet transform
wt = wave.uwt(new_y, wf, k, levels)
# Matrix of the difference between each wavelet level and the original data
diff_array = np.array([(wave.iuwt(wt[i:i+1], wf, k) - new_y) for i in range(len(wt))])
# Index of the level which is most similar to the original data (to obtain smoothed data)
indx = np.argmin(np.sum(diff_array**2, axis=1))
# Use the wavelet levels around this region
noise_wt = wt[indx:indx+1]
# smoothed data in 2^n length
new_y = wave.iuwt(noise_wt, wf, k)

# Background removal
error = 10000
errdiff = 100
i = -1
iter_y_dict = {0: np.copy(new_y)}
bkg_approx_dict = {0: np.array([])}
while abs(errdiff) >= 1e-24:
    i += 1
    # Wavelet transform
    wt = wave.uwt(iter_y_dict[i], wf, k, levels)
    # Assume the last slices are the lowest frequencies (background approximation)
    bkg_wt = wt[-3:-1]
    bkg_approx_dict[i] = wave.iuwt(bkg_wt, wf, k)
    # Get the error
    errdiff = error - sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    error = sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    # Make every peak higher than bkg_wt
    diff = (new_y - bkg_approx_dict[i])
    peak_idxs_to_remove = np.where(diff > 0.)[0]
    iter_y_dict[i+1] = np.copy(new_y)
    iter_y_dict[i+1][peak_idxs_to_remove] = np.copy(bkg_approx_dict[i])[peak_idxs_to_remove]

# new data without noise and background
new_y = new_y[orig_mask]
bkg_approx = bkg_approx_dict[len(bkg_approx_dict.keys()) - 1][orig_mask]
new_data = diff[orig_mask]

##############################################################
# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(121)
ax_WT = fig.add_subplot(122)

ax_raw_data.plot(xy['x'], xy['y'], 'g')
for bkg in bkg_approx_dict.values():
    ax_raw_data.plot(xy['x'], bkg[orig_mask], 'k')
ax_WT.plot(xy['x'], new_data, 'y')
fig.tight_layout()
plt.show()
And here is the output I am getting now:
As you can see, there is still a problem with the background removal (it shifts to the right after each iteration), but that is a different question, which I will address here.