I need to compute B-spline curves in Python. I looked into scipy.interpolate.splprep and a few other scipy modules but couldn't find anything that readily gave me what I needed, so I wrote my own module below. The code works fine, but it is slow (the test function runs in 0.03s, which seems like a lot considering I'm only asking for 100 samples with 6 control vertices).
Is there a way to simplify the code below with a few scipy module calls, which presumably would speed it up? And if not, what could I do to my code to improve its performance?
import numpy as np
# cv = np.array of 3d control vertices
# n = number of samples (default: 100)
# d = curve degree (default: cubic)
# closed = is the curve closed (periodic) or open? (default: open)
def bspline(cv, n=100, d=3, closed=False):
    # Create a range of u values
    count = len(cv)
    knots = None
    u = None
    if not closed:
        u = np.arange(0, n, dtype='float') / (n - 1) * (count - d)
        knots = np.array([0]*d + list(range(count - d + 1)) + [count - d]*d, dtype='int')
    else:
        u = ((np.arange(0, n, dtype='float') / (n - 1) * count) - (0.5 * (d - 1))) % count # keep u=0 relative to 1st cv
        knots = np.arange(0 - d, count + d + d - 1, dtype='int')

    # Simple Cox - DeBoor recursion
    def coxDeBoor(u, k, d):
        # Test for end conditions
        if (d == 0):
            if (knots[k] <= u and u < knots[k + 1]):
                return 1
            return 0

        Den1 = knots[k + d] - knots[k]
        Den2 = knots[k + d + 1] - knots[k + 1]
        Eq1 = 0
        Eq2 = 0

        if Den1 > 0:
            Eq1 = ((u - knots[k]) / Den1) * coxDeBoor(u, k, (d - 1))
        if Den2 > 0:
            Eq2 = ((knots[k + d + 1] - u) / Den2) * coxDeBoor(u, (k + 1), (d - 1))

        return Eq1 + Eq2

    # Sample the curve at each u value
    samples = np.zeros((n, 3))
    for i in range(n):
        if not closed:
            if u[i] == count - d:
                samples[i] = np.array(cv[-1])
            else:
                for k in range(count):
                    samples[i] += coxDeBoor(u[i], k, d) * cv[k]
        else:
            for k in range(count + d):
                samples[i] += coxDeBoor(u[i], k, d) * cv[k % count]
    return samples
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    def test(closed):
        cv = np.array([[50., 25., -0.],
                       [59., 12., -0.],
                       [50., 10.,  0.],
                       [57.,  2.,  0.],
                       [40.,  4.,  0.],
                       [40., 14., -0.]])

        p = bspline(cv, closed=closed)
        x, y, z = p.T
        cv = cv.T
        plt.plot(cv[0], cv[1], 'o-', label='Control Points')
        plt.plot(x, y, 'k-', label='Curve')
        plt.minorticks_on()
        plt.legend()
        plt.xlabel('x')
        plt.ylabel('y')
        plt.xlim(35, 70)
        plt.ylim(0, 30)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.show()

    test(False)
The two images below show what my code returns for both the open and closed conditions:
So after obsessing a lot over my question, and much research, I finally have my answer. Everything is available in scipy, and I'm putting my code here so hopefully someone else can find this useful.
The function takes in an array of N-d points, a curve degree, a periodic state (open or closed) and will return n samples along that curve. There are ways to make sure the curve samples are equidistant, but for the time being I'll focus on this question, as it is all about speed (a rough sketch of one equidistant-resampling approach follows the function below).
Worthy of note: I can't seem to go beyond a 20th-degree curve. Granted, that's overkill already, but I figured it's worth mentioning.
Also worthy of note: on my machine the code below can calculate 100,000 samples in 0.017s.
import numpy as np
import scipy.interpolate as si
def bspline(cv, n=100, degree=3, periodic=False):
    """ Calculate n samples on a bspline

        cv :      Array of control vertices
        n  :      Number of samples to return
        degree:   Curve degree
        periodic: True - Curve is closed
                  False - Curve is open
    """

    # If periodic, extend the point array by count+degree+1
    cv = np.asarray(cv)
    count = len(cv)

    if periodic:
        factor, fraction = divmod(count + degree + 1, count)
        cv = np.concatenate((cv,) * factor + (cv[:fraction],))
        count = len(cv)
        degree = np.clip(degree, 1, degree)

    # If opened, prevent degree from exceeding count-1
    else:
        degree = np.clip(degree, 1, count - 1)

    # Calculate knot vector
    kv = None
    if periodic:
        kv = np.arange(0 - degree, count + degree + degree - 1)
    else:
        kv = np.clip(np.arange(count + degree + 1) - degree, 0, count - degree)

    # Calculate query range
    u = np.linspace(periodic, (count - degree), n)

    # Calculate result
    return np.array(si.splev(u, (kv, cv.T, degree))).T
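For the equidistant-sampling point mentioned earlier, here is a minimal sketch of one approach (my own addition, not part of the timing claims): draw a dense set of samples from the function above, then re-sample at approximately equal arc length by interpolating against the cumulative chord length.
import numpy as np

def resample_equidistant(samples, n):
    # Cumulative chord length along the sampled curve, normalized to [0, 1]
    dist = np.cumsum(np.r_[0., np.linalg.norm(np.diff(samples, axis=0), axis=1)])
    dist /= dist[-1]
    # Interpolate each coordinate against arc length at n evenly spaced values
    t = np.linspace(0, 1, n)
    return np.column_stack([np.interp(t, dist, c) for c in samples.T])
For example, resample_equidistant(bspline(cv, n=10000), 100) draws 100 near-equidistant points from a dense sampling; the approximation improves with the density of the initial sampling.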
To test it:
import matplotlib.pyplot as plt
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
cv = np.array([[ 50., 25.],
[ 59., 12.],
[ 50., 10.],
[ 57., 2.],
[ 40., 4.],
[ 40., 14.]])
plt.plot(cv[:,0],cv[:,1], 'o-', label='Control Points')
for d in range(1, 21):
    p = bspline(cv, n=100, degree=d, periodic=True)
    x, y = p.T
    plt.plot(x, y, 'k-', label='Degree %s' % d, color=colors[d % len(colors)])
plt.minorticks_on()
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(35, 70)
plt.ylim(0, 30)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
Results for both open and periodic curves:
ADDENDUM
As of scipy-0.19.0 there is a new scipy.interpolate.BSpline class that can be used.
import numpy as np
import scipy.interpolate as si
def scipy_bspline(cv, n=100, degree=3, periodic=False):
    """ Calculate n samples on a bspline

        cv :      Array of control vertices
        n  :      Number of samples to return
        degree:   Curve degree
        periodic: True - Curve is closed
                  False - Curve is open
    """
    cv = np.asarray(cv)
    count = cv.shape[0]

    # Closed curve
    if periodic:
        kv = np.arange(-degree, count + degree + 1)
        factor, fraction = divmod(count + degree + 1, count)
        cv = np.roll(np.concatenate((cv,) * factor + (cv[:fraction],)), -1, axis=0)
        degree = np.clip(degree, 1, degree)

    # Opened curve
    else:
        degree = np.clip(degree, 1, count - 1)
        kv = np.clip(np.arange(count + degree + 1) - degree, 0, count - degree)

    # Return samples
    max_param = count - (degree * (1 - periodic))
    spl = si.BSpline(kv, cv, degree)
    return spl(np.linspace(0, max_param, n))
Testing for equivalency:
p1 = bspline(cv,n=10**6,degree=3,periodic=True) # 1 million samples: 0.0882 sec
p2 = scipy_bspline(cv,n=10**6,degree=3,periodic=True) # 1 million samples: 0.0789 sec
print(np.allclose(p1, p2)) # returns True
Giving optimization tips without profiling data is a bit like shooting in the dark. However, the function coxDeBoor seems to be called very often. This is where I would start optimizing.
Function calls in Python are expensive, so you should try to replace the coxDeBoor recursion with iteration to avoid excessive function calls. Some general information on how to do this can be found in answers to this question. As a stack/queue you can use collections.deque. A rough sketch of an iterative version follows.
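To illustrate, here is a minimal sketch (my own, assuming the same knots array as in the question) that evaluates a single basis function bottom-up with a small table instead of recursion, so each intermediate value is computed exactly once:
import numpy as np

def cox_de_boor_iterative(u, k, d, knots):
    # Degree-0 basis values N[k,0] .. N[k+d,0] over the span we need
    b = np.array([1.0 if knots[j] <= u < knots[j + 1] else 0.0
                  for j in range(k, k + d + 1)])
    # Raise the degree one level at a time (bottom-up dynamic programming)
    for r in range(1, d + 1):
        for j in range(d - r + 1):
            den1 = knots[k + j + r] - knots[k + j]
            den2 = knots[k + j + r + 1] - knots[k + j + 1]
            left = (u - knots[k + j]) / den1 * b[j] if den1 > 0 else 0.0
            right = (knots[k + j + r + 1] - u) / den2 * b[j + 1] if den2 > 0 else 0.0
            b[j] = left + right
    return b[0]
coxDeBoor(u, k, d) and cox_de_boor_iterative(u, k, d, knots) should return the same value, but the iterative version avoids recomputing the same lower-degree terms through repeated recursive calls.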
Somehow the following code raises the error "ValueError: x0 must have at most 1 dimension." as soon as I add bounds to my fit. I have absolutely no idea what I'm doing wrong here.
The goal is to constrain the fit of the 8 Lorentzian curves to the given bounds.
However, the presented code probably won't lead to a good fit, but that is a problem I should be able to solve.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
from scipy.signal import find_peaks, peak_widths
import time
# Functions needed for Fitting model
def lorentzian(x, amp, cen, wid):
    return amp*wid**2/((x-cen)**2+wid**2)

def multi_lorentzian(x, params, *args):
    if args:
        params = [params] + list(args)
    try:
        params = np.array(params).reshape(len(params)//3, 3)
    except ValueError:
        raise ValueError("Parameter dimensions don't fit the model!")
    total_curve = 0
    for amp, cen, wid in params:
        total_curve += lorentzian(x, amp, cen, wid)
    return total_curve
##############################################################################
# create data
samples = 200
start = 2.75
stop = 3
x_incr = (stop-start)/samples
x_array = np.linspace(start, stop, samples) # frequency in GHz
amp_array = np.random.uniform(0.03, 0.1, 8) # 3 bis 10% Kontrast
cen_array = [2.81, 2.829, 2.831, 2.848, 2.897, 2.914, 2.9165, 2.932]
# cen_array = np.random.uniform(start, stop, 8)
wid_array = [0.003, 0.003, 0.003,0.003, 0.003, 0.003, 0.003, 0.003]
y_array = 1 - multi_lorentzian(x_array,
                               np.array([amp_array, cen_array, wid_array]).T)
y_noise = y_array + np.random.normal(0, 1, samples)*1e-3
# mirroring to get maxima instead of minima
y_noise_inv = -y_noise+1
##############################################################################
# prepare guessing of start values
heights = np.random.uniform(0.03, 0.1, 8)
widths = np.random.uniform(0.002, 0.004, 8)
center_guess = cen_array + np.random.normal(0, 1, 8)*1e-3
p0_array = np.array([heights, center_guess, widths]).T
bounds_array = ([0., 2.75, 0.], [1., 3., 0.5])
popt_y, pcov_y = scipy.optimize.curve_fit(multi_lorentzian, x_array, y_noise_inv,
                                          p0=p0_array, bounds=bounds_array)
popt_y = popt_y.reshape(len(popt_y)//3, 3)
single_peaks = [lorentzian(x_array, i, j, k) for i,j,k in popt_y]
perr_y = np.sqrt(np.diag(pcov_y))
residual_y = y_noise_inv - multi_lorentzian(x_array, popt_y)
ss_res = np.sum(residual_y**2)
ss_tot = np.sum((y_noise_inv-np.mean(y_noise_inv))**2)
r_squared = 1 - (ss_res / ss_tot)
OK, after some digging, the issue was quite simple: p0 is supposed to be flat, not the 2D array you supplied. I only had to change two lines to make things work.
First, the bounds array. You're supposed to have as many minimum and maximum values as you have parameters, and since you have 3*8 parameters, I just multiplied them as shown here.
bounds_array = ([0., 2.75, 0.]*8, [1., 3., 0.5]*8)
Second, I flattened p0 when calling curve_fit.
popt_y, pcov_y = scipy.optimize.curve_fit(multi_lorentzian, x_array, y_noise_inv, p0=p0_array.flatten(), bounds= bounds_array)
And this is the fit:
I would like to combine multiple Mayavi objects into a single "grouped" object so that I can control all of their properties together. For example, I created the following bi-convex lens shape by combining 3 built-in surfaces (two spheres and one cylinder). Now I would like to assign uniform properties (specularity, ambient color, etc.) to all of the constituent surfaces at once (not individually). Also, I would like to translate/rotate the lens as a whole. I am not sure how to accomplish this.
Here is the bi-convex lens created in Mayavi (code given below):
As it can be seen in the following figure, the above lens is composed of three surfaces:
Here is the code for building the bi-convex lens:
import numpy as np
from mayavi import mlab
from mayavi.sources.builtin_surface import BuiltinSurface
from mayavi.modules.surface import Surface
from mayavi.filters.transform_data import TransformData
def lensUsingMayaviBuiltinSphere(radius=0.5, semiDiam=0.25, thickness=0.9):
    """
    Render a bi-convex lens
    """
    engine = mlab.get_engine()
    sag = radius - np.sqrt(radius**2 - semiDiam**2)
    cyl_height = thickness - 2.0*sag  # thickness of the cylinder in between

    # Create Mayavi data sources -- sphere_h1_src, sphere_h2_src, cylinder_src
    # half 1: source = sphere_h1_src
    sphere_h1_src = BuiltinSurface()
    engine.add_source(sphere_h1_src)
    sphere_h1_src.source = 'sphere'
    sphere_h1_src.data_source.radius = radius
    sphere_h1_src.data_source.center = np.array([0., 0., -np.sqrt(radius**2 - semiDiam**2) + cyl_height/2.0])
    sphere_h1_src.data_source.end_phi = np.rad2deg(np.arcsin(semiDiam/radius))  # 60.0
    sphere_h1_src.data_source.end_theta = 360.0
    sphere_h1_src.data_source.phi_resolution = 300
    sphere_h1_src.data_source.theta_resolution = 300

    # half 2: source = sphere_h2_src
    sphere_h2_src = BuiltinSurface()
    engine.add_source(sphere_h2_src)
    sphere_h2_src.source = 'sphere'
    sphere_h2_src.data_source.radius = radius
    sphere_h2_src.data_source.center = np.array([0., 0., np.sqrt(radius**2 - semiDiam**2) - cyl_height/2.0])
    sphere_h2_src.data_source.start_phi = 180.0 - np.rad2deg(np.arcsin(semiDiam/radius))
    sphere_h2_src.data_source.end_phi = 180.0
    sphere_h2_src.data_source.end_theta = 360.0
    sphere_h2_src.data_source.phi_resolution = 300
    sphere_h2_src.data_source.theta_resolution = 300

    # cylinder source data in between
    cylinder_src = BuiltinSurface()
    engine.add_source(cylinder_src)
    cylinder_src.source = 'cylinder'
    cylinder_src.data_source.center = np.array([0., 0., 0.])
    if cyl_height > 0:
        cylinder_src.data_source.height = cyl_height
    else:
        cylinder_src.data_source.height = 0.0
    cylinder_src.data_source.radius = semiDiam
    cylinder_src.data_source.capping = False
    cylinder_src.data_source.resolution = 50

    # Add transformation filter to align cylinder length along z-axis
    transform_data_filter = TransformData()
    engine.add_filter(transform_data_filter, cylinder_src)
    Rt_c = [1.0000, 0.0000,  0.0000, 0.00,
            0.0000, 0.0000, -1.0000, 0.00,
            0.0000, 1.0000,  0.0000, 0.00,
            0.0000, 0.0000,  0.0000, 1.00]
    transform_data_filter.transform.matrix.__setstate__({'elements': Rt_c})
    transform_data_filter.widget.set_transform(transform_data_filter.transform)
    transform_data_filter.filter.update()
    transform_data_filter.widget.enabled = False  # disable the rotation control further

    # Add surface modules to each source
    right_surface = Surface()
    engine.add_filter(right_surface, sphere_h1_src)
    left_surface = Surface()
    engine.add_filter(left_surface, sphere_h2_src)
    cyl_surface = Surface()
    engine.add_filter(cyl_surface, transform_data_filter)

fig = mlab.figure()

# Add lens
lensUsingMayaviBuiltinSphere(radius=2, semiDiam=1.2)
mlab.show()
I don't know of a way to combine sources in the way you are looking for. In fact, I think that is probably impossible, since under the hood the BuiltinSurface object has specific VTK sources that are not what you want. It should, however, be possible to simply use a different source that gives what you want. In this case you could generate a biconvex lens with mlab.mesh:
import numpy as np
from mayavi import mlab

a, c, h = 3, 1, .2
phi, theta = np.mgrid[0:2*np.pi:np.pi/250, 0:2*np.pi:np.pi/250]
x = a*np.cos(theta)*np.sin(phi)
y = a*np.sin(theta)*np.sin(phi)
z = c*np.cos(phi) + (h*(-1)**(np.cos(phi) < 0))
mlab.mesh(x, y, z, color=(1, 1, 1))
mlab.show()
One minor difference is that this surface is smooth. This is the nature of sampling a single surface, i.e., this result is a direct consequence of what your question asks to do. If sharp edges are an important feature of your figure, I would suggest an entirely different approach: wrap the 3 sources in a class and have the event handler update the relevant attributes on all three. A rough sketch of that idea follows.
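To sketch the wrapper idea (untested; the attribute paths s.actor.property and s.actor.actor.position are my assumption about the standard layout of Mayavi's Surface module):
class LensGroup(object):
    # Hypothetical helper, not a Mayavi API: fan shared settings out to
    # every constituent Surface module.
    def __init__(self, *surfaces):
        self.surfaces = surfaces  # e.g. (right_surface, left_surface, cyl_surface)

    def set_property(self, name, value):
        # e.g. group.set_property('specular', 0.8)
        for s in self.surfaces:
            setattr(s.actor.property, name, value)

    def translate(self, dx, dy, dz):
        # Shift every constituent actor by the same offset
        for s in self.surfaces:
            px, py, pz = s.actor.actor.position
            s.actor.actor.position = (px + dx, py + dy, pz + dz)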
Based on the code by @aestrivex, here is one way of getting the desired output (a lens with sharp edges). Note that this is not a solution for grouping multiple Mayavi objects.
import numpy as np
from mayavi import mlab
# Control parameters
# r is the semi-diameter of the lens
# c controls the center thickness of the lens
# h controls the curvature of the surfaces (lesser the value more the curvature)
r, c, h = 3, .75, .9
delta_phi = np.pi/250.0 # phi == azimuth (0 <= phi <= 2*pi)
delta_theta = np.pi/100.0 # theta == zenith (0 <= theta <= pi)
phi, theta = np.mgrid[0:2.0*np.pi + delta_phi:delta_phi,0:np.pi + delta_theta:delta_theta]
# The Exact threshold values for masking tz, txy will change depending upon the
# sampling of theta. txy is always slightly less than tz. tz should be around 0.3
tz, txy = 0.279, 0.275
x = r*np.sin(theta)*np.cos(phi)*(np.abs(np.cos(theta)) > txy)
y = r*np.sin(theta)*np.sin(phi)*(np.abs(np.cos(theta)) > txy)
z = c*np.cos(theta)*(h**(-1)*( np.abs(np.cos(theta)) > tz))
mlab.mesh(x,y,z,color=(1,1,1))
mlab.show()
And here is the output:
I am trying to fit some data that are distributed in time following an exponential decay. I tried to follow some fitting examples on the web, but my code doesn't fit the data: only a straight line results from the fit. Maybe there is something wrong with the initial parameters? Until now I have only done Gaussian and line fits using the same method, which may not be correct for this case.
The code takes the data from the web, so it is directly executable.
Question: why doesn't the code result in any fit?
Many thanks in advance.
#!/usr/bin/env python
import pyfits, os, re, glob, sys
from scipy.optimize import leastsq
from numpy import *
from pylab import *
from scipy import *
rc('font',**{'family':'serif','serif':['Helvetica']})
rc('ps',usedistiller='xpdf')
rc('text', usetex=True)
#------------------------------------------------------
tmin = 56200
tmax = 56249
data=pyfits.open('http://heasarc.gsfc.nasa.gov/docs/swift/results/transients/weak/GX304-1.orbit.lc.fits')
time = data[1].data.field(0)/86400. + data[1].header['MJDREFF'] + data[1].header['MJDREFI']
rate = data[1].data.field(1)
error = data[1].data.field(2)
data.close()
cond = ((time > 56210) & (time < 56225))
time = time[cond]
rate = rate[cond]
error = error[cond]
right_exp = lambda p, x: p[0]*exp(-p[1]*x)
err = lambda p, x, y:(right_exp(p, x) -y)
v0= [0.20, 56210.0, 1]
out = leastsq(err, v0[:], args = (time, rate), maxfev=100000, full_output=1)
v = out[0] #fit parameters out
xxx = arange(min(time), max(time), time[1] - time[0])
ccc = right_exp(v, xxx)
fig = figure(figsize = (9, 9)) #make a plot
ax1 = fig.add_subplot(111)
ax1.plot(time, rate, 'g.') #spectrum
ax1.plot(xxx, ccc, 'b-') #fitted spectrum
savefig("right exp.png")
axis([tmin-10, tmax, -0.00, 0.45])
Your problem is ill-conditioned because your time array contains big numbers: used in exp(-a*time), they give values close to 0, which tricks the err function, because your rate array also contains small values close to 0, leading to small errors. In other words, almost any sufficiently large a in the exponential gives a seemingly good solution.
To fix that you can either:
1) change your decay function to include an initial time:
exp(-a*(time-time0))
2) change your input data to start from a smaller number:
time -= time.min()
For both options you have to change the initial guess v0, e.g. v0=[0.,0.]. The first solution seems more robust and you do not have to manage changes in your time array. A good initial guess for time0 is time.min():
right_exp = lambda p, x: p[0]*exp(-p[1]*(x-p[2]))
err = lambda p, x, y:(right_exp(p, x) -y)
v0= [0., 0., time.min() ]
out = leastsq(err, v0, args = (time, rate))
v = out[0] #fit parameters out
xxx = arange(min(time), max(time), time[1] - time[0])
ccc = right_exp(v, xxx)
fig = figure(figsize = (9, 9)) #make a plot
ax1 = fig.add_subplot(111)
ax1.plot(time, rate, 'g.') #spectrum
ax1.plot(xxx, ccc, 'b-') #fitted spectrum
fig.show()
Giving:
Still, the final result depends on v0; e.g., with v0=[1.,1.,time.min()] it decays too fast and does not find the optimum.
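One pragmatic way around that sensitivity is to try several starting points and keep the fit with the smallest residual sum of squares. A minimal sketch, reusing err, time and rate from above (the candidate guesses here are arbitrary):
import numpy as np
from scipy.optimize import leastsq

candidates = ([0., 0., time.min()],
              [0.2, 0.5, time.min()],
              [1., 1., time.min()])
# Fit from each starting point, then keep the parameters with the lowest SSE
fits = [leastsq(err, v0, args=(time, rate))[0] for v0 in candidates]
v = min(fits, key=lambda p: np.sum(err(p, time, rate)**2))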
I have some data of a particle moving in a corridor with closed boundary conditions.
Plotting it leads to a zig-zag trajectory.
I would like to know how to prevent plot() from connecting the points where the particle comes back to the start. Something like in the upper part of the picture, but without the "." markers.
The first idea I had was to find the index where the numpy array a[:-1]-a[1:] becomes positive and then plot from 0 to that index. But how would I get the index of the first occurrence of a positive element of a[:-1]-a[1:]?
Maybe there are some other ideas.
I'd take a different approach. First, I'd determine the jump points not by looking at the sign of the derivative, since the movement might go up or down, or even have some periodicity in it. Instead, I'd look at the points with the biggest derivative.
Second, an elegant approach to have breaks in a plot line is to mask one value on each jump. Then matplotlib will make segments automatically. My code is:
import pylab as plt
import numpy as np
xs = np.linspace(0., 100., 1000)
data = (xs*0.03 + np.sin(xs) * 0.1) % 1
plt.subplot(2,1,1)
plt.plot(xs, data, "r-")
#Make a masked array with jump points masked
abs_d_data = np.abs(np.diff(data))
mask = np.hstack([ abs_d_data > abs_d_data.mean()+3*abs_d_data.std(), [False]])
masked_data = np.ma.MaskedArray(data, mask)
plt.subplot(2,1,2)
plt.plot(xs, masked_data, "b-")
plt.show()
And gives us as result:
The disadvantage of course is that you lose one point at each break - but with the sampling rate you seem to have I guess you can trade this in for simpler code.
To find where the particle has crossed the upper boundary, you can do something like this:
>>> import numpy as np
>>> a = np.linspace(0, 10, 50) % 5 # some sample data
>>> np.nonzero(np.diff(a) < 0)[0] + 1
array([25, 49])
>>> a[24:27]
array([ 4.89795918, 0.10204082, 0.30612245])
>>> a[48:]
array([ 4.79591837, 0. ])
np.diff(a) calculates the discrete difference of a, while np.nonzero finds where the condition np.diff(a) < 0 holds, i.e., where the difference is negative because the particle has wrapped around; the +1 shifts to the index of the first point after each wrap.
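And since the question asks for the first occurrence specifically, a minimal sketch: take the first element of that index array, guarding against the case where no wrap happens:
import numpy as np

a = np.linspace(0, 10, 50) % 5
crossings = np.nonzero(np.diff(a) < 0)[0] + 1
first = crossings[0] if crossings.size else None  # index of the first wrap-around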
To avoid the connecting line you will have to plot by segments.
Here's a quick way to plot by segments when the derivative of a changes sign:
import numpy as np
import matplotlib.pyplot as plt

a = np.linspace(0, 20, 50) % 5 # similar to Micheal's sample data
x = np.arange(50) # x scale
indices = np.where(np.diff(a) < 0)[0] + 1 # the same as Micheal's np.nonzero
start = 0
for i in indices:
    plt.plot(x[start:i], a[start:i], 'b-')
    start = i
plt.plot(x[start:], a[start:], 'b-') # don't forget the segment after the last wrap
plt.show()
Based on Thorsten Kranz's answer, here is a version which adds points to the original data where y crosses the period. This is important if the density of data points isn't very high, e.g. np.linspace(0., 100., 100) vs. the original np.linspace(0., 100., 1000). The x positions of the curve transitions are linearly interpolated. Wrapped up in a function:
import numpy as np
def periodic2plot(x, y, period=np.pi*2.):
    indexes = np.argwhere(np.abs(np.diff(y)) > .5*period).flatten()
    index_shift = 0
    for i in indexes:
        i += index_shift
        index_shift += 3  # in every loop it adds 3 elements
        if y[i] > .5*period:
            x_transit = np.interp(period, np.unwrap(y[i:i+2], period=period), x[i:i+2])
            add = np.ma.array([period, 0., 0.], mask=[0, 1, 0])
        else:
            # interpolate needs sorted xp = np.unwrap(y[i:i+2], period=period)
            x_transit = np.interp(0, np.unwrap(y[i:i+2], period=period)[::-1], x[i:i+2][::-1])
            add = np.ma.array([0., 0., period], mask=[0, 1, 0])
        x_add = np.ma.array([x_transit]*3, mask=[0, 1, 0])
        x = np.ma.hstack((x[:i+1], x_add, x[i+1:]))
        y = np.ma.hstack((y[:i+1], add, y[i+1:]))
    return x, y
Code comparing this with Thorsten Kranz's original answer at a lower data-point density:
import matplotlib.pyplot as plt
x = np.linspace(0., 100., 100)
y = (x*0.03 + np.sin(x) * 0.1) % 1
#Thorsten Kranz: Make a masked array with jump points masked
abs_d_data = np.abs(np.diff(y))
mask = np.hstack([abs_d_data > .5, [False]])
masked_y = np.ma.MaskedArray(y, mask)
# Plot
plt.figure()
plt.plot(*periodic2plot(x, y, period=1), label='This answer')
plt.plot(x, masked_y, label='Thorsten Kranz')
plt.autoscale(enable=True, axis='both', tight=True)
plt.legend(loc=1)
plt.tight_layout()
What's the difference between scipy's optimize.fmin and optimize.leastsq? They seem to be used in pretty much the same way in this example page. The only difference I can see is that leastsq actually calculates the sum of squares on its own (as its name would suggest) while when using fmin one has to do this manually. Other than that, are the two functions equivalent?
They use different algorithms underneath.
fmin uses the Nelder-Mead simplex method; leastsq does least-squares fitting via the Levenberg-Marquardt algorithm (wrapping MINPACK).
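To make the difference concrete, here is a small sketch of my own (a toy line fit, not from the linked example page): leastsq takes the vector of residuals and squares-and-sums internally, while for fmin you build the scalar objective yourself:
import numpy as np
from scipy.optimize import fmin, leastsq

rng = np.random.default_rng(0)
x = np.linspace(0, 1, 50)
y = 3.0 * x + rng.normal(0, 0.1, 50)   # noisy line with slope 3

residuals = lambda p: y - p[0] * x     # vector of residuals
p_lsq, _ = leastsq(residuals, [1.0])   # Levenberg-Marquardt on the residual vector
p_fmin = fmin(lambda p: np.sum(residuals(p)**2), [1.0], disp=False)  # Nelder-Mead on the scalar SSE
print(p_lsq, p_fmin)  # both should land near 3.0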
Just to add some information: I am developing a module to fit a biexponential function, and leastsq is dramatically faster than minimize there (roughly a factor of 35 in the timings below). Have a look at the code below for more details.
I used a biexponential curve, which is a sum of two exponentials, and the model function has 4 parameters to fit: S0, f, D_star and D.
All default fitting options were used.
S = S0 * (f * e^(-x * D_star) + (1 - f) * e^(-x * D))
Time taken for minimize: 0.011617898941040039
Time taken for leastsq : 0.0003180503845214844
The code used :
import numpy as np
from scipy.optimize import minimize, leastsq
from time import time
def ivim_function(params, bvals):
    """The Intravoxel incoherent motion (IVIM) model function.

        S(b) = S_0 * (f * e^(-b*D_star) + (1 - f) * e^(-b*D))

    S_0, f, D_star and D are the IVIM parameters.

    Parameters
    ----------
    params : array
        parameters S0, f, D_star and D of the model
    bvals : array
        bvalues

    References
    ----------
    .. [1] Le Bihan, Denis, et al. "Separation of diffusion
           and perfusion in intravoxel incoherent motion MR
           imaging." Radiology 168.2 (1988): 497-505.
    .. [2] Federau, Christian, et al. "Quantitative measurement
           of brain perfusion with intravoxel incoherent motion
           MR imaging." Radiology 265.3 (2012): 874-881.
    """
    S0, f, D_star, D = params
    S = S0 * (f * np.exp(-bvals * D_star) + (1 - f) * np.exp(-bvals * D))
    return S

def _ivim_error(params, bvals, signal):
    """Error function to be used in fitting the IVIM model
    """
    return (signal - ivim_function(params, bvals))

def sum_sq(params, bvals, signal):
    """Sum of squares of the errors. This function is minimized"""
    return np.sum(_ivim_error(params, bvals, signal)**2)
x0 = np.array([100., 0.20, 0.008, 0.0009])
bvals = np.array([0., 10., 20., 30., 40., 60., 80., 100.,
                  120., 140., 160., 180., 200., 220., 240.,
                  260., 280., 300., 350., 400., 500., 600.,
                  700., 800., 900., 1000.])
data = ivim_function(x0, bvals)
optstart = time()
opt = minimize(sum_sq, x0, args=(bvals, data))
optend = time()
time_taken = optend - optstart
print("Time taken for opt:", time_taken)
lstart = time()
lst = leastsq(_ivim_error,
              x0,
              args=(bvals, data))
lend = time()
time_taken = lend - lstart
print("Time taken for leastsq :", time_taken)
print('Parameters estimated using minimize :', opt.x)
print('Parameters estimated using leastsq :', lst[0])