Triple integration using Python - python

I've been trying to solve an equation using scipy.integrate.tplquad, but I don't fully understand the notation and so don't really know how to solve the following equation. Any help would be much appreciated.
Thanks,

In your example it gives a zero integral result. I used the high value 1.e22 as a stand-in for inf:
import numpy as np
from scipy.integrate import tplquad

inf = 1.e22
func = lambda x, y, z: x**2 * np.exp(-x**2) * np.exp(-0.5*y*z/x)
x1, x2 = 0, np.pi
y1, y2 = lambda x: 0, lambda x: inf
z1, z2 = lambda x, y: 0, lambda x, y: inf
print(tplquad(func, x1, x2, y1, y2, z1, z2))
#(0.0, 0.0)
This is an example to calculate the volume of a sphere:
from numpy import pi, sin
from scipy.integrate import tplquad

# limits for radius
r1 = 0.
r2 = 1.
# limits for theta
t1 = 0
t2 = 2*pi
# limits for phi
p1 = 0
p2 = pi

def diff_volume(p, t, r):
    return r**2 * sin(p)

volume = tplquad(diff_volume, r1, r2, lambda r: t1, lambda r: t2,
                 lambda r, t: p1, lambda r, t: p2)[0]
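As a quick sanity check, not part of the original answer, the result should match the closed-form volume of the unit sphere, 4*pi/3. Note also that tplquad expects the integrand's arguments in innermost-to-outermost order, which is why diff_volume takes (p, t, r) while the limits are passed radius-first.
# the numerical result should agree with the analytic volume (4/3)*pi*r**3 for r = 1
print(volume)     # ~4.18879
print(4*pi/3)     # 4.18879...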

Related

I want to use a Dirac delta as a function of time in a Python code to solve four coupled differential equations. I am not getting it right

I want to use a Dirac delta as a function of time in a Python code to solve four coupled
differential equations. In the code I am using solve_ivp to solve the coupled equations, but
I am not getting it right: I am not able to define the Dirac delta function. Please help me if
anyone can.
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
from pylab import *
from qutip import *
import scipy.special as sp
import scipy.linalg as la
from scipy.integrate import solve_ivp
import math
import cmath
from sympy import DiracDelta, diff, pi
from scipy import signal
omg1 = 1
omg2 = 1.1
Omg=1
Omegar=10
beta=0.1
Mu=0.5
epsilon=0.01
V=1
K=0.5
g=1
m=1
hcut =1
A = 0.001
p = [ omg1, omg2, Omg,Omegar, beta, Mu, K, g,m]
#------------------------------------------------------------------------------
#####----------------Initial conditions, packed in w0--------------------------
##### IMPORTANT NOTE:
##### Please feed initial values with a complex part even if it's zero
#y1 & y2 are first derivatives of x1 and x2
x1 = 1
x2 = 0
#------------------------------------------------------------------------------
z1 = 0+0j
z2 = 1+0j
#A = 0
w0 = [x1, x2,z1,z2]
####----------------Function model passed to the ode solver--------------------
def f(t):
    result = 0
    for i in range(-25, 25, 1):
        result = result + 1.0*DiracDelta(t - (i+1)*2*np.pi)

def vectorfield(t, w, omg1, omg2, Omg, Omegar, beta, Mu, K, g, m):
    x1, x2, z1, z2 = w
    #result = 0
    #for i in range(-10,10,1):
    #    result = result + 1.0*DiracDelta(t-(i+1)*np.pi)
    field = [((g*np.sqrt(2*(x1)/(m*Omegar))*np.sin(x2))
              * (-0.5*abs(z2)**2*np.cos(np.pi/3) + 0.5*np.conj(z1)*z2*np.sin(np.pi/3)
                 + 0.5*abs(z1)**2*np.cos(np.pi/3) + 0.5*np.conj(z2)*z1*np.sin(np.pi/3))
              - K*np.sin(x2)*f(t)),
             ((-g/np.sqrt((2*(x1)*m*Omegar))*np.cos(x2))
              * (-0.5*abs(z2)**2*np.cos(np.pi/3) + 0.5*np.conj(z1)*z2*np.sin(np.pi/3)
                 + 0.5*abs(z1)**2*np.cos(np.pi/3) + 0.5*np.conj(z2)*z1*np.sin(np.pi/3))
              + x1),
             ((z1*(0.5*Omg + 0.5*g*np.sqrt(2*x1/(m*Omegar))*np.cos(x2)*np.cos(np.pi/3))
               + 0.5*z2*g*np.sqrt(2*x1/(m*Omegar))*np.cos(x2)*np.sin(np.pi/3))
              * -1j * (1/hcut) * (1/(cmath.sqrt(abs(z1)**2 + abs(z2)**2)))),
             ((z2*(-0.5*Omg + 0.5*g*np.sqrt(2*x1/(m*Omegar))*np.cos(x2)*np.cos(np.pi/3))
               + 0.5*g*np.sqrt(2*x1/(m*Omegar))*np.cos(x2)*z1*np.sin(np.pi/3))
              * -1j * (1/hcut) * (1/(cmath.sqrt(abs(z1)**2 + abs(z2)**2))))]
    #field1 = np.array(field, dtype='complex_')
    #print(abs(z1)**2 + abs(z2)**2 )
    print(z2)
    return field
duration = 50
# time points
t = np.linspace(0, duration, 100)
abserr = 1.0e-10
relerr = 1.0e-6
#solution = odeint(vectorfield, w0, t, args=(p,))
solution = solve_ivp(vectorfield, [0, duration], w0, t_eval=t, args=(p),
                     atol=abserr, rtol=relerr)
lw = 1
#'''
plot1 = plt.figure(1)
plt.style.use('seaborn-darkgrid')
plt.xlabel('time(t)')
plt.grid(True)
####----------------Plotting the oscillator dynamics---------------------------
plt.plot(t, solution.y[0,:], 'b', label='I', linewidth=lw)
plt.plot(t, solution.y[1,:], 'r', label='$\Theta$', linewidth=lw)
plt.plot(t, solution.y[2,:], 'g', label='x1(t)', linewidth=lw)
plt.plot(t, solution.y[3,:], 'orange', label='x2(t)', linewidth=lw)
plt.legend()
expect_1 = np.absolute(solution.y[2,:])**2 - np.absolute(solution.y[3,:])**2
plot2 = plt.figure(2)
plt.xlabel('time(t)')
#plt.plot(t, result, 'm', label='z_expect2', linewidth=lw)
plt.plot(t, solution.y[0,:], 'b', label='I', linewidth=lw)
plt.legend()
plt.show()
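As posted, f() never returns result, and sympy's DiracDelta cannot be evaluated inside a purely numerical right-hand side anyway. A common numerical workaround, shown here as a minimal sketch that is not part of the original post, is to approximate each delta by a narrow normalised Gaussian and cap the solver's step size so it cannot step over the kicks (dirac_comb and the toy kicked oscillator rhs below are illustrative, not taken from the question):
import numpy as np
from scipy.integrate import solve_ivp

def dirac_comb(t, width=1e-2, period=2*np.pi, n=25):
    # sum of narrow, unit-area Gaussians approximating sum_k delta(t - k*period)
    ks = np.arange(-n, n + 1)
    return np.sum(np.exp(-(t - ks*period)**2 / (2*width**2))) / (width*np.sqrt(2*np.pi))

def rhs(t, y, K):
    # toy kicked oscillator with the same -K*sin(x)*comb(t) structure as above
    x, v = y
    return [v, -x - K*np.sin(x)*dirac_comb(t)]

# max_step must stay well below the pulse width so no kick is skipped
sol = solve_ivp(rhs, [0, 50], [1.0, 0.0], args=(0.5,),
                max_step=1e-3, rtol=1e-6, atol=1e-10)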

Solve Non linear ODE of predefined functions with scipy.odeint

I want to solve a non-linear ordinary differential equation of the form
Theta2 = (C + j(Theta2))**-1 * (f(t) - g(Theta1) - h(Theta0))
where f(), g(), h(), and j() are functions already defined that take Theta2, Theta1, Theta0 or t as an input. Theta2 and Theta1 are the second and first derivatives of Theta0 with respect to time t.
I have been solving the equation without the j(Theta2) term using the SciPy.odeint function using the following code:
from scipy.integrate import odeint
def ODE():
    def g(Theta, t):
        Theta0 = Theta[0]
        Theta1 = Theta[1]
        Theta2 = (1/C)*(f(t) - g(Theta1) - h(Theta0))
        return Theta1, Theta2
    init = 0, 0  # initial conditions on theta0 and theta1 (velocity) at t=0
    sol = odeint(g, init, t)
    A = sol[:, 1]
    B = sol[:, 0]
    return (A, B)
The equation can be rewritten as
theta'' = F(t, theta, theta') / (a + b*theta'')
where a and b are constants and F corresponds to (f(t) - g(Theta1) - h(Theta0)).
It is a quadratic equation in theta''; assuming b != 0 and a^2 + 4*b*F > 0, it has two roots:
theta'' = ( -a +/- sqrt(a^2 + 4*b*F) )/(2*b)
Each root gives an explicit equation of the form y'' = f(t, y, y'), which can be solved with a regular ODE solver after the usual reduction to a first-order system.
Here is an example using solve_ivp which is the replacement for odeint:
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
a = 20
b = 1

def f(t, y, dydt):
    return t + y**2

def ode_function_plus(t, Y):
    # root with the '+' sign in front of the square root
    y = Y[0]
    dydt = Y[1]
    d2y_dt2 = (-a + np.sqrt(a**2 + 4*b*f(t, y, dydt)))/(2*b)
    return [dydt, d2y_dt2]

def ode_function_minus(t, Y):
    # root with the '-' sign in front of the square root
    y = Y[0]
    dydt = Y[1]
    d2y_dt2 = (-a - np.sqrt(a**2 + 4*b*f(t, y, dydt)))/(2*b)
    return [dydt, d2y_dt2]
# Solve
t_span = [0, 4]
Y0 = [10, 1]
sol_plus = solve_ivp(ode_function_plus, t_span, Y0)
sol_minus = solve_ivp(ode_function_minus, t_span, Y0)
print(sol_plus.message)
# Graph
plt.plot(sol_plus.t, sol_plus.y[0, :], label='root with +sqrt');
plt.plot(sol_minus.t, sol_minus.y[0, :], label='root with -sqrt');
plt.xlabel('time'); plt.ylabel('y'); plt.legend();
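A quick consistency check, not in the original answer: each computed root should satisfy the original implicit relation theta''*(a + b*theta'') = F. For example, at the initial point (t=0, y=10, dydt=1):
# verify the '+' root against the implicit equation at the initial point
t0, y0, dydt0 = 0.0, 10.0, 1.0
d2 = (-a + np.sqrt(a**2 + 4*b*f(t0, y0, dydt0)))/(2*b)
print(np.isclose(d2*(a + b*d2), f(t0, y0, dydt0)))   # True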

Differential Equations - ODEINT

I have to solve two differential equations with odeint in Python; the equations are:
y''(t) = (l*q)/a * (1/y(p)) * [1 - z'(p)*u]
z''(t) = a * (1/y(p)) * y'(p) * u
So I was told to make:
y1=y
y2=y'
z1=z
z2=z'
and
y1' = y2
y2' = y'' = (l*q)/a * (1/y1) * [1 - z2*u]
z1' = z2
z2' = z'' = a * (1/y1) * y2 * u
and now I have to solve these 4 equations. l, q, a, u are known.
I tried something like this:
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def rownanie(y, t, l, q, a, u):
    y1, y2, z1, z2 = y
    dydt = [y2, ((l*q)/a)*(1/y1)*(1-z2*u), z2, (a*y2*u)/y1]
    return dydt
l = 1
q = 1
a = 10
u = 0.25
y0 = 0
z0 = 0
t = np.linspace(0, 10, 101)
sol = odeint(rownanie, y0, z0, t, args=(l,q,a,u))
print(sol)
Need help with this
If you read the docs, you'll see odeint
Solves the initial value problem for stiff or non-stiff systems of first order ode-s:
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector
This conversion is a standard mathematical way of transforming a second order ODE into a first order vector ODE.
You therefore create a new vector variable (I'll call it Y to avoid confusion), consisting of Y = [y, y_prime, z, z_prime]. Your implementation of the derivative function is correct.
Also note that in order to solve the system numerically you need to specify the initial conditions for the whole vector, in this case y0, z0, y'0 and z'0. As Thomas pointed out, you need to pass these values as the initial value of the vector when you call odeint.
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def rownanie(Y, t, l, q, a, u):
    y1, y2, z1, z2 = Y
    dydt = [y2, ((l*q)/a)*(1/y1)*(1-z2*u), z2, (a*y2*u)/y1]
    return dydt
l = 1
q = 1
a = 10
u = 0.25
y0 = 0
z0 = 0
y0_prime, z0_prime = 0, 0 # you need to specify a value for these too
t = np.linspace(0, 10, 101)
sol = odeint(rownanie, [y0, y0_prime, z0, z0_prime], t, args=(l,q,a,u))
print(sol)
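One practical caveat, not part of the original answer: with y0 = 0 the 1/y1 term in rownanie is singular at the very first step, so for an actual run you would start from a nonzero y0, for example (hypothetical value):
# hypothetical nonzero initial y to avoid dividing by zero in 1/y1
sol = odeint(rownanie, [0.1, y0_prime, z0, z0_prime], t, args=(l, q, a, u))
print(sol)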

Solving ODE in complex domain with Python (or Matlab)

As a test for a more complicated system, I want to solve a differential equation dw/dz = w where the function w = w(z) is complex valued and z = x + iy as usual. The boundary condition is w = i when z = i. The solution is of course complex and defined on the Argand plane. I was hoping to solve this with some standard ODE solvers in Python. My method is to first define a grid in the Argand plane (lines of constant x and y) and then loop through each grid line, calling an ODE solver on each iteration. In the code below I am attempting to integrate my differential equation between 1j and 2j, but the resulting vector of w is just 1j! Can anyone advise me what to do? Thanks
from scipy.integrate import ode
import numpy as np
from matplotlib.pylab import *
def myodeint(func, w0, z):
    w0 = np.array(w0, complex)
    func2 = lambda z, w: func(w, z)  # odeint has these the other way :/
    z0 = z[0]
    solver = ode(func2).set_integrator('zvode').set_initial_value(w0, z0)
    w = [solver.integrate(zp) for zp in z[1:]]
    w.insert(0, w0)
    return np.array(w)

def func2(w, z, alpha):
    return alpha*w

if __name__ == '__main__':
    # Set grid size in z plane
    x_max = 3
    x_min = 0
    y_max = 3
    y_min = 0
    # Set grid resolution
    dx = 0.1
    dy = 0.1
    # Number of nodes
    x_nodes = int(np.floor((x_max-x_min)/dx)+1)
    y_nodes = int(np.floor((y_max-y_min)/dy)+1)
    # Create array to store value of w(z) at each node
    ww = np.zeros((y_nodes, x_nodes), complex)
    # Set boundary condition: w = w0 at x = x0, y = y0
    x0 = 0
    y0 = 1
    i0 = int((x0-x_min)/dx)   # integer indices for the boundary node
    j0 = int((y_max-y0)/dy)
    w0 = 1j
    ww[j0, i0] = w0
    z0 = 1j
    alpha = 1
    z = np.linspace(z0, z0+1j, 200)
    w = myodeint(lambda w, z: func2(w, z, alpha), [w0, 0, 0], z)
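The independent variable of SciPy's integrators is real, so passing complex "times" to solver.integrate probably explains why only the initial value comes back. A minimal sketch of one workaround, not from the original post (integrate_along_path is my own illustrative helper), is to parameterise the path by a real variable s, integrating along z(s) = z0 + s*(z1 - z0) with dw/ds = f(w, z(s)) * dz/ds:
import numpy as np
from scipy.integrate import solve_ivp

def integrate_along_path(f, w0, z0, z1, n=200):
    # straight path z(s) = z0 + s*(z1 - z0), s in [0, 1], real independent variable
    dz = z1 - z0
    def rhs(s, w):
        return f(w, z0 + s*dz) * dz
    s_eval = np.linspace(0.0, 1.0, n)
    sol = solve_ivp(rhs, [0.0, 1.0], np.atleast_1d(np.asarray(w0, complex)),
                    t_eval=s_eval, rtol=1e-10, atol=1e-12)
    return z0 + s_eval*dz, sol.y[0]

# dw/dz = w with w(i) = i, integrated from z = i to z = 2i
z_path, w_path = integrate_along_path(lambda w, z: w, 1j, 1j, 2j)
print(w_path[-1], 1j*np.exp(z_path[-1] - 1j))   # numerical vs exact w(2i) = i*exp(i)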

How do I put a constraint on SciPy curve fit?

I'm trying to fit the distribution of some experimental values with a custom probability density function. Obviously, the integral of the resulting function should always be equal to 1, but the results of simple scipy.optimize.curve_fit(function, dataBincenters, dataCounts) never satisfy this condition.
What is the best way to solve this problem?
You can define your own residuals function, including a penalization parameter, as detailed in the code below, where it is known beforehand that the integral over the interval must be 2. If you run it without the penalization you will see that what you get is the conventional curve_fit result:
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit, leastsq
from scipy.integrate import quad

x = np.linspace(0, np.pi, 100)
y = np.sin(x) + np.random.rand(len(x))*0.4

def func1(x, a0, a1, a2, a3):
    return a0 + a1*x + a2*x**2 + a3*x**3

# here you include the penalization factor
def residuals(p, x, y):
    integral = quad(func1, 0, np.pi, args=(p[0], p[1], p[2], p[3]))[0]
    penalization = abs(2. - integral)*10000
    return y - func1(x, p[0], p[1], p[2], p[3]) - penalization
popt1, pcov1 = curve_fit(func1, x, y)
popt2, pcov2 = leastsq(func=residuals, x0=(1., 1., 1., 1.), args=(x, y))
y_fit1 = func1(x, *popt1)
y_fit2 = func1(x, *popt2)
plt.scatter(x, y, marker='.')
plt.plot(x, y_fit1, color='g', label='curve_fit')
plt.plot(x, y_fit2, color='y', label='constrained')
plt.legend()
plt.xlim(-0.1, 3.5)
plt.ylim(0, 1.4)
print('Exact integral:', quad(np.sin, 0, np.pi)[0])
print('Approx integral1:', quad(func1, 0, np.pi, args=(popt1[0], popt1[1], popt1[2], popt1[3]))[0])
print('Approx integral2:', quad(func1, 0, np.pi, args=(popt2[0], popt2[1], popt2[2], popt2[3]))[0])
plt.show()
#Exact integral: 2.0
#Approx integral1: 2.60068579748
#Approx integral2: 2.00001911981
Other related questions:
SciPy LeastSq Goodness of Fit Estimator
Here is an almost identical snippet that uses only curve_fit.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
import scipy.integrate as integr
x = np.linspace(0, np.pi, 100)
y = np.sin(x) + (0. + np.random.rand(len(x))*0.4)
def Func(x, a0, a1, a2, a3):
    return a0 + a1*x + a2*x**2 + a3*x**3

# modified function definition with penalization
def FuncPen(x, a0, a1, a2, a3):
    integral = integr.quad(Func, 0, np.pi, args=(a0, a1, a2, a3))[0]
    penalization = abs(2. - integral)*10000
    return a0 + a1*x + a2*x**2 + a3*x**3 + penalization
popt1, pcov1 = opt.curve_fit( Func, x, y )
popt2, pcov2 = opt.curve_fit( FuncPen, x, y )
y_fit1 = Func(x, *popt1)
y_fit2 = Func(x, *popt2)
plt.scatter(x,y, marker='.')
plt.plot(x,y_fit2, color='y', label='constrained')
plt.plot(x,y_fit1, color='g', label='curve_fit')
plt.legend(); plt.xlim(-0.1,3.5); plt.ylim(0,1.4)
print('Exact integral:', integr.quad(np.sin, 0, np.pi)[0])
print('Approx integral1:', integr.quad(Func, 0, np.pi, args=(popt1[0], popt1[1], popt1[2], popt1[3]))[0])
print('Approx integral2:', integr.quad(Func, 0, np.pi, args=(popt2[0], popt2[1], popt2[2], popt2[3]))[0])
plt.show()
#Exact integral: 2.0
#Approx integral1: 2.66485028754
#Approx integral2: 2.00002116217
Following the example above, here is a more general way to add constraints:
from scipy.optimize import minimize
from scipy.integrate import quad
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, np.pi, 100)
y = np.sin(x) + (0. + np.random.rand(len(x))*0.4)
def func_to_fit(x, params):
    return params[0] + params[1] * x + params[2] * x ** 2 + params[3] * x ** 3

def constr_fun(params):
    intgrl, _ = quad(func_to_fit, 0, np.pi, args=(params,))
    return intgrl - 2

def func_to_minimise(params, x, y):
    y_pred = func_to_fit(x, params)
    return np.sum((y_pred - y) ** 2)
# Do the parameter fitting
#without constraints
res1 = minimize(func_to_minimise, x0=np.random.rand(4), args=(x, y))
params1 = res1.x
# with constraints
cons = {'type': 'eq', 'fun': constr_fun}
res2 = minimize(func_to_minimise, x0=np.random.rand(4), args=(x, y), constraints=cons)
params2 = res2.x
y_fit1 = func_to_fit(x, params1)
y_fit2 = func_to_fit(x, params2)
plt.scatter(x,y, marker='.')
plt.plot(x, y_fit2, color='y', label='constrained')
plt.plot(x, y_fit1, color='g', label='curve_fit')
plt.legend(); plt.xlim(-0.1,3.5); plt.ylim(0,1.4)
plt.show()
print(f"Constrant violation: {constr_fun(params1)}")
Constraint violation: -2.9179325622408214e-10
If you are able to normalise your probability fitting function in advance, you can use that information to constrain the fit. A very simple example is fitting a Gaussian to data. Fitting the three-parameter (A, mu, sigma) Gaussian
f(x) = A * exp(-(x - mu)**2 / (2*sigma**2))
leaves it unnormalised in general; however, if one instead enforces the normalisation condition on A,
A = 1 / (sigma * sqrt(2*pi)),
then the Gaussian has only two free parameters and is automatically normalised.
You could ensure that your fitted probability distribution is normalised via a numerical integration. For example, assuming that you have data x and y and that you have defined an unnormalised_function(x, a, b) with parameters a and b for your probability distribution, which is defined on the interval x1 to x2 (which could be infinite):
from scipy.optimize import curve_fit
from scipy.integrate import quad
# Define a numerically normalised function
def normalised_function(x, a, b):
    normalisation, _ = quad(lambda x: unnormalised_function(x, a, b), x1, x2)
    return unnormalised_function(x, a, b)/normalisation
# Do the parameter fitting
fitted_parameters, _ = curve_fit(normalised_function, x, y)
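For concreteness, here is a sketch of that pattern applied to a Gaussian shape; the synthetic histogram data, the parameter names mu and sigma, and the infinite support are my own assumptions, not part of the original answer.
import numpy as np
from scipy.optimize import curve_fit
from scipy.integrate import quad

# synthetic "measured" density: histogram of samples from a unit normal (assumption)
rng = np.random.default_rng(0)
samples = rng.normal(0.0, 1.0, 10000)
counts, edges = np.histogram(samples, bins=50, density=True)
centers = 0.5*(edges[:-1] + edges[1:])

x1, x2 = -np.inf, np.inf   # support of the distribution

def unnormalised_function(x, mu, sigma):
    return np.exp(-(x - mu)**2 / (2*sigma**2))

def normalised_function(x, mu, sigma):
    normalisation, _ = quad(lambda x: unnormalised_function(x, mu, sigma), x1, x2)
    return unnormalised_function(x, mu, sigma)/normalisation

fitted_parameters, _ = curve_fit(normalised_function, centers, counts, p0=[0.0, 1.0])
print(fitted_parameters)                                                       # roughly [0, 1]
print(quad(lambda x: normalised_function(x, *fitted_parameters), x1, x2)[0])   # ~1 by construction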
