Python: Plotting an integral function

I saw there are some similar questions around here but none of them seems to really get to my problem.
This is my code:
import numpy as np
from matplotlib import pyplot as mp
from scipy.integrate import *

deltat = 30.0e-6
ku = 50.0e-12
pant = 0.05
csc = 200.0e-12
tausc = 150.0e-6

def int1(tp, tausc, csc, t):
    return (1/csc)*(np.exp(-tp/tausc))

def deltafisc(t):
    return quad(int1, 0, np.inf, args=(tausc, csc, t))

t = np.linspace(-2.0, 2.0, num=100)*1e-4
mp.plot(t,deltafisc(t))
mp.show()
The function itself works, but when I try to plot it an error is reported.
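A likely reason (not stated in the original post): quad returns a single (value, abserr) pair rather than an array over t, so plot sees 100 x-values and only 2 y-values. A minimal sketch of one way to plot the integral, evaluating it point by point and keeping only the value:

import numpy as np
from matplotlib import pyplot as mp
from scipy.integrate import quad

csc = 200.0e-12
tausc = 150.0e-6

def int1(tp, tausc, csc, t):
    return (1/csc)*np.exp(-tp/tausc)

def deltafisc(t):
    # quad returns (value, estimated_error); keep only the value
    return quad(int1, 0, np.inf, args=(tausc, csc, t))[0]

t = np.linspace(-2.0, 2.0, num=100)*1e-4
# evaluate the integral separately for each element of t
values = np.array([deltafisc(ti) for ti in t])
mp.plot(t, values)
mp.show()

Note that, as written, int1 does not actually depend on t, so the plotted curve is a constant; if the integrand or the limits were meant to depend on t, that dependence still has to be added.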

Related

ValueError: Residuals are not finite in the initial point, with scipy least squares

import originpro as op
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def circle(ReZ,R1,R2):
    nImZ=(ReZ-R1)*np.sqrt(R2/(ReZ-R1)-1)
    return nImZ

def Fit(func,used_x,used_y,bound,yerror):
    fit_para,fit_cov=curve_fit(func,used_x,used_y,bounds=bound,sigma=yerror,absolute_sigma=True)
    print(fit_para)
    return fit_para

def yerror(ydata, p):
    output=[]
    for i in ydata:
        data=i*p
        output.append(data)
    return output

def PrePlot(exp_x,exp_y,fit_x,fit_y):
    fig,ax=plt.subplots()
    fitline=ax.plot(fit_x,fit_y,marker='.',color='blue')
    expline=ax.scatter(exp_x,exp_y,s=20,marker='.',color='red')
    ax.legend(['Experimental','Fitted line'])
    plt.show()

wkslist=range(83,109)
for wksnum in wkslist:
    wks=op.find_sheet('w','Book'+str(wksnum))
    xdata=wks.to_list(1)
    ydata=wks.to_list(2)
    bound=([3000,1*10**8],[6000,1*10**9])
    error=yerror(ydata, 0.1)
    fitRes=Fit(circle,xdata,ydata,bound,error)
    fit_x=xdata
    fit_y=[circle(x,fitRes[0],fitRes[1]) for x in fit_x]
    PrePlot(xdata,ydata,fit_x,fit_y)
I get the error for the second worksheet (wks).
For the function used (circle), the condition is: when ReZ > R1, R1 + R2 >= ReZ; when ReZ < R1, R1 + R2 <= ReZ.
The maximum ReZ in the xdata is 1.35e8 and the minimum is 4353, which satisfies that condition with the given bounds.
So I'm really confused here.
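Not part of the original post, but a diagnostic that may help: this error from curve_fit/least_squares usually means the model returns NaN or inf at the starting parameters, which for circle happens whenever R2/(ReZ - R1) - 1 goes negative under the square root, or when ReZ equals R1. Since no p0 is passed, curve_fit chooses its own starting point, so it is worth checking candidate parameter values near the bounds as well. A sketch that could be dropped into the loop just before Fit(...); R1_guess and R2_guess are hypothetical values inside the stated bounds:

import numpy as np

# hypothetical starting values inside the given bounds
R1_guess, R2_guess = 4500.0, 5.0e8

x_arr = np.asarray(xdata, dtype=float)   # worksheet column from the loop above
model = (x_arr - R1_guess) * np.sqrt(R2_guess / (x_arr - R1_guess) - 1)
bad = ~np.isfinite(model)
print("non-finite model values at this guess:", np.count_nonzero(bad))
print("offending ReZ values:", x_arr[bad])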

Is there a multiple integrator in Python providing both variable integration limits (like scipy) and high precision (like mpmath)?

I can use scipy's quad and nquad for a quadruple integration involving variable integration limits. The problem is that, at the default precision, an error is raised when the requested tolerance cannot be achieved. With the mpmath integrators I can set any arbitrary precision via mp.dps, but I can't see if and how the limits can be made variable as with nquad. mpmath also provides very fast execution with the Gauss-Legendre method in quadgl, which is highly desirable because my function is smooth, whereas completing the four integrations with scipy takes an exorbitant amount of time. Please help.
Below is a simple example that fails to achieve my goal:
from datetime import datetime
import scipy
from scipy.special import jn, jn_zeros
import numpy as np
import matplotlib.pyplot as plt
from mpmath import *
from mpmath import mp
from numpy import *
from scipy.optimize import *

# Set the precision
mp.dps = 15#; mp.pretty = True

# Setup shortcuts, so we can just write exp() instead of mp.exp(), etc.
F = mp.mpf
exp = mp.exp
sin = mp.sin
cos = mp.cos
asin = mp.asin
acos = mp.acos
sqrt = mp.sqrt
pi = mp.pi
tan = mp.tan

start = datetime.now()
print(start)

#optionsy={'limit':100, 'epsabs':1.49e-1, 'epsrel':1.49e-01}
#optionsx={'limit':100, 'epsabs':1.49e-1, 'epsrel':1.49e-01}

def f(x,y,z):
    return 2*sqrt(1-x**2) + y**2.0 + z

def rangex(y,z):
    return [-1,1]

def rangey(z):
    return [1,2]

def rangez():
    return [2,3]

def result():
    return quadgl(f, rangex, rangey, rangez)

"""
#The below works:
def result():
    return quadgl(f, [-1,1], [1,2], [2,3])
"""

print(result())
end = datetime.now()
print(end-start)
OK, let me put something in an answer; it is hard to put code in the comments.
Simple optimization with mpmath is to follow a few simple rules:
y**2.0 is VERY expensive (log, exp, ...); replace it with y*y.
y**2 is still expensive; replace it with y*y.
Multiplication is a lot more expensive than addition; replace x*y + y**2.0 with (x+y)*y.
Division is more expensive than multiplication; replace y/4 with 0.25*y.
Code, Win 10 x64, Python 3.8
import mpmath

def f3():
    def f2(x):
        def f1(x,y):
            def f(x,y,z):
                return 1.0 + (x+y)*y + 3.0*z
            return mpmath.quadgl(f, [-1.0, 1], [1.2*x, 1.0], [0.25*y, x*x])
        return mpmath.quadgl(f1, [-1, 1.0], [1.2*x, 1.0])
    return mpmath.quadgl(f2, [-1.0, 1.0])
On my computer this went from 12.9 sec to 10.6 sec, about 20% off.
Below is a simple example of how I can do only a triple integration with mpmath. It does not address high precision with four integrations, and in any case execution time is an even bigger problem. Any help is welcome.
from datetime import datetime
import scipy
import numpy as np
from mpmath import *
from mpmath import mp
from numpy import *

# Set the precision
mp.dps = 20#; mp.pretty = True

# Setup shortcuts, so we can just write exp() instead of mp.exp(), etc.
F = mp.mpf
exp = mp.exp
sin = mp.sin
cos = mp.cos
asin = mp.asin
acos = mp.acos
sqrt = mp.sqrt
pi = mp.pi
tan = mp.tan

start = datetime.now()
print('start: ',start)

def f3():
    def f2(x):
        def f1(x,y):
            def f(x,y,z):
                return 1.0 + x*y + y**2.0 + 3.0*z
            return quadgl(f, [-1.0, 1], [1.2*x, 1.0], [y/4, x**2.0])
        return quadgl(f1, [-1, 1.0], [1.2*x, 1.0])
    return quadgl(f2, [-1.0, 1.0])

print('result =', f3())
end = datetime.now()
print('duration in mins:',end-start)
#start: 2020-08-19 17:05:06.984375
#result = 5.0122222222222221749
#duration: 0:01:35.275956
Furthermore, an attempt to combine a first scipy integration with a triple mpmath integration does not seem to produce any output even after more than 24 hours, even with the simplest of functions. What is wrong with the following code?
from datetime import datetime
import scipy
import numpy as np
from mpmath import *
from mpmath import mp
from numpy import *
from scipy import integrate

# Set the precision
mp.dps = 15#; mp.pretty = True

# Setup shortcuts, so we can just write exp() instead of mp.exp(), etc.
F = mp.mpf
exp = mp.exp
sin = mp.sin
cos = mp.cos
asin = mp.asin
acos = mp.acos
sqrt = mp.sqrt
pi = mp.pi
tan = mp.tan

start = datetime.now()
print('start: ',start)

#Function to be integrated
def f(x,y,z,w):
    return 1.0 + x + y + z + w

#Scipy integration: FIRST INTEGRAL
def f0(x,y,z):
    return integrate.quad(f, -20, 10, args=(x,y,z), epsabs=1.49e-12, epsrel=1.4e-8)[0]

#Mpmath integrator of function f0(x,y,z): THREE OUTER INTEGRALS
def f3():
    def f2(x):
        def f1(x,y):
            return quadgl(f0, [-1.0, 1], [-2, x], [-10, y])
        return quadgl(f1, [-1, 1.0], [-2, x])
    return quadgl(f2, [-1.0, 1.0])

print('result =', f3())
end = datetime.now()
print('duration:', end-start)
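Not from the original post, but one way to see why this seems to hang is to count how often the innermost scipy quad is actually invoked: every evaluation node of the three nested quadgl levels triggers a full adaptive quad at tight tolerance, so the number of innermost integrand evaluations multiplies very quickly. A minimal counting sketch (the counted wrapper is hypothetical); wrap f0 before calling f3() as above:

from functools import wraps

call_count = 0

def counted(fn):
    # hypothetical wrapper that counts evaluations of the inner scipy integral
    @wraps(fn)
    def wrapper(*args, **kwargs):
        global call_count
        call_count += 1
        if call_count % 1000 == 0:
            print("inner quad calls so far:", call_count)
        return fn(*args, **kwargs)
    return wrapper

f0 = counted(f0)   # f3() will now pick up the counting wrapper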
Below is the full code for which the original question was raised. It uses scipy to carry out all four integrations:
# Imports
from datetime import datetime
import scipy.integrate as si
import scipy
from scipy.special import jn, jn_zeros
from scipy.integrate import quad
from scipy.integrate import nquad
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import fixed_quad
from scipy.integrate import quadrature
from mpmath import mp
from numpy import *
from scipy.optimize import *

# Set the precision
mp.dps = 30

# Setup shortcuts, so we can just write exp() instead of mp.exp(), etc.
F = mp.mpf
exp = mp.exp
sin = mp.sin
cos = mp.cos
asin = mp.asin
acos = mp.acos
sqrt = mp.sqrt
pi = mp.pi
tan = mp.tan

start = datetime.now()
print(start)

R1 = F(6.37100000000000e6)
k1 = F(8.56677817058932e-8)
R2 = F(1.0)
k2 = F(5.45789437248245e-01)
r = F(12742000.0)

#Replace computed initial constants with values, presuming this is faster, like below:
#a2 = R2/r
#print(a2)
a2 = F(0.0000000784806152880238581070475592529)

def u1(phi2):
    return r*cos(phi2)-r*sqrt(a2**2.0-(sin(phi2))**2.0)
def u2(phi2):
    return r*cos(phi2)+r*sqrt(a2**2.0-(sin(phi2))**2.0)
def om(u,phi2):
    return u-r*cos(phi2)
def mp2(phi2):
    return r*sin(phi2)
def a1(u):
    return R1/u

optionsx={'limit':100, 'epsabs':1.49e-14, 'epsrel':1.49e-11}
optionsy={'limit':100, 'epsabs':1.49e-14, 'epsrel':1.49e-10}

#---- in direction u
def a1b1_u(x,y,u):
    return 2.0*u*sqrt(a1(u)**2.0-(sin(y))**2.0)
def oa2_u(x,y,u,phi2):
    return (mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*cos(y)
            - sqrt((mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*(cos(y)))**2.0
                   + R2**2.0-om(u,phi2)**2.0-mp2(phi2)**2.0))
def ob2_u(x,y,u,phi2):
    return (mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*cos(y)
            + sqrt((mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*(cos(y)))**2.0
                   + R2**2.0-om(u,phi2)**2.0-mp2(phi2)**2.0))
def func1_u(x,y,u,phi2):
    return (-exp(-k1*a1b1_u(x,y,u)-k2*ob2_u(x,y,u,phi2))+exp(+k2*oa2_u(x,y,u,phi2)))*sin(y)*cos(y)

#--------joint_coaxial integration: u1
def fg_u1(u,phi2):
    return nquad(func1_u, [[-pi, pi], [0, asin(a1(u))]], args=(u,phi2), opts=[optionsx,optionsy])[0]

#Constants to be used for normalization at the end, or in the interim integrals if this helps adjust values for speed of execution
piA1 = pi*(R1**2.0-1.0/(2.0*k1**2.0)+exp(-2.0*k1*R1)*(2.0*k1*R1+1.0)/(2.0*k1**2.0))
piA2 = pi*(R2**2.0-1.0/(2.0*k2**2.0)+exp(-2.0*k2*R2)*(2.0*k2*R2+1.0)/(2.0*k2**2.0))

#----THIRD integral of u1
def third_u1(u,phi2):
    return fg_u1(u,phi2)*u**2.0
def third_u1_I(phi2):
    return quad(third_u1, u1(phi2), u2(phi2), args = (phi2), epsabs=1.49e-20, epsrel=1.49e-09)[0]

#----FOURTH integral of u1
def fourth_u1(phi2):
    return third_u1_I(phi2)*sin(phi2)*cos(phi2)
def force_u1():
    return quad(fourth_u1, 0.0, asin(a2), args = (), epsabs=1.49e-20, epsrel=1.49e-08)[0]

force_u1 = force_u1()*r**2.0*2.0*pi*k2/piA1/piA2
print('r = ', r, 'force_u1 =', force_u1)

end = datetime.now()
print(end)

args = {
    'p':r,
    'q':force_u1,
    'r':start,
    's':end
}

#to txt file
f=open('Sphere-test-force-u-joint.txt', 'a')
f.write('\n{p},{q},{r},{s}'.format(**args))
#f.flush()
f.close()
I am interested in setting epsrel sufficiently low, depending on the case. The epsabs is generally unknown a priori, so I understand that I should make it very low to keep it from dominating the output, in which case it would introduce a computational artifact. When I make it lower, a warning is raised that the round-off errors are significant and that the total error may be underestimated for the desired tolerance to be achieved.
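Not from the original post, but for reference: scipy reports that round-off message as scipy.integrate.IntegrationWarning, so it can be escalated to an exception (or logged per call) while experimenting with epsabs and epsrel. A minimal sketch, using a hypothetical one-dimensional integrand g rather than the full quadruple integral above:

import warnings
import numpy as np
from scipy.integrate import quad, IntegrationWarning

def g(x):
    # hypothetical smooth test integrand
    return np.exp(-x) * np.cos(x)

with warnings.catch_warnings():
    warnings.simplefilter("error", IntegrationWarning)   # escalate the warning to an exception
    try:
        value, abserr = quad(g, 0.0, np.inf, epsabs=1.49e-20, epsrel=1.49e-12)
        print(value, abserr)
    except IntegrationWarning as w:
        print("tolerance not reached:", w)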
Whilst the question is not about speed, speed is intimately connected with making the execution of a quadruple integration practical, prior to the inquiry about precision and tolerance. To test the speed, I set (increased) all four epsrel values to 1e-02, which reduced the runtime of the original code to 2:14 (hours). Then I simplified the powers per Severin's suggestions and implemented some memoization. These cumulatively reduced the time to 1:29 (hours). The edited lines of the code are provided here:
from memoization import cached

@cached(ttl=10)
def u1(phi2):
    return r*cos(phi2)-r*sqrt(a2*a2-sin(phi2)*sin(phi2))
@cached(ttl=10)
def u2(phi2):
    return r*cos(phi2)+r*sqrt(a2*a2-sin(phi2)*sin(phi2))
@cached(ttl=10)
def om(u,phi2):
    return u-r*cos(phi2)
@cached(ttl=10)
def mp2(phi2):
    return r*sin(phi2)
@cached(ttl=10)
def a1(u):
    return R1/u

optionsx={'limit':100, 'epsabs':1.49e-14, 'epsrel':1.49e-02}
optionsy={'limit':100, 'epsabs':1.49e-14, 'epsrel':1.49e-02}

def a1b1_u(x,y,u):
    return 2.0*u*sqrt(a1(u)*a1(u)-sin(y)*sin(y))
def oa2_u(x,y,u,phi2):
    return (mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*cos(y)
            - sqrt((mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*(cos(y)))**2.0
                   + 1.0-om(u,phi2)*om(u,phi2)-mp2(phi2)*mp2(phi2)))
def ob2_u(x,y,u,phi2):
    return (mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*cos(y)
            + sqrt((mp2(phi2)*sin(y)*cos(x)+om(u,phi2)*(cos(y)))**2.0
                   + 1.0-om(u,phi2)*om(u,phi2)-mp2(phi2)*mp2(phi2)))

def third_u1(u,phi2):
    return fg_u1(u,phi2)*u*u
def third_u1_I(phi2):
    return quad(third_u1, u1(phi2), u2(phi2), args = (phi2), epsabs=1.49e-20, epsrel=1.49e-02)[0]
def force_u1():
    return quad(fourth_u1, 0.0, asin(a2), args = (), epsabs=1.49e-20, epsrel=1.49e-02)[0]
However, the output is an artifact caused by the inadequate tolerance introduced. I can progressively set epsrel to lower values and see whether the result converges to a realistic value in a realistic time with the precision available in scipy. I hope this illustrates the original question much better.
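Not from the original post, but the convergence check described above can be scripted directly: run the integration with progressively tighter epsrel and compare successive results. A minimal sketch, assuming force_u1 is (hypothetically) refactored to take the outer tolerance as a parameter; in practice all four epsrel values would be tightened together, not just the outermost one:

from datetime import datetime
from scipy.integrate import quad

def force_u1_with_tol(epsrel):
    # hypothetical variant of force_u1 that exposes the outer tolerance
    return quad(fourth_u1, 0.0, asin(a2), epsabs=1.49e-20, epsrel=epsrel)[0]

previous = None
for epsrel in (1.49e-02, 1.49e-03, 1.49e-04):
    t0 = datetime.now()
    value = force_u1_with_tol(epsrel)
    elapsed = datetime.now() - t0
    change = None if previous is None else abs(value - previous) / abs(value)
    print(f"epsrel={epsrel:.2e}  value={value}  rel. change={change}  time={elapsed}")
    previous = value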

Explicitly calculate convolution in Python

I want to calculate a convolution in Python by explicitly evaluating the integral
and comparing the result with what I get from fftconvolve. The integral would be calculated using quad:
import numpy as np
from scipy.integrate import quad
from scipy.signal import fftconvolve
import matplotlib.pyplot as plt
from sympy import symbols

def f(x,a,b):
    return np.exp(-(x-a)**2/b)

def g(x,a,b):
    return np.exp(-(x-np.pi*a)**2/(2.9*b))

x = symbols('x')
a = 1.2
b = 4.7
t = np.linspace(-100,100,int(1e4))
dt = t[1] - t[0]

h1 = fftconvolve(f(t,a,b),g(t,a,b),mode='same')*dt
h2,_ = quad(f(t,a,b)*g(x-t,a,b),-np.inf,np.inf,epsabs=0,epsrel=1e-6,args=(a,b))

x = np.linspace(-100,100,int(1e4))
plt.figure()
plt.plot(t,h1,label='fftconvolve')
plt.plot(x,h2,label='brute force')
plt.legend()
plt.show()
I keep getting the error AttributeError: 'Mul' object has no attribute 'exp', which refers to the line h2,_ = quad(... when it is called by quad.
What does this error mean, and is this an appropriate way to use quad to evaluate the integral?
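For reference, a sketch of how the brute-force version might be set up (this is an assumption about the intent, not the original poster's code): the convolution h(x) = ∫ f(t) g(x − t) dt needs a plain Python integrand of the integration variable t, and quad has to be called once per output point x, so sympy symbols are not needed at all:

import numpy as np
from scipy.integrate import quad

def f(x, a, b):
    return np.exp(-(x - a)**2 / b)

def g(x, a, b):
    return np.exp(-(x - np.pi * a)**2 / (2.9 * b))

def conv_integrand(t, x, a, b):
    # integrand of the convolution evaluated at output point x
    return f(t, a, b) * g(x - t, a, b)

a, b = 1.2, 4.7
xs = np.linspace(-100, 100, 500)   # coarser grid than 1e4, since quad runs once per point
h2 = np.array([quad(conv_integrand, -np.inf, np.inf, args=(x, a, b))[0] for x in xs])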

Solving nonlinear first-order differential equations using Python

I would like to solve a nonlinear first order differential equation using Python.
For instance,
df/dt = f**4
I wrote the following program, but I have an issue with matplotlib, so I don't know if the method I used with scipy is correct.
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
derivate=lambda f,t: f**4
f0=10
t=np.linspace(0,2,100)
f_numeric=scipy.integrate.odeint(derivate,f0,t)
print(f_numeric)
plt.plot(t,f_numeric)
plt.show()
Which results in the following error:
AttributeError: 'float' object has no attribute 'rint'
In this case, you might be better off using Sympy, which allows you to obtain the closed form solutions:
from IPython.display import display
import sympy as sy
from sympy.solvers.ode import dsolve
import matplotlib.pyplot as plt
import numpy as np

sy.init_printing()  # LaTeX like pretty printing for IPython

t = sy.symbols("t", real=True)
f = sy.symbols("f", function=True)
eq1 = sy.Eq(f(t).diff(t), f(t)**4)  # the equation
sls = dsolve(eq1)  # solve ODE

# print solutions:
print("For ode")
display(eq1)
print("the solutions are:")
for s in sls:
    display(s)

# plot solutions:
x = np.linspace(0, 2, 100)
fg, axx = plt.subplots(2, 1)
axx[0].set_title("Real part of solution of $\\frac{d}{dt}f(t)= (f(t))^4$")
axx[1].set_title("Imag. part of solution of $\\frac{d}{dt}f(t)= (f(t))^4$")
fg.suptitle("$C_1=0.1$")
for i, s in enumerate(sls, start=1):
    fn1 = s.rhs.subs("C1", .1)  # C_1 -> 0.1
    fn2 = sy.lambdify(t, fn1, modules="numpy")  # make numpy function
    y = fn2(x+0j)  # needs to be called with complex numbers
    axx[0].plot(x, np.real(y), label="Sol. %d" % i)
    axx[1].plot(x, np.imag(y), label="Sol. %d" % i)
for ax in axx:
    ax.legend(loc="best")
    ax.grid(True)
axx[0].set_ylabel("Re$\\{f(t)\\}$")
axx[1].set_ylabel("Im$\\{f(t)\\}$")
axx[-1].set_xlabel("$t$")
fg.canvas.draw()
plt.show()
In an IPython shell, you should see the pretty-printed ODE, its symbolic solutions, and the two plots of the real and imaginary parts.
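Not from the original answer, but a note for the numerical route: for df/dt = f**4 with f(0) = f0 the exact solution is f(t) = (f0**-3 - 3t)**(-1/3), which blows up at t = 1/(3*f0**3) (about 3.3e-4 for f0 = 10), so integrating out to t = 2 cannot succeed with any solver. A minimal sketch with scipy.integrate.solve_ivp that stays below the blow-up time:

import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp

f0 = 10.0
t_blowup = 1.0 / (3.0 * f0**3)   # ~3.33e-4 for f0 = 10

# integrate only up to 90% of the blow-up time
sol = solve_ivp(lambda t, f: f**4, (0.0, 0.9 * t_blowup), [f0], dense_output=True)

t = np.linspace(0.0, 0.9 * t_blowup, 200)
plt.plot(t, sol.sol(t)[0], label="numerical")
plt.plot(t, (f0**-3 - 3*t)**(-1.0/3.0), "--", label="exact")
plt.legend()
plt.show()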

Solve an integral equation in Python

I need to solve an integral equation in Python 3.2 on Windows 7.
I want to find an initial guess for the solution first and then use fsolve() to solve it.
This is the code:
import numpy as np
from scipy.optimize.minpack import fsolve
from cmath import cos, exp
from scipy.integrate.quadpack import quad

def integrand2(x, b):
    return exp(-x)/b

def intergralFunc2(b):
    integral,err = quad(integrand2, 0, 10, args=(b))  # error here
    return 0.01 - integral

import matplotlib.pyplot as plt

def findGuess():
    vfunc = np.vectorize(intergralFunc2)
    f = np.linspace(-20, 20,10)
    plt.plot(f, vfunc(f))
    plt.xlabel('guess value')
    plt.show()

def solveFunction():
    y= fsolve(intergralFunc2, 10)
    return y

if __name__ == '__main__':
    findGuess()
    solution = solveFunction()
    print("solution is ", solution)
I got error:
quadpack.error: Supplied function does not return a valid float.
Any help would be appreciated.
Just make the following change and it should work (it worked for me).
Remove:
from cmath import exp, cos
Include:
from numpy import exp, cos
As explained in the comments, the cmath functions only work on scalars, not arrays, and they return complex values, which is why quad complains that the supplied function "does not return a valid float"; the numpy versions return real floats and also work on arrays.
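For reference, a minimal sketch of the corrected integrand with the numpy import in place; the explicit conversion of b is an added guard, since fsolve passes a length-1 array:

import numpy as np
from numpy import exp
from scipy.integrate import quad
from scipy.optimize import fsolve

def integrand2(x, b):
    # np.exp returns a real float for real scalar input, which quad accepts
    return exp(-x) / b

def intergralFunc2(b):
    b = np.asarray(b, dtype=float).ravel()[0]   # fsolve may pass a length-1 array
    integral, err = quad(integrand2, 0, 10, args=(b,))
    return 0.01 - integral

print("solution is ", fsolve(intergralFunc2, 10))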
