I'm experiencing weird behaviour with this code: I would expect that w_numba == w, but this is not the case. If Nx < 6 the results are the same, but for higher values the Numba-compiled method returns NaN.
Could you please help me?
Many thanks
Regards
import numpy as np
from numba import njit
@njit
def dp_formula_with_numba(a: np.ndarray, b: float, x: np.ndarray):
    """ formula dp """
    term1 = np.arcsinh((b - 1) / (a ** .5))
    return ((1 - b) * np.arcsinh(b / (a ** .5)) - ((b ** 2 - 2 * b + a + 1) ** .5) + ((b ** 2 + a) ** .5) +
            term1 * (b - 1)) * ((1 - x ** 2) ** .5)

def dp_formula(a, b, x):
    """ formula dp """
    term1 = np.arcsinh((b - 1) / (a ** .5))
    return ((1 - b) * np.arcsinh(b / (a ** .5)) - ((b ** 2 - 2 * b + a + 1) ** .5) + ((b ** 2 + a) ** .5) +
            term1 * (b - 1)) * ((1 - x ** 2) ** .5)
Nx = 8
Nint = int(Nx/2)
XX1 = np.arange(0, Nint + 1) / Nint
XX1[0] = 0.0025
XX1[Nint] = 0.9999
b_int = 0
a_int = (0.25 * XX1) ** 2
print("a: " + str(a_int) + " dtype:" + str(a_int.dtype) + " type:" + str(type(a_int)))
print("b: " + str(b_int) + " type:" + str(type(b_int)))
print("x: " + str(XX1) + " dtype:" + str(XX1.dtype) + " type:" + str(type(XX1)))
w_numba = dp_formula_with_numba(a_int, b_int, XX1)
w = dp_formula(a_int, b_int, XX1)
print("")
print('With numba: ' + str(w_numba))
print('With numpy: ' + str(w))
Results:
a_int: [3.90625000e-07 3.90625000e-03 1.56250000e-02 3.51562500e-02 6.24875006e-02] dtype:float64 type:<class 'numpy.ndarray'>
b_int: 0 type:<class 'int'>
XX1: [0.0025 0.25 0.5 0.75 0.9999] dtype:float64 type:<class 'numpy.ndarray'>
With numba: [7.07150889 2.4470088 nan nan 0.0185825 ]
With numpy: [7.07150889 2.4470088 1.6399837 1.0224987 0.0185825 ]
I did an even simpler test:
import numpy as np
from numba import njit
@njit
def testing_with_numba(a: np.ndarray, b: float):
    """ formula dp """
    q = a**.5
    print((b - 1) / q)
    print((b - 1) / (a**.5))
a_int = np.arange(4)+1
b_int = 0
print("a_int: " + str(a_int) + " dtype:" + str(a_int.dtype) + " type:" + str(type(a_int)))
print("b_int: " + str(b_int) + " type:" + str(type(b_int)))
testing_with_numba(a_int, b_int)
and this is what gets printed:
a_int: [1 2 3 4] dtype:int32 type:<class 'numpy.ndarray'>
b_int: 0 type:<class 'int'>
[-1. -0.70710678 -0.57735027 -0.5 ]
[-1. -0.70710678 nan nan]
I tried with both python3.6 + numba 0.53.1 + numpy 1.19.5, and python3.8 + numba 0.51.2 + numpy 1.19.2 on Windows 7 and got the same results.
I then ran the same code in the same environment (Anaconda3-2020.11-Windows-x86_64, python3.8 + numba 0.51.2 + numpy 1.19.2) but on Windows 10, and it works fine.
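For what it's worth, your own simpler test already points at a workaround rather than a root cause: the division behaves once a ** .5 is assigned to a temporary first. Below is a sketch of a defensive rewrite along those lines, plus a float64 cast at the call site to rule out any integer-typed code path; dp_formula_hoisted is just an illustrative name, and this sidesteps the symptom without explaining the Windows 7 miscompilation:

import numpy as np
from numba import njit

@njit
def dp_formula_hoisted(a, b, x):
    """ formula dp, with the square root hoisted into a temporary """
    q = a ** .5  # computing the sqrt once avoided the NaNs in the simpler test
    term1 = np.arcsinh((b - 1) / q)
    return ((1 - b) * np.arcsinh(b / q) - ((b ** 2 - 2 * b + a + 1) ** .5) +
            ((b ** 2 + a) ** .5) + term1 * (b - 1)) * ((1 - x ** 2) ** .5)

# Casting the inputs rules out the integer path seen in the second test:
# w_numba = dp_formula_hoisted(a_int.astype(np.float64), float(b_int), XX1)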
I needed to solve $\sin(x) + a\sin(3x) = 0$ symbolically for $x$.
import sympy as sp
a,x = sp.symbols('a,x')
roots = sp.solve([sp.sin(x)+a*sp.sin(3*x)],[x])
print(roots)
produced
[(I*(-log((-a - sqrt(-3*a**2 + 2*a + 1) - 1)/a) + log(2))/2,),
 (I*(-log(-sqrt((-a + sqrt(-3*a**2 + 2*a + 1) - 1)/a)) + log(2)/2),),
 (I*(-log((-a + sqrt(-3*a**2 + 2*a + 1) - 1)/a) + log(2))/2,)]
whereas WolframAlpha produced a much "better" solution:
solve sin(x) + a sin(3 x) = 0
yields
x = π n, n ∈ ℤ
x = 2 π n, a = -1/3, n ∈ ℤ
x = 2 π n - 2 tan^(-1)(sqrt((5 a - 4 sqrt((a - 1) a) - 1)/(3 a + 1))), 3 a + 1 ≠ 0, sqrt((a - 1) a) ≠ 2 a, n ∈ ℤ
x = 2 (tan^(-1)(sqrt((5 a - 4 sqrt((a - 1) a) - 1)/(3 a + 1))) + π n), 3 a + 1 ≠ 0, sqrt((a - 1) a) ≠ 2 a, n ∈ ℤ
x = 2 π n - 2 tan^(-1)(sqrt((5 a + 4 sqrt((a - 1) a) - 1)/(3 a + 1))), 3 a + 1 ≠ 0, 2 a + sqrt((a - 1) a) ≠ 0, n ∈ ℤ
Questions:
Can the SymPy solver be configured to produce solutions the way WolframAlpha does?
Is it known why SymPy expresses the solutions via (complex) logarithms instead of arctangents?
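As far as I can tell there is no solver flag for this; solve appears to fall back on rewriting the trigonometric equation in exponential form, which is where the complex logarithms come from. One workaround I'd sketch (the substitution is my own, not a documented SymPy option) is to reduce the equation to a polynomial in sin(x) first:

import sympy as sp

a, x, s = sp.symbols('a x s')

# Expand sin(3x) into powers of sin(x) and cos(x), eliminate cos(x)**2,
# and solve the resulting cubic in s = sin(x).
expr = sp.expand_trig(sp.sin(x) + a * sp.sin(3 * x))
expr = expr.subs(sp.cos(x) ** 2, 1 - sp.sin(x) ** 2)
s_roots = sp.solve(sp.Eq(expr.subs(sp.sin(x), s), 0), s)
print(s_roots)  # sin(x) = 0, or sin(x)**2 = (3*a + 1)/(4*a)
# Each root r then gives the real family x = pi*n + (-1)**n * asin(r), n in Z,
# which should match WolframAlpha's arctangent form up to trig identities.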
I'm trying to minimize function(T) with scipy.optimize.minimize(), subject to 0 < T < 1.
My code is the following:
import math
import scipy.optimize

def function(T):
    '''
    U = (1 - sqrt(T))^(-2)
    1 - T^3 * (1 - T - U)^5 * U^10
    '''
    return 1 - T**3 * (1 - T - (T + 1 + 2 * math.sqrt(T)) / (T - 1)**2) ** 5 * ((T + 1 + 2 * math.sqrt(T)) / (T - 1)**2)**10

# minimization
res = scipy.optimize.minimize(function, 1, bounds=[(0, 1)])
w = res.x
When I run the code it shows these warning messages:
RuntimeWarning: divide by zero encountered in true_divide
return 1 - T**x[0] * (1-T - (T + 1 + 2 * math.sqrt(T))/(T-1)**2) ** x[1] * ((T + 1 + 2 * math.sqrt(T))/(T-1)**2)**x[2]
RuntimeWarning: overflow encountered in multiply
return 1 - T**x[0] * (1-T - (T + 1 + 2 * math.sqrt(T))/(T-1)**2) ** x[1] * ((T + 1 + 2 * math.sqrt(T))/(T-1)**2)**x[2]
What is going on? Thanks.
PS: What do the warning messages mean?
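Both warnings trace back to the starting point: minimize(function, 1, ...) evaluates the objective at T = 1, where (T - 1)**2 is exactly zero, hence the divide by zero; the overflow most likely comes from the huge intermediate values in the same near-singular region. A sketch of a fix, assuming bounds strictly inside (0, 1) are acceptable for your problem:

import math
from scipy.optimize import minimize

def function(T):
    T = float(T)  # minimize passes a length-1 array
    U = (T + 1 + 2 * math.sqrt(T)) / (T - 1) ** 2  # U = (1 - sqrt(T))**(-2)
    return 1 - T**3 * (1 - T - U) ** 5 * U ** 10

# Start from an interior point and keep the bounds strictly inside (0, 1),
# so (T - 1)**2 never evaluates to zero at a trial point. The objective is
# badly scaled near T = 1, so points very close to the upper bound may
# still trigger overflow warnings.
res = minimize(function, x0=0.5, bounds=[(1e-6, 1 - 1e-6)])
print(res.x, res.fun)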
I'm coding an algorithm that takes the inverse of a Laplace transform, iterates it until it converges, and live-plots the graph. I've dealt with a bunch of errors so far and solved them all, but this one has me stumped. Here's the function f_p() that I'm getting the error from. For context, it contains 6 Laplace transforms that I painstakingly derived by hand to eliminate the imaginary component:
import math

# gamma, k and n are module-level globals set elsewhere in the script
def f_p(u, omega, m):
    a = gamma
    b = (omega + k * math.pi) / u
    if m == 1:
        f = a * (1 / (a ** 2 + (b + 1) ** 2) + 1 / (a ** 2 + (b - 1) ** 2))
        return f * math.cos(omega)
    elif m == 2:
        f = a / (a ** 2 + b ** 2)
        return f * math.cos(omega)
    elif m == 3:
        f = math.e ** (-a / (a ** 2 + b ** 2)) * (a ** 2 + b ** 2) ** (1 / 4) * math.cos(
            (math.atan2(b, a) + 2 * n * math.pi) / 2) * math.cos(b / (a ** 2 + b ** 2)) / (
            math.sqrt(a ** 2 + b ** 2) * (math.cos((math.atan2(b, a) + 2 * n * math.pi) / 2) ** 2 + math.sin(
                (math.atan2(b, a) + 2 * n * math.pi) / 2)))
        return f * math.cos(omega)
    elif m == 4:
        a = math.e
        f = -(a * math.log(a ** 2 + b ** 2) + a ** 2) / (a ** 2 + b ** 2)
        return f * math.cos(omega)
    elif m == 5:
        f = a * (math.e ** (-a) * math.cos(b) - math.e ** (-2 * a) * math.cos(2 * b)) / (a ** 2 + b ** 2)
        return f * math.cos(omega)
    elif m == 6:
        f = math.sqrt((a ** 2 - b ** 2 + 1) ** 2 + (2 * a * b) ** 2) * math.cos(
            (math.atan2(2 * a * b, a ** 2 - b ** 2 + 1) + 2 * n * math.pi) / 2) / (
            ((a ** 2 - b ** 2 + 1) ** 2 + (2 * a * b) ** 2) * (
                math.cos((math.atan2(2 * a * b, a ** 2 - b ** 2 + 1) + 2 * n * math.pi) / 2) ** 2 + math.sin(
                    (math.atan2(2 * a * b, a ** 2 - b ** 2 + 1) + 2 * n * math.pi) / 2) ** 2))
        return f * math.cos(omega)
    else:
        return 0
This function feeds into another function f_u(), which contains the algorithm itself, which integrates over omega, removing it as a variable. The remaining variable is u, which I need to iterate until the function converges. Here's the execution of the algorithm:
import math
import numpy as np
from multiprocessing import Pool

u = np.linspace(0.000001, 100, 100000)

if __name__ == '__main__':
    pool = Pool(processes=4)
    for m in range(1, 7):
        if m == 4:
            gamma = math.e
        else:
            gamma = 10
        pool.map(f_u, u)
Maybe it could be a syntax problem? I don't know. Any help would be much appreciated.
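Since the actual error message isn't shown, here is one guess (an assumption on my part, not a diagnosis): with multiprocessing on Windows, workers are spawned as fresh processes, so the gamma global assigned inside the __main__ guard never reaches f_p in the children, and neither gamma nor m is handed to the workers explicitly. A sketch that passes both through functools.partial, with a placeholder f_u body since the real one isn't shown:

import math
from functools import partial
from multiprocessing import Pool

import numpy as np

def f_u(u_val, gamma, m):
    # placeholder body; the real f_u integrates f_p over omega
    return gamma * u_val * m

if __name__ == '__main__':
    u = np.linspace(0.000001, 100, 100000)
    with Pool(processes=4) as pool:
        for m in range(1, 7):
            g = math.e if m == 4 else 10
            results = pool.map(partial(f_u, gamma=g, m=m), u)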
I intend to take a list of random variables and use them to alter a previous array, column by column. However, for the purposes of my function, each variable must also be passed through a Gamma function as well as integrated.
x[t] = c * (1 / (2 ** (v / 2) + test[t - 1]) * (gamma((v / 2) + test[t - 1]))) * integrate.quad(
    lambda h: np.exp(-h / 2) * h ** ((v / 2) + test[t - 1] - 1), 0, np.inf)
x[t] is a row of an np.zeros((x, y)) array, and test[t - 1] is a row of an np.zeros((x - 1, y)) array.
I have filled test with the appropriate random variables, but I am unable to pass them through this equation to complete the columns of row t in x.
When I try to run my current code, I receive:
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_64\lib\site-packages\scipy\integrate\quadpack.py", line 450, in _quad
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
TypeError: only size-1 arrays can be converted to Python scalars
Is there a different special function which allows me to use each column's variable to solve for my desired x[ t ]?
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import stats
import mpmath as mp
import scipy.integrate as integrate
from scipy.special import gamma

T = 1
beta = 0.5
x0 = 0.05
q = 0
mu = x0 - q
alpha = -(2 - beta) * mu
sigma0 = 0.1
sigma = (2 - beta) * sigma0
b = -((1 - beta) / (2 * mu) * (sigma0 ** 2))
simulations = 100
M = 50
dt = T / M

def srd_sampled_nxc2():
    x = np.zeros((M + 1, simulations))
    x[0] = x0
    test = np.zeros((M, simulations))
    for t in range(1, M + 1):
        v = 4 * b * alpha / sigma ** 2
        c = (sigma ** 2 * (1 - np.exp(-alpha * dt))) / (4 * alpha)
        nc = np.exp(-alpha * dt) / c * x[t - 1]
        if v > 1:
            x[t] = c * ((np.random.standard_normal(simulations) + nc ** 0.5) ** 2 + mp.nsum(
                lambda i: np.random.standard_normal(simulations) ** 2, [0, v - 1]))
        else:
            max_array = []
            nc_over_2 = [l / 2 for l in nc]
            for p in range(simulations):
                sump = []
                poisson_start = 0
                while poisson_start <= 1:
                    x_i = sum(-np.log(np.random.uniform(0, 1, simulations)) / nc_over_2)
                    sump.append(x_i)
                    poisson_start += x_i
                x_n = max(sump)
                max_array.append(x_n)
                sump = []
            test[t - 1] = max_array
            x[t] = c * (1 / (2 ** ((v / 2) + test[t - 1])) * (gamma((v / 2) + test[t - 1]))) * integrate.quad(
                lambda h: np.exp(-h / 2) * h ** ((v / 2) + test[t - 1] - 1), 0, np.inf)
            max_array = []
    return x
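The TypeError comes from integrate.quad itself: it evaluates a scalar-valued integrand, while the exponent (v / 2) + test[t - 1] - 1 is a whole row at once, so quad cannot convert it to a Python scalar. A minimal element-wise sketch, with stand-in values for v and test[t - 1] since those are defined inside the loop:

import numpy as np
import scipy.integrate as integrate

v = 0.5                                 # stand-in for 4 * b * alpha / sigma ** 2
test_row = np.random.uniform(1, 3, 5)   # stand-in for test[t - 1]

s = (v / 2) + test_row
vals = np.empty_like(s)
for j, s_j in enumerate(s):
    # quad needs a scalar integrand, hence one column at a time
    vals[j], _ = integrate.quad(lambda h: np.exp(-h / 2) * h ** (s_j - 1), 0, np.inf)
print(vals)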
Ultimately ended up finding a workaround which is simple to implement:
else:
    max_array = []
    for p in range(simulations):
        k = nc[t - 1, p]
        lam = k / 2
        poisson_samp = 0
        while poisson_samp <= 1:
            x_i = -math.log(np.random.uniform(0, 1)) / lam
            max_array.append(x_i)
            poisson_samp += x_i
        test[t - 1, p] = len(max_array) - 1
        max_array.clear()
    for f in range(simulations):
        n = test[t - 1, f]
        z = integrate.quad(lambda h: np.exp(-h / 2) * h ** ((v / 2) + n - 1), 0, 1)
        new[t - 1, f] = z[0]
    x[t] = c * (1 / (2 ** ((v / 2) + test[t - 1]) * (gamma((v / 2) + test[t - 1]))) * new[0])
The only real problem is the shrinkage of x[t] which leads to dividing by zero--just a formula problem.
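As a side note, the truncated integral has a closed form through the regularized lower incomplete gamma function, which scipy.special exposes as gammainc; the substitution u = h / 2 gives ∫₀¹ e^(−h/2) h^(s−1) dh = 2^s Γ(s) P(s, 1/2), so the per-column quad loop could be replaced by a vectorized expression. A sketch with stand-in values:

import numpy as np
from scipy.special import gamma, gammainc

# For s > 0: integral from 0 to 1 of exp(-h/2) * h**(s - 1) dh
#          = 2**s * gamma(s) * gammainc(s, 0.5),
# where gammainc is the *regularized* lower incomplete gamma function.
s = np.array([1.0, 1.5, 2.0])  # stand-in for (v / 2) + test[t - 1]
z = 2 ** s * gamma(s) * gammainc(s, 0.5)
print(z)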
I need to overload the _stats function for my beta distribution. This is my current code:
from scipy.stats import beta
import scipy.stats as st
import numpy as np

class CustomBeta(st.rv_continuous):
    def _stats(self, a, b):
        # will add own code here
        mn = a * 1.0 / (a + b)
        var = (a * b * 1.0) / (a + b + 1.0) / (a + b) ** 2.0
        g1 = 2.0 * (b - a) * np.sqrt((1.0 + a + b) / (a * b)) / (2 + a + b)
        g2 = 6.0 * (a ** 3 + a ** 2 * (1 - 2 * b) + b ** 2 * (1 + b) - 2 * a * b * (2 + b))
        g2 /= a * b * (a + b + 2) * (a + b + 3)
        return mn, var, g1, g2

dist = beta(4, 6)
print(dist.rvs())  # works fine

dist = CustomBeta(4, 6)
print(dist.rvs())  # crashes
Calling rvs() on my custom object gives me a long stack trace and an error:
RuntimeError: maximum recursion depth exceeded
This has nothing to do with overloading _stats. The same behavior is caused simply by
class CustomBeta(st.rv_continuous):
    pass

dist = CustomBeta(4, 6)
print(dist.rvs())  # crashes
The documentation of rv_continuous states that
New random variables can be defined by subclassing the rv_continuous class and re-defining at least the _pdf or the _cdf method.
You will need to provide at least one of these methods to compute the probability density function (pdf) or the cumulative distribution function (cdf).
Furthermore,
[rv_continuous] cannot be used directly as a distribution.
It is used as follows:
class CustomBetaGen(st.rv_continuous):
    ...

CustomBeta = CustomBetaGen(name='CustomBeta')
dist = CustomBeta(4, 6)
Finally, rvs() does not seem to work properly for the beta distribution if you do not provide a _rvs method.
Putting everything together and stealing the appropriate methods from the beta distribution:
from scipy.stats import beta
import scipy.stats as st
import numpy as np

class CustomBetaGen(st.rv_continuous):
    def _cdf(self, x, a, b):
        return beta.cdf(x, a, b)

    def _pdf(self, x, a, b):
        return beta.pdf(x, a, b)

    def _rvs(self, a, b):
        return beta.rvs(a, b)

    def _stats(self, a, b):
        # will add own code here
        mn = a * 1.0 / (a + b)
        var = (a * b * 1.0) / (a + b + 1.0) / (a + b) ** 2.0
        g1 = 2.0 * (b - a) * np.sqrt((1.0 + a + b) / (a * b)) / (2 + a + b)
        g2 = 6.0 * (a ** 3 + a ** 2 * (1 - 2 * b) + b ** 2 * (1 + b) - 2 * a * b * (2 + b))
        g2 /= a * b * (a + b + 2) * (a + b + 3)
        return mn, var, g1, g2

CustomBeta = CustomBetaGen(name='CustomBeta')

dist = beta(4, 6)
print(dist.rvs())    # works fine
print(dist.stats())  # (array(0.4), array(0.021818181818181816))

dist = CustomBeta(4, 6)
print(dist.rvs())    # works fine
print(dist.stats())  # (array(0.4), array(0.021818181818181816))