I am trying to replicate the R code below, which estimates parameters by maximum likelihood, in Python.
I want both versions to return the same results, but my estimated values differ, and I am not sure whether the two scripts are actually optimising the same parameters.
R-Code:
ll <- function(prop, numerator, denominator) {
  return(
    lgamma(denominator + 1) -
      lgamma(numerator + 1) -
      lgamma(denominator - numerator + 1) +
      numerator * log(prop) + (denominator - numerator) * log(1 - prop)
  )
}

compLogLike <- function(pvec) {
  return(sum(ll(pvec, dat$C, dat$N)))
}

fct_p_ll <- function(a, c0, c1) {
  xa_ <- exp(c0 + c1 * c(20, a))
  return(1 - exp((xa_[1] - xa_[2]) / c1))
}

fct_ll <- function(x) {
  pv <- sapply(22.5 + 5 * (0:8), FUN = fct_p_ll, c0 = x[1], c1 = x[2])
  return(compLogLike(pv))
}

opt.res <- optim(par = c(-9.2, 0.07), fn = fct_ll, control = list(fnscale = -1.0), hessian = TRUE)

fisherInfo <- solve(-opt.res$hessian)
propSigma <- sqrt(diag(fisherInfo))
upper <- opt.res$par + 1.96 * propSigma
lower <- opt.res$par - 1.96 * propSigma
interval <- data.frame(val = opt.res$par, ci.low = lower, ci.up = upper)
Python Code:
import math
import numpy as np
from scipy.optimize import minimize
from numdifftools import Hessian

# df is assumed to be a pandas DataFrame with columns "C" and "N" (the data is not shown).

def ll(prop, numerator, denominator):
    print(prop, numerator, denominator)
    if prop > 0:
        value = (math.lgamma(denominator + 1) -
                 math.lgamma(numerator + 1) -
                 math.lgamma(denominator - numerator + 1) +
                 numerator * math.log(prop) + (denominator - numerator) * math.log(1 - prop))
        return value
    return 0

def compLogLike(pvec):
    p = list(pvec)
    c = list(df["C"])
    n = list(df["N"])
    compLog = 0
    for idx, val in enumerate(p):
        compLog += ll(p[idx], c[idx], n[idx])
    print(compLog)
    return compLog

def fct_p_ll(a, c0, c1):
    val_list = [c1 * val for val in [20, a]]
    xa_ = np.exp([c0 + val for val in val_list])
    return 1 - np.exp((xa_[0] - xa_[1]) / c1)

def fct_ll(x):
    ages_1 = np.arange(22.5, 67.5, 5)
    pv = [fct_p_ll(a=val, c0=x[0], c1=x[1]) for val in ages_1]
    return compLogLike(pv)

# note: Nelder-Mead does not use the hess argument, scipy only warns and ignores it
opt = minimize(fct_ll, [-9.2, 0.07], method='Nelder-Mead', hess=Hessian(fct_ll))
Any inputs would be really helpful.
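For what it's worth, one difference between the two scripts is the direction of optimisation: R's optim with control = list(fnscale = -1.0) maximises fct_ll, while scipy.optimize.minimize always minimises, so a closer Python analogue would minimise the negated log-likelihood and compute the Hessian separately. A minimal sketch, assuming fct_ll and df are defined as above and that numdifftools is installed:

import numpy as np
from scipy.optimize import minimize
from numdifftools import Hessian

# Minimise the negative log-likelihood to mimic R's fnscale = -1 (maximisation).
neg_fct_ll = lambda x: -fct_ll(x)
opt = minimize(neg_fct_ll, x0=[-9.2, 0.07], method='Nelder-Mead')

# Observed Fisher information from the Hessian of the log-likelihood at the optimum,
# mirroring solve(-opt.res$hessian) in the R code.
hess = Hessian(fct_ll)(opt.x)
fisher_info = np.linalg.inv(-hess)
prop_sigma = np.sqrt(np.diag(fisher_info))

lower = opt.x - 1.96 * prop_sigma
upper = opt.x + 1.96 * prop_sigma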
Related
I'm trying to convert Pine Script stdev to Python code, but it seems I'm doing it wrong.
https://www.tradingview.com/pine-script-reference/v4/#fun_stdev
Pine script:
//the same on pine
isZero(val, eps) => abs(val) <= eps

SUM(fst, snd) =>
    EPS = 1e-10
    res = fst + snd
    if isZero(res, EPS)
        res := 0
    else
        if not isZero(res, 1e-4)
            res := res
        else
            15

pine_stdev(src, length) =>
    avg = sma(src, length)
    sumOfSquareDeviations = 0.0
    for i = 0 to length - 1
        sum = SUM(src[i], -avg)
        sumOfSquareDeviations := sumOfSquareDeviations + sum * sum
    stdev = sqrt(sumOfSquareDeviations / length)
Python code:
import talib as ta
def isZero(val, eps):
    if abs(val) <= eps:
        return True
    else:
        return False

def SUM(fst, snd):
    EPS = 1e-10
    res = fst + snd
    if isZero(res, EPS):
        res += 0
    else:
        if not isZero(res, 1e-4):
            res = res
        else:
            res = 15
    return res

def pine_stdev(src, length):
    avg = ta.SMA(src, length)
    sumOfSquareDeviations = 0.0
    for i in range(length - 1):
        s = SUM(src.iloc[i], -avg.iloc[i])
        sumOfSquareDeviations = sumOfSquareDeviations + s * s
    stdev = (sumOfSquareDeviations / length) * (sumOfSquareDeviations / length)
What am I doing wrong? And why does the SUM function return 15?
TradingView has made a mistake in the code on its site.
The number "15" should be written as "1e-5".
You can use this code:
def SUM(fst, snd):
    EPS = 1e-10
    res = fst + snd
    if isZero(res, EPS):
        res = 0
    else:
        if not isZero(res, 1e-4):
            res = res
        else:
            res = 1e-5
    return res
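Beyond the SUM fix, the Python port of pine_stdev also diverges from the Pine version: Pine's for i = 0 to length - 1 includes both endpoints (so range(length) in Python), and the last line should take a square root rather than squaring again. A sketch of a closer port, assuming src is a pandas Series and SUM is defined as above:

import math
import numpy as np
import talib as ta

def pine_stdev(src, length):
    # Pine evaluates sma(src, length) at the current bar and reuses it for every term.
    avg = ta.SMA(np.asarray(src, dtype=float), timeperiod=length)[-1]
    sum_of_square_deviations = 0.0
    # "for i = 0 to length - 1" in Pine includes both endpoints.
    for i in range(length):
        # src[i] in Pine is the value i bars back from the current (last) bar.
        s = SUM(src.iloc[-1 - i], -avg)
        sum_of_square_deviations += s * s
    # Pine takes the square root of the mean squared deviation.
    return math.sqrt(sum_of_square_deviations / length)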
Hi, I am trying to write a trust-region algorithm using the dogleg method in Python for a class. I have a Newton's method implementation and a Broyden's method implementation that agree with each other, but I can't seem to get this dogleg method to work.
Here is the function I am trying to find the solution to:
def test_function(x):
    x1 = float(x[0])
    x2 = float(x[1])
    r = np.array([[x2**2 - 1],
                  [np.sin(x1) - x2]])
    return r
and here is the jacobian I wrote
def Test_Jacobian(x, size):
    e = create_ID_vec(size)
    #print(e[0])
    epsilon = 10e-8
    J = np.zeros([size, size])
    #print(J)
    for i in range(0, size):
        for j in range(0, size):
            J[i][j] = ((test_function(x[i]*e[j] + epsilon*e[j])[i] - test_function(x[i]*e[j])[i])/epsilon)
    return J
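As a point of comparison, a textbook forward-difference Jacobian perturbs one component of x at a time and differences the full residual vector, rather than scaling x[i] by a basis vector as above. The helper below is only a sketch (the name forward_difference_jacobian is mine) and is not meant as a drop-in replacement for Test_Jacobian:

import numpy as np

def forward_difference_jacobian(f, x, epsilon=1e-8):
    # Standard forward differences: J[:, j] ~ (f(x + eps*e_j) - f(x)) / eps.
    x = np.asarray(x, dtype=float).reshape(-1)
    r0 = np.asarray(f(x)).reshape(-1)
    J = np.zeros((r0.size, x.size))
    for j in range(x.size):
        x_pert = x.copy()
        x_pert[j] += epsilon
        J[:, j] = (np.asarray(f(x_pert)).reshape(-1) - r0) / epsilon
    return J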
and here is my Trust-Region algorithm:
def Trust_Region(x):
    trust_radius = 1
    max_trust = 300
    eta = rand.uniform(0, .25)
    r = test_function(x)  # change to correspond with the function you want
    J = Test_Jacobian(r, r.size)  # change to correspond with function
    i = 0
    iteration_table = [i]
    function_table = [vector_norm(r, r.size)]
    while vector_norm(r, r.size) > 10e-10:
        print(x, 'at iteration', i, "norm of r is", vector_norm(r, r.size))
        p = dogleg(x, r, J, trust_radius)
        rho = ratio(x, J, p)
        if rho < 0.25:
            print('first')
            trust_radius = 0.25*vector_norm(p, p.size)
        elif rho > 0.75 and vector_norm(p, p.size) == trust_radius:
            print('second')
            trust_radius = min(2*trust_radius, max_trust)
        else:
            print('third')
            trust_radius = trust_radius
        if rho > eta:
            print('x changed')
            x = x + p
            #r = test_function(x)
            #J = Test_Jacobian(r, r.size)
        else:
            print('x did not change')
            x = x
        r = test_function(x)  # change to correspond with the function you want
        J = Test_Jacobian(r, r.size)  # change to correspond with function
        i = i + 1
        #print(r)
        #print(J)
        #print(vector_norm(p, p.size))
        print(rho)
        #print(trust_radius)
        iteration_table.append(i)
        function_table.append(vector_norm(r, r.size))
    print('The solution to the non-linear equation is: ', x)
    print('This solution was obtained in ', i, 'iterations')
    plt.figure(figsize=(10, 10))
    plt.plot(iteration_table, np.log10(function_table))
    plt.xlabel('iteration number')
    plt.ylabel('function value')
    plt.title('Semi-Log Plot for Convergence')
    return x, iteration_table, function_table
def dogleg(x, r, J, trust_radius):
    tau_k = min(1, vector_norm(J.transpose().dot(r), r.size)**3 /
                (trust_radius*r.transpose().dot(J).dot(J.transpose().dot(J)).dot(J.transpose()).dot(r)))
    p_c = -tau_k*(trust_radius/vector_norm(J.transpose().dot(r), r.size))*J.transpose().dot(r)
    if vector_norm(p_c, p_c.size) == trust_radius:
        print('using p_c')
        p_k = p_c
    else:
        p_j = -np.linalg.inv(J.transpose().dot(J)).dot(J.transpose().dot(r))
        print('using p_j')
        tau = tau_finder(x, p_c, p_j, trust_radius, r.size)
        p_k = p_c + tau*(p_j - p_c)
    return p_k
def ratio(x, J, p):
    r = test_function(x)
    r_p = test_function(x + p)
    print(vector_norm(r, r.size)**2)
    print(vector_norm(r_p, r_p.size)**2)
    print(vector_norm(r + J.dot(p), r.size)**2)
    rho_k = (vector_norm(r, r.size)**2 - vector_norm(r_p, r_p.size)**2)/(vector_norm(r, r.size)**2 - vector_norm(r + J.dot(p), r.size)**2)
    return rho_k
def tau_finder(x, p_c, p_j, trust_radius, size):
    a = 0
    b = 0
    c = 0
    for i in range(0, size):
        a = a + (p_j[i] - p_c[i])**2
        b = b + 2*(p_j[i] - p_c[i])*(p_c[i] - x[i])
        c = (p_c[i] - x[i])**2
    c = c - trust_radius**2
    tau_p = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
    tau_m = (-b - np.sqrt(b**2 - 4*a*c))/(2*a)
    #print(tau_p)
    #print(tau_m)
    if tau_p <= 1 and tau_p >= 0:
        return tau_p
    elif tau_m <= 1 and tau_m >= 0:
        return tau_m
    else:
        print('error')
        return 'error'
def model_function(p):
    r = test_function(x)
    J = Test_Jacobian(r, r.size)
    return 0.5*vector_norm(r + J.dot(p), r.size)**2
The answer should be approximately [[1.57076525], [1.]],
but here is the output after about 28-30 iterations:
ZeroDivisionError Traceback (most recent call last)
<ipython-input-359-a414711a1671> in <module>
1 x = create_point(2,1)
----> 2 Trust_Region(x)
<ipython-input-358-7cb77bd44d7b> in Trust_Region(x)
11 print(x, 'at iteration', i, "norm of r is", vector_norm(r, r.size))
12 p = dogleg(x, r, J, trust_radius)
---> 13 rho = ratio(x, J, p)
14
15 if rho < 0.25:
<ipython-input-358-7cb77bd44d7b> in ratio(x, J, p)
71 print (vector_norm(r_p, r_p.size)**2)
72 print (vector_norm(r + J.dot(p), r.size)**2)
---> 73 rho_k =(vector_norm(r, r.size)**2 - vector_norm(r_p, r_p.size)**2)/(vector_norm(r, r.size)**2 - vector_norm(r + J.dot(p), r.size)**2)
74 return rho_k
75
ZeroDivisionError: float division by zero
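The ZeroDivisionError comes from the denominator of rho_k: when the predicted reduction ||r||^2 - ||r + Jp||^2 is zero (for example when the step p is essentially zero), the ratio blows up. A small guard, sketched under the assumption that vector_norm and test_function behave as above, is one way to keep the iteration alive long enough to diagnose why the step collapses:

def ratio(x, J, p, eps=1e-14):
    r = test_function(x)
    r_p = test_function(x + p)
    actual = vector_norm(r, r.size)**2 - vector_norm(r_p, r_p.size)**2
    predicted = vector_norm(r, r.size)**2 - vector_norm(r + J.dot(p), r.size)**2
    # A vanishing predicted reduction means the model sees no progress for this step;
    # treat it as a poor ratio so the trust region shrinks instead of dividing by zero.
    if abs(predicted) < eps:
        return 0.0
    return actual / predicted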
import numpy as np
import pandas as pd
import scipy.optimize as so

def rainflow(pr, dt, ci, k, tabo):
    phi = tabo/dt
    qsim = pd.Series(data=None, index=pr.index)
    qsim.iloc[0] = ci
    for i in range(1, len(qsim) + 1):
        qsim.iloc[i] = k * (pr.iloc[i] - (phi * (qsim.iloc[i-1] / pr.iloc[i-1])))
    return qsim.iloc

def irmse(dif, obs):
    rmse = np.sqrt(np.mean(dif ** 2) / len(obs))
    delta = obs - obs.shift(1)
    delta_prom = np.mean(delta)
    sigma_obs = np.sqrt((np.sum((delta - delta_prom) ** 2) / (len(delta)) - 1))
    irmse = rmse / sigma_obs
    return irmse

def obj_fun(par, arg):
    sim = rainflow(arg[1], arg[2], par[0], par[1])
    dif = arg[0] - sim
    return irmse(dif, arg[0])

df_data = pd.ExcelFile('Taller_opt.xlsx').parse(sheetname='caudal', index_col='Fecha')

sr_pr_cal = df_data['PT'].iloc[0:int(len(df_data['PT']) * 0.7)]
sr_pr_val = df_data['PT'].iloc[int(len(df_data['PT']) * 0.7):]
sr_qobs_cal = df_data['Qobs'].iloc[0:int(len(df_data['Qobs']) * 0.7)]
sr_qobs_val = df_data['Qobs'].iloc[int(len(df_data['Qobs']) * 0.7):]

dt = 1.
ci = sr_qobs_cal.iloc[0]
met_opt = 'minimize'
par_ini = [0.5, 10]

min_results = so.minimize(fun=obj_fun, x0=par_ini, args=[sr_pr_cal, sr_qobs_cal], method='Nelder-Mead')
I'm trying to optimize my equation, but it gives:
File "C:/Users/yeni/PycharmProjects/untitled/new.py", line 23, in obj_fun
sim = rainflow(arg[1], arg[2], par[0], par[1])
IndexError: list index out of range
"IndexError: list index out of range" Why is this happening?
How can ir fix it?
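For reference, args=[sr_pr_cal, sr_qobs_cal] means arg inside obj_fun has only two elements (indices 0 and 1), so arg[2] raises the IndexError, and rainflow itself declares five parameters (pr, dt, ci, k, tabo) but is called with four. A sketch of one way to make the pieces line up, assuming k and tabo are the two values being optimised while dt and ci stay fixed (rainflow would also need to return qsim rather than qsim.iloc for the subtraction to work):

def obj_fun(par, arg):
    # arg = [observed flow, precipitation, time step, initial condition]
    qobs, pr, dt, ci = arg
    sim = rainflow(pr, dt, ci, par[0], par[1])   # par = [k, tabo]
    dif = qobs - sim
    return irmse(dif, qobs)

min_results = so.minimize(fun=obj_fun, x0=par_ini,
                          args=([sr_qobs_cal, sr_pr_cal, dt, ci],),
                          method='Nelder-Mead')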
The formula for the half vector is Hv = (Lv + Vv) / |Lv + Vv|, where Lv is the light vector and Vv is the view vector.
Am I doing this right in the Python code below?
import math

Vvx = 0-xi  # view vector (calculated from the surface point (xi, yi, zi))
Vvy = 0-yi
Vvz = 0-zi
Vv = math.sqrt((Vvx * Vvx) + (Vvy * Vvy) + (Vvz * Vvz))  # normalizing
Vvx = Vvx / Vv
Vvy = Vvy / Vv
Vvz = Vvz / Vv
Lv = (1, 1, 1)  # light vector
Hn = math.sqrt(((1 + Vvx) * (1 + Vvx)) + ((1 + Vvy) * (1 + Vvy)) +
               ((1 + Vvz) * (1 + Vvz)))
Hv = ((1 + Vvx) / Hn, (1 + Vvy) / Hn, (1 + Vvz) / Hn)  # half-way vector
This is misnamed. What you've written is simple vector addition of two vectors, with the result being a normalized unit vector.
Here's how I'd do it:
import math

def magnitude(v):
    return math.sqrt(sum(v[i]*v[i] for i in range(len(v))))

def add(u, v):
    return [u[i]+v[i] for i in range(len(u))]

def sub(u, v):
    return [u[i]-v[i] for i in range(len(u))]

def dot(u, v):
    return sum(u[i]*v[i] for i in range(len(u)))

def normalize(v):
    vmag = magnitude(v)
    return [v[i]/vmag for i in range(len(v))]

if __name__ == '__main__':
    l = [1, 1, 1]
    v = [0, 0, 0]
    h = normalize(add(l, v))
    print(h)
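Applied to the half-vector formula from the question, a short usage sketch (the surface point values here are made up, and Lv is normalised as well, which is the usual convention for a half vector even though the question adds the raw (1, 1, 1)):

# Hv = (Lv + Vv) / |Lv + Vv|, with Vv pointing from the surface point to the eye at the origin.
xi, yi, zi = 0.3, 0.4, 0.5              # example surface point
Vv = normalize([0 - xi, 0 - yi, 0 - zi])
Lv = normalize([1, 1, 1])
Hv = normalize(add(Lv, Vv))
print(Hv)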
I've been working on a computational physics project (plotting related rates of chemical reactants with respect to each other to show oscillatory behavior) with a fair amount of success. However, one of my simulations involves more than two active oscillating agents (five, in fact), which is obviously unsuitable for a single plot.
My plan was therefore to let the user select which two reactants to plot on the x-axis and y-axis respectively. I (foolishly) tried to convert string input values into the corresponding variable names, but I guess I need a radically different approach if one exists?
If it helps clarify anything, here is part of my code:
def coupledBrusselator(A, B, t_trial, display_x, display_y):
    t = 0
    t_step = .01

    X = 0
    Y = 0
    E = 0
    U = 0
    V = 0

    dX = (A) - (B+1)*(X) + (X**2)*(Y)
    dY = (B)*(X) - (X**2)*(Y)
    dE = -(E)*(U) - (X)
    dU = (U**2)*(V) - (E+1)*(U) - (B)*(X)
    dV = (E)*(U) - (U**2)*(V)

    array_t = [0]
    array_X = [0]
    array_Y = [0]
    array_E = [0]  # array_E is appended to below, so it needs to be initialised too
    array_U = [0]
    array_V = [0]

    while t <= t_trial:
        X_1 = X + (dX)*(t_step/2)
        Y_1 = Y + (dY)*(t_step/2)
        E_1 = E + (dE)*(t_step/2)
        U_1 = U + (dU)*(t_step/2)
        V_1 = V + (dV)*(t_step/2)

        dX_1 = (A) - (B+1)*(X_1) + (X_1**2)*(Y_1)
        dY_1 = (B)*(X_1) - (X_1**2)*(Y_1)
        dE_1 = -(E_1)*(U_1) - (X_1)
        dU_1 = (U_1**2)*(V_1) - (E_1+1)*(U_1) - (B)*(X_1)
        dV_1 = (E_1)*(U_1) - (U_1**2)*(V_1)

        X_2 = X + (dX_1)*(t_step/2)
        Y_2 = Y + (dY_1)*(t_step/2)
        E_2 = E + (dE_1)*(t_step/2)
        U_2 = U + (dU_1)*(t_step/2)
        V_2 = V + (dV_1)*(t_step/2)

        dX_2 = (A) - (B+1)*(X_2) + (X_2**2)*(Y_2)
        dY_2 = (B)*(X_2) - (X_2**2)*(Y_2)
        dE_2 = -(E_2)*(U_2) - (X_2)
        dU_2 = (U_2**2)*(V_2) - (E_2+1)*(U_2) - (B)*(X_2)
        dV_2 = (E_2)*(U_2) - (U_2**2)*(V_2)

        X_3 = X + (dX_2)*(t_step)
        Y_3 = Y + (dY_2)*(t_step)
        E_3 = E + (dE_2)*(t_step)
        U_3 = U + (dU_2)*(t_step)
        V_3 = V + (dV_2)*(t_step)

        dX_3 = (A) - (B+1)*(X_3) + (X_3**2)*(Y_3)
        dY_3 = (B)*(X_3) - (X_3**2)*(Y_3)
        dE_3 = -(E_3)*(U_3) - (X_3)
        dU_3 = (U_3**2)*(V_3) - (E_3+1)*(U_3) - (B)*(X_3)
        dV_3 = (E_3)*(U_3) - (U_3**2)*(V_3)

        X = X + ((dX + 2*dX_1 + 2*dX_2 + dX_3)/6) * t_step
        Y = Y + ((dX + 2*dY_1 + 2*dY_2 + dY_3)/6) * t_step
        E = E + ((dE + 2*dE_1 + 2*dE_2 + dE_3)/6) * t_step
        U = U + ((dU + 2*dU_1 + 2*dY_2 + dE_3)/6) * t_step
        V = V + ((dV + 2*dV_1 + 2*dV_2 + dE_3)/6) * t_step

        dX = (A) - (B+1)*(X) + (X**2)*(Y)
        dY = (B)*(X) - (X**2)*(Y)

        t_step = .01 / (1 + dX**2 + dY**2) ** .5
        t = t + t_step

        array_X.append(X)
        array_Y.append(Y)
        array_E.append(E)
        array_U.append(U)
        array_V.append(V)
        array_t.append(t)
where previously
display_x = raw_input("Choose catalyst you wish to analyze in the phase/field diagrams (X, Y, E, U, or V) ")
display_y = raw_input("Choose one other catalyst from list you wish to include in phase/field diagrams ")
coupledBrusselator(A, B, t_trial, display_x, display_y)
Thanks!
Once you have calculated the different arrays, you could add them to a dict that maps names to arrays. This can then be used to look up the correct arrays for display_x and display_y:
named_arrays = {
    "X": array_X,
    "Y": array_Y,
    "E": array_E,
    ...
}

return (named_arrays[display_x], named_arrays[display_y])
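A sketch of how the caller could then plot the two selected arrays against each other, assuming matplotlib is available and A, B, t_trial, display_x and display_y are set as in the question:

import matplotlib.pyplot as plt

# coupledBrusselator now returns the arrays chosen by name via the dict lookup above.
xs, ys = coupledBrusselator(A, B, t_trial, display_x, display_y)

plt.plot(xs, ys)
plt.xlabel(display_x)
plt.ylabel(display_y)
plt.title('Phase diagram')
plt.show()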