Glicko-2: Exploding Rating Issue - Python
This may not be the right place to ask, but please redirect me if that's the case. I've tried to implement the Glicko-2 system as described in the PDF here: Glicko-2 System Implementation.
The code below simulates a set of 10 outcomes between two players:
Player 1 (Rating: 1500, Rating Deviation: 200, Volatility: 0.06, System Constant: 0.5)
Player 2 (Rating: 1400, Rating Deviation: 30, Volatility: 0.06, System Constant: 0.5)
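(For reference, converting these starting values onto the Glicko-2 scale with the mu and phi helpers below gives roughly: mu_1 = (1500 - 1500) / 173.7178 = 0 and phi_1 = 200 / 173.7178 ≈ 1.1513 for Player 1, and mu_2 = (1400 - 1500) / 173.7178 ≈ -0.5756 and phi_2 = 30 / 173.7178 ≈ 0.1727 for Player 2.)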
For some reason the ratings seem to explode after a few outcomes are processed, and I'm not certain where this explosion comes from. I was hoping someone could lend a hand in working out where the issue is.
Execution eventually fails with an error such as ZeroDivisionError: float division by zero, but I'm not sure whether that error is just a symptom of another underlying issue.
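For reference, these are the per-opponent quantities from Steps 3-4 of the paper that the helper functions below are meant to compute (with a single opponent per rating period, as here, the sums in the paper collapse to single terms):

g(phi_j) = 1 / sqrt(1 + 3 * phi_j^2 / pi^2)
E(mu, mu_j, phi_j) = 1 / (1 + exp(-g(phi_j) * (mu - mu_j)))
v = 1 / (g(phi_j)^2 * E * (1 - E))
Delta = v * g(phi_j) * (s - E)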
import math
import time
import numpy as np
def mu(rating: float) -> float:
    # Step 2: convert a rating to the Glicko-2 scale.
    output = (rating - 1500) / 173.7178
    return output

def phi(rating_dev: float) -> float:
    # Step 2: convert a rating deviation to the Glicko-2 scale.
    output = rating_dev / 173.7178
    return output

def G(phi: float) -> float:
    # g(phi) from Step 3 of the paper.
    output = 1 / math.sqrt(1 + (3 * phi ** 2 / math.pi ** 2))
    return output

def Expected(mu: float, mu_j: float, phi_j: float) -> float:
    # E(mu, mu_j, phi_j): expected score against opponent j.
    output = 1 / (1 + math.exp(-G(phi_j) * (mu - mu_j)))
    return output

def v(O_j: float, Exp: float) -> float:
    # Step 3: estimated variance of the rating based on game outcomes.
    output = 1 / ((G(O_j) ** 2) * Exp * (1 - Exp))
    return output

def Est_Impro(O_j: float, W1: float, Exp: float, V: float) -> float:
    # Step 4: Delta, the estimated improvement in rating.
    output = V * G(O_j) * (W1 - Exp)
    return output
def f(Delta: float, O: float, V: float, A: float, Sys_Const: float, init: float = 0) -> float:
    # Step 5.1: the function f(x) whose root gives the new volatility.
    Upper = math.exp(init) * ((Delta ** 2) - (O ** 2) - V - math.exp(init))
    Lower = 2 * ((O ** 2 + V + math.exp(init)) ** 2)
    Side = (init - A) / (Sys_Const ** 2)
    print(f"f: {(Upper / Lower) - Side}")
    return (Upper / Lower) - Side
def new_vol(sig: float, Delta: float, O: float, V: float, r: float) -> float:
    # Step 5: iterative (Illinois) procedure for the new volatility sigma'.
    A = math.log(sig ** 2)
    if Delta ** 2 > (O ** 2) + V:
        B = math.log((Delta ** 2) - (O ** 2) - V)
    else:
        k = 1
        while f(Delta, O, V, A, r, A - (k * r)) < 0:
            k += 1
        B = A - (k * r)
    f_A = f(Delta, O, V, A, r, A)
    f_B = f(Delta, O, V, A, r, B)
    while abs(B - A) > 0.000001:
        print(f"Iterations")
        C = A + ((A - B) * f_A) / (f_B - f_A)
        f_C = f(Delta, O, V, A, r, C)
        if f_C * f_B <= 0:
            A = B
            f_A = f_B
        else:
            f_A = f_A / 2
        B = C
        f_B = f_C
    new_sig = math.exp(A / 2)
    return new_sig
def pre_rating_RD(O: float, new_sig: float) -> float:
    # Step 6: phi*, the pre-rating-period deviation.
    output = math.sqrt(O ** 2 + new_sig ** 2)
    return output

def update_Rating(U: float, O_dash: float, O_j: float, W1: float, Exp: float) -> float:
    # Step 7: mu', the new rating on the Glicko-2 scale.
    output = U + O_dash ** 2 * (G(O_j) * (W1 - Exp))
    return output

def update_RD(O_star: float, V: float) -> float:
    # Step 7: phi', the new rating deviation on the Glicko-2 scale.
    output = 1 / (math.sqrt((1 / O_star ** 2) + (1 / V)))
    return output

def convert_rating_to_original_scale(U_dash: float) -> float:
    # Step 8: convert mu' back to the original rating scale.
    output = 1500 + 173.7178 * U_dash
    return output

def convert_RD_to_original_scale(O_dash: float) -> float:
    # Step 8: convert phi' back to the original rating scale.
    output = 173.7178 * O_dash
    return output
def get_rating(P1_R: float, P2_R: float,
               P1_RD: float, P2_RD: float,
               P1_Vol: float, P1_SysConst: float, P1_Outcome: float) -> tuple[float, float, float]:
    U, U_j = mu(P1_R), mu(P2_R)
    O, O_j = phi(P1_RD), phi(P2_RD)
    # print(f"P1 mu: {U}, P2 mu: {U_j}")
    # print(f"P1 phi: {O}, P2 phi: {O_j}")
    E = Expected(U, U_j, O_j)
    # print(f"E: {E}")
    V = v(O_j, E)
    Delta = Est_Impro(O_j, P1_Outcome, E, V)
    # print(f"v: {V}, Delta: {Delta}")
    new_volatility = new_vol(P1_Vol, Delta, O, V, P1_SysConst)
    O_star = pre_rating_RD(O, new_volatility)
    O_dash = update_RD(O_star, V)
    U_dash = update_Rating(U, O_dash, O_j, P1_Outcome, E)
    # print(f"phi_star: {O_star}, phi_dash: {O_dash}, mu_dash: {U_dash}")
    rating_dash = convert_rating_to_original_scale(U_dash)
    RD_dash = convert_RD_to_original_scale(O_dash)
    print(f"New Rating: {rating_dash}, New RD: {RD_dash}, New Volatility: {new_volatility}")
    return rating_dash, RD_dash, new_volatility
def get_player_ratings(matches: list) -> None:
    P1_R, P1_RD, P1_Vol, P1_SysConst = 1500, 200, 0.06, 0.5
    P2_R, P2_RD, P2_Vol, P2_SysConst = 1400, 30, 0.06, 0.5
    for match in matches:
        P1_outcome, P2_outcome = match[2], (1 - match[2])
        P1_R, P1_RD, P1_Vol = get_rating(P1_R, P2_R, P1_RD, P2_RD, P1_Vol, P1_SysConst,
                                         P1_outcome)
        P2_R, P2_RD, P2_Vol = get_rating(P2_R, P1_R, P2_RD, P1_RD, P2_Vol, P2_SysConst,
                                         P2_outcome)
    print(P1_R, P1_RD, P1_Vol)
    print(P2_R, P2_RD, P2_Vol)
def main():
    matches = []
    for _ in range(10):
        P1 = np.random.choice(['Player 1', 'Player 2'])
        if P1 == 'Player 1':
            P2 = 'Player 2'
        else:
            P2 = 'Player 1'
        matches.append([P1, P2, np.random.choice([0, 1])])
    get_player_ratings(matches)

if __name__ == '__main__':
    start = time.perf_counter()
    main()
    end = time.perf_counter()
    print(f"Time Taken: {end - start}")