Python Scipy Minimize Not Working

I am trying to minimize over the vectors x and y, but the optimizer only satisfies the constraints; it does no actual minimizing.
For example, with the initial guess x=[0.2, 0.3, 0.5] (the elements sum to 1), the result comes back as res.x=[0.2, 0.3, 0.5]: it hasn't changed at all!
# -*- coding:utf8 -*-
import random
import numpy as np
from scipy import optimize
import networkx as nx
import matplotlib.pyplot as plt

def Ud(x, X, Aj, G, p):
    lost = G.node[Aj[-1]]["weight"]
    Aj = Aj[:-1]
    P = 1
    sum = 0
    for xi in x:
        for Xi in X:
            N = set(Aj).intersection(set(Xi))
            # N = [random.randint(90, 120) for _ in range(0, 1)]
            for n in N:
                P *= (1 - p[n - 1])
            sum += xi * P
            P = 1
    return -lost * sum

### objective function for defenders ###
def min_Ud(x, X, A, G, p):
    min = float("inf")
    for Aj in A:
        temp_min = Ud(x, X, Aj, G, p)
        if temp_min < min:
            min = temp_min
    return -min

### objective function for attackers ###
def Ua(a, X, A, G, p):
    sum = 0
    P = 1
    for aj, Aj in zip(a, A):  # zip() is required; "in a, A" raises a ValueError
        for Xi in X:
            N = set(Aj[:-1]).intersection(set(Xi))
            for n in N:
                P *= (1 - p[n - 1])
            sum += G.node[Aj[-1]]["weight"] * aj * P
            P = 1
    return sum

### fun for LP ###
def coreLP(X, A, G, p):
    x0 = np.array([0.7, 0.2, 0.1])
    a0 = np.array([0.5, 0.2, 0.3])
    def c1(x):
        return x.sum() - 1
    cons = ({'type': 'eq', 'fun': c1},)
    Ud_star = optimize.minimize(min_Ud, x0, args=(X, A, G, p), constraints=cons, bounds=((0, 1), (0, 1), (0, 1)))
    # note: this second call also minimizes min_Ud; Ua was presumably intended here
    Ua_star = optimize.minimize(min_Ud, a0, args=(X, A, G, p), constraints=cons, bounds=((0, 1), (0, 1), (0, 1)))
    print Ud_star
    print Ua_star
    return Ud_star.x, Ua_star.x
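For what it's worth, here is a minimal, self-contained sketch (toy quadratic objective, my own, not the game model above) showing that optimize.minimize with the same equality constraint and bounds does move away from x0 when the objective has a usable gradient. If min_Ud is flat or non-smooth in x, a gradient-based method such as SLSQP (the method scipy selects when constraints are present) can report convergence right at the starting point, which would produce exactly the symptom described:

import numpy as np
from scipy import optimize

# toy objective, minimized at x = (0, 0, 1) on the probability simplex
def f(x):
    return x[0] ** 2 + x[1] ** 2 + (x[2] - 1) ** 2

cons = ({'type': 'eq', 'fun': lambda x: x.sum() - 1},)
res = optimize.minimize(f, np.array([0.2, 0.3, 0.5]),
                        constraints=cons,
                        bounds=((0, 1), (0, 1), (0, 1)))
print(res.x)  # approximately [0, 0, 1]: the optimizer leaves x0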

Related

Plotting Monte Carlo Simulations for option pricing in Python

I am trying to plot the Monte Carlo barrier option prices for different numbers of simulations on the x axis. This is what I have tried so far, but I am getting the error: ValueError: x and y must have same first dimension, but have shapes (10,) and (5,).
I am new to Python, and as hard as I try I cannot find the error.
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt

def mc_single_barrier_do(S0, K, T, H, r, vol, N, M):
    # Constants
    dt = T / N  # change in time
    nudt = (r - 0.5 * vol ** 2) * dt  # deterministic component
    volsdt = vol * np.sqrt(dt)  # diffusion coefficient
    erdt = np.exp(r * dt)  # discount factor

    # Standard Error Placeholders
    sum_CT = 0
    sum_CT2 = 0

    # Monte Carlo Method
    for i in range(M):
        # Barrier Crossed Flag
        BARRIER = False
        St = S0
        for j in range(N):
            epsilon = np.random.normal()
            Stn = St * np.exp(nudt + volsdt * epsilon)
            St = Stn
            Ptn = np.exp(-2. * (H - St) * (H - Stn) / (St ** 2. * volsdt ** 2.))
            Pt = Ptn
            if Pt >= npr.uniform():
                BARRIER = True
        if np.amin(St) > H and BARRIER == False:
            CT = np.maximum(St - K, 0)
        else:
            CT = 0.
        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT * CT
    C0_MC = np.exp(-r * T) * sum_CT / M
    return C0_MC

def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
    assert method in ['MC', 'AV', 'CV']
    mean_payoffs = np.zeros(int(np.ceil(max_sample / 10)))
    if method == 'MC':
        for n_sample in range(10, max_sample + 1, 10):
            payoffs = mc_single_barrier_do(n_sample, S0, K, T, H, r, vol, N)
            mean_payoffs[int(n_sample / 10 - 1)] = np.mean(payoffs)
        return mean_payoffs

r = 0.1
vol = 0.2
T = 2
N = 20
dt = T / N
S0 = 50
K = 50
H = 45
max_sample = 100

MC_price_estimates = sim_iterator(S0, T, r, vol, K, H, max_sample, N, method='MC')

x_axis1 = range(10, max_sample + 1, 10)
plt.plot(x_axis1, MC_price_estimates)
plt.xlabel("No. of Simulations")
plt.ylabel("Estimated option price")
plt.title("Ordinary Monte Carlo Method")
plt.legend()
plt.show()
In your function definition you used:
def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
while when calling the function you used:
MC_price_estimates = sim_iterator(S0, T, r, vol, K, H, max_sample, N, method='MC')
Python maps positional arguments by position, not by name: the value in the first position is bound to the first parameter, and so on. That means S0 in your call was bound to max_sample in the definition. Either fix the argument order, or use keyword arguments such as S0=S0:
MC_price_estimates = sim_iterator(S0=S0, T=T, r=r, vol=vol, K=K, H=H, max_sample=max_sample, N=N, method='MC')
This is what your code looks like when all arguments are passed as keywords:
def mc_single_barrier_do(S0, K, T, H, r, vol, N, M):
    # Constants
    dt = T / N  # change in time
    nudt = (r - 0.5 * vol ** 2) * dt  # deterministic component
    volsdt = vol * np.sqrt(dt)  # diffusion coefficient
    erdt = np.exp(r * dt)  # discount factor

    # Standard Error Placeholders
    sum_CT = 0
    sum_CT2 = 0

    # Monte Carlo Method
    for i in range(M):
        # Barrier Crossed Flag
        BARRIER = False
        St = S0
        for j in range(N):
            epsilon = np.random.normal()
            Stn = St * np.exp(nudt + volsdt * epsilon)
            St = Stn
            Ptn = np.exp(-2. * (H - St) * (H - Stn) / (St ** 2. * volsdt ** 2.))
            Pt = Ptn
            if Pt >= npr.uniform():
                BARRIER = True
        if np.amin(St) > H and BARRIER == False:
            CT = np.maximum(St - K, 0)
        else:
            CT = 0.
        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT * CT
    C0_MC = np.exp(-r * T) * sum_CT / M
    return C0_MC

def sim_iterator(max_sample, N, S0, T, r, vol, K, H, method):
    assert method in ['MC', 'AV', 'CV']
    mean_payoffs = np.zeros(int(np.ceil(max_sample / 10)))
    if method == 'MC':
        for n_sample in range(10, max_sample + 1, 10):
            payoffs = mc_single_barrier_do(M=n_sample, S0=S0, K=K, T=T, H=H, r=r, vol=vol, N=N)
            mean_payoffs[int(n_sample / 10 - 1)] = np.mean(payoffs)
        return mean_payoffs

r = 0.1
vol = 0.2
T = 2
N = 20
dt = T / N
S0 = 50
K = 50
H = 45
max_sample = 100

MC_price_estimates = sim_iterator(S0=S0, T=T, r=r, vol=vol, K=K, H=H, max_sample=max_sample, N=N, method='MC')

x_axis1 = range(10, max_sample + 1, 10)
plt.plot(x_axis1, MC_price_estimates)
plt.xlabel("No. of Simulations")
plt.ylabel("Estimated option price")
plt.title("Ordinary Monte Carlo Method")
plt.legend()
plt.show()
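As a minimal standalone illustration of the positional-argument pitfall (toy function, made up for this example):

def f(a, b):
    return a - b

print(f(5, 3))      # positional: a=5, b=3 -> 2
print(f(3, 5))      # same values, different positions: a=3, b=5 -> -2
print(f(b=3, a=5))  # keywords bind by name, so order no longer matters -> 2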

Numerical optimization with Gradient Descent in Python

I'm trying to solve the following numerical optimization problem: find the vector x that minimizes the cost function 0.5 * norm(B*x - v, 2)^2, where B is a matrix and v is a vector. I have implemented two gradient descent algorithms. In one of them I tune the step size manually, and in the other I compute it automatically with equation (2.5) from ftp://lsec.cc.ac.cn/pub/yyx/papers/p0504.pdf. The gradient of the cost function is B^T(B*x - v).
Additionally, I compare my implementations with the solve(A, b) function from numpy.linalg, noting that the solution of the optimization problem is also the solution of the linear system A*x = b, where A = B^T * B and b = B^T * v. So far I'm getting poor results: large errors and long running times. I don't know if there is an error in my implementations or if this is simply how these algorithms behave in the computational experiments I set up.
In the computational experiments, I generate random "solution" vectors x and matrices B, then compute A and b accordingly.
Any feedback is appreciated.
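For reference, the automatic step size my gradientDescentBB below computes is the Barzilai-Borwein step, which I believe is what equation (2.5) of the linked paper gives: with s_k = x_k - x_{k-1} and y_k = grad_k - grad_{k-1} (differences of successive iterates and gradients), the step is alpha_k = (s_k^T * y_k) / norm(y_k, 2)^2.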
This is my code:
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
import time

def residue(x, B, v):
    aux = np.dot(B, x) - v
    aux = pow(LA.norm(aux, 2), 2)
    aux = aux / pow(LA.norm(v, 2), 2)
    return aux

def gradGD(x, B, v):
    aux = np.dot(B, x) - v
    return np.dot(B.T, aux)

# Gradient descent with a manually tuned, fixed step size
def gradientDescent(B, v, alpha, tol, x0):
    x = x0
    while True:
        res = residue(x, B, v)
        print('Residue ', res)
        if res < tol:
            break
        x = x - alpha * gradGD(x, B, v)
    return x

# Gradient descent with automatic (Barzilai-Borwein) step size
def gradientDescentBB(B, v, tol, x0):
    x = x0
    xpre = np.zeros((N, 1))  # N is taken from the enclosing scope
    flag = 0
    while True:
        res = residue(x, B, v)
        # print('Residue ', res)
        if res < tol:
            break
        if flag == 0:
            # first step: no previous gradient yet, so take a tiny fixed step
            grad = gradGD(x, B, v)
            x = x - (1e-06) * grad
            flag = 1
            continue
        gradpre = grad
        grad = gradGD(x, B, v)
        y = grad - gradpre
        s = x - xpre
        alpha = np.dot(s.T, y) / pow(LA.norm(y, 2), 2)
        xpre = x
        x = x - alpha * grad
    return x

# Solves the optimization problem via the normal equations A*x = b
def solver(B, v):
    A = np.dot(B.T, B)
    b = np.dot(B.T, v)
    return np.linalg.solve(A, b)

# Main routine
N = 1000
epsilon = 1.0e-6
a = 1 / N - epsilon
iter = 20

mytime_iter = []
time2_iter = []
myeabs_iter = []
myerel_iter = []
myepercent_iter = []
cgseabs_iter = []
cgserel_iter = []
cgsepercent_iter = []

# Running the experiment many times
for i in range(iter):
    print('Iteration: ', i)
    B = a * np.random.randn(N, N) + np.ones((N, N))
    x0 = np.random.randn(N, 1)  # real solution of the optimization problem
    v = np.dot(B, x0)
    mystart = time.time()
    # x = gradientDescent(B, v, alpha=1999100e-09, tol=1e-05, x0=np.zeros((N, 1)))  # Gradient Descent: Method 1
    x = gradientDescentBB(B, v, tol=1e-05, x0=np.zeros((N, 1)))  # Gradient Descent: Method 2
    myend = time.time()
    mytime = myend - mystart
    start2 = time.time()
    xalt = solver(B, v)  # solution of the optimization problem by solving A*x = b
    end2 = time.time()
    time2 = end2 - start2  # was start2 - end2, which is always negative
    myeabs = LA.norm(x - x0, 2)
    myerel = myeabs / LA.norm(x0, 2)
    myepercent = myerel * 100
    cgseabs = LA.norm(xalt - x0, 2)
    cgserel = cgseabs / LA.norm(x0, 2)
    cgsepercent = cgserel * 100
    mytime_iter.append(mytime)
    time2_iter.append(time2)
    myeabs_iter.append(myeabs)
    myerel_iter.append(myerel)
    myepercent_iter.append(myepercent)
    cgseabs_iter.append(cgseabs)
    cgserel_iter.append(cgserel)
    cgsepercent_iter.append(cgsepercent)

plt.figure(1)
plt.plot(mytime_iter, 'bo', label="GD")
plt.plot(time2_iter, 'ro', label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Time (s)")
# plt.ylim(-1.5, 2.0)

plt.figure(2)
plt.plot(myeabs_iter, "-b", label="GD")
plt.plot(cgseabs_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Absolute error")

plt.figure(3)
plt.plot(myerel_iter, "-b", label="GD")
plt.plot(cgserel_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Relative error")

plt.figure(4)
plt.plot(myepercent_iter, "-b", label="GD")
plt.plot(cgsepercent_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.ylabel("Relative error (%)")
plt.show()
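A likely culprit worth checking: B = a * np.random.randn(N, N) + np.ones((N, N)) with a close to 1/N is dominated by the rank-one all-ones matrix, so B, and even more so A = B^T * B, is very ill-conditioned, and plain gradient descent converges at a rate governed by that condition number. A quick probe (sketch, using the same parameters as above):

import numpy as np

N = 1000
a = 1 / N - 1.0e-6
B = a * np.random.randn(N, N) + np.ones((N, N))
print(np.linalg.cond(B))        # very large: the all-ones part dominates
print(np.linalg.cond(B.T @ B))  # roughly squared again for the normal equations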

vectorized implementation without using for loops

I'm trying to implement this code, but it's quite slow because of the two for loops. Can anyone suggest a vectorized version of this code, please?

import numpy as np

def dist(u, v, p, q):
    return np.sqrt(np.square(u - p / 2) + np.square(v - q / 2))

P, Q = 1000, 1000
thresh = 100
H = np.zeros((P, Q))
for u in range(P):
    for v in range(Q):
        if dist(u, v, P, Q) <= thresh:
            H[u, v] = 1
Try this.
import numpy as np
P,Q = 1000,1000
thresh = 100
u = np.arange(P)
v = np.arange(Q)
dist_mat = np.sqrt(((u - P/2)**2)[:, None] + ((v - Q/2)**2)[None, :])
H = np.zeros((P, Q))
H[dist_mat <= thresh] = 1
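This works by broadcasting: a (P, 1) column of squared row offsets plus a (1, Q) row of squared column offsets expands to the full (P, Q) distance grid with no Python-level loop. A quick sanity check against the original loop version, on a small grid so the loop stays fast (sketch):

import numpy as np

def dist(u, v, p, q):
    return np.sqrt(np.square(u - p / 2) + np.square(v - q / 2))

P, Q, thresh = 50, 60, 10

# loop version
H_loop = np.zeros((P, Q))
for u in range(P):
    for v in range(Q):
        if dist(u, v, P, Q) <= thresh:
            H_loop[u, v] = 1

# broadcast version
uu = np.arange(P)
vv = np.arange(Q)
dist_mat = np.sqrt(((uu - P / 2) ** 2)[:, None] + ((vv - Q / 2) ** 2)[None, :])
H_vec = np.zeros((P, Q))
H_vec[dist_mat <= thresh] = 1

print(np.array_equal(H_loop, H_vec))  # True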
I think Numba can speed up your code:

import numpy as np
import numba

@numba.jit(nopython=True)
def dist(u, v, p, q):
    # the helper must be compiled too, or nopython mode fails
    return np.sqrt(np.square(u - p / 2) + np.square(v - q / 2))

@numba.jit(nopython=True)
def function(P, Q, thresh, H):
    for u in range(P):
        for v in range(Q):
            if dist(u, v, P, Q) <= thresh:
                H[u, v] = 1

P, Q = 1000, 1000
thresh = 100
H = np.zeros((P, Q))
function(P, Q, thresh, H)
import numpy as np

def dist(u, v, p, q):
    return np.sqrt(np.square(u - p / 2) + np.square(v - q / 2))

P, Q = 1000, 1000
thresh = 100
idx_H = np.stack(np.indices((P, Q)), axis=-1)
H_LP = dist(idx_H[..., 0], idx_H[..., 1], P, Q) <= thresh

root-finding algorithm for a complex polynomial equation in python

I am trying to solve the following equation with a simple algorithm. I am not sure whether the algorithm I am using is the best one, but it is the only one I could think of.
In this equation, everything other than P is known, and I am trying to solve for it. N is an array of counts and i is the channel number. S(n) is the probability of having a certain n, and C is the binomial coefficient of (n, r). Pi is the probability in channel i and Pj is the probability in the previous channels within distance D of i. The code itself is not working, but I believe the main problem lies in the way I am trying to solve the equation.
import numpy as np
import matplotlib.pyplot as plt
import math as ms
from scipy.misc import derivative
import scipy as sp

def next_guess(f, x):
    slop = derivative(f, x, dx=0.01)
    return x - float(f(x)) / slop

def my_newton(f, guess):
    for i in range(30):
        # print(guess)
        guess = next_guess(f, guess)
    return guess

def binomial(n, r):
    dif = ms.factorial(n - r)
    n = ms.factorial(n)
    r = ms.factorial(r)
    return n / (r * dif)

def wrap_func(x, S=np.array([0.1, 0.5, 0.2, 0.1, 0.1]), D=1, N=np.array([10, 15, 20, 1, 13])):
    if type(x) == float:
        z = np.zeros(1)
    else:
        z = np.zeros(x.shape[0])
    N_tot = N.sum()
    n_max = S.shape[0]
    for i in range(z.shape[0]):
        # note: func(...) is called here, so a number (not a function) is
        # handed to my_newton, which expects a function of x -- this is
        # likely the main bug
        z[i] += my_newton(func(x, S=S, D=1, N=N[i], n_max=n_max, N_tot=N_tot, i=i), i / 100)
    return z

def func(x, S=np.array([0.1, 0.5, 0.2, 0.1, 0.1]), D=1, N=0, n_max=5, N_tot=10, i=0):
    S_sum = 0
    binom_sum = 0
    y = 0
    for n in range(n_max):
        S_sum += S[n]
        for r in range(n):
            binom_sum += binomial(n, r)
            y += S_sum * binom_sum * (x ** r) * (1 - x - summ_x(x, D, i, S, N, n_max, N_tot)) ** (n - r)
    return N_tot * y - N

def summ_x(x, D, i, S, N, n_max, N_tot):
    j_min = max(i - D - 1, 0)
    j_max = i - 1
    x_values = 0
    if i == 0:
        return x_values
    else:
        for j in range(j_min, j_max):
            x_values += func(x, S, D, N, n_max, N_tot, i)
        return x_values

x = np.linspace(0, 1, 1000)
S = np.array([0.1, 0.5, 0.2, 0.1, 0.1])
N = np.random.choice(50, size=1000)
print(my_newton(wrap_func, 0.1))
plt.plot(x, wrap_func(x, S=S, D=1, N=N))
plt.axhline(0, lw=0.5, color='grey')
# plt.plot(my_newton(wrap_func, 1), wrap_func(my_newton(wrap_func, 1), S=S, D=1, N=N), 'd')
plt.show()
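As a sanity check for the hand-rolled Newton iteration, scipy already ships one: scipy.optimize.newton performs the same kind of iteration with proper convergence tests. A minimal sketch on a toy function (the cubic here is made up; it is not the channel equation above):

from scipy import optimize

def g(x):
    return x ** 3 - 0.125  # single real root at x = 0.5

root = optimize.newton(g, x0=0.1)  # uses the secant variant when no derivative is given
print(root)  # ~0.5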

Fitting a polynomial function for a vector field in python

First of all, thank you everybody for the amazing work on Stack Overflow... you guys are amazing and have helped me out quite a few times already. Regarding my problem: I have a series of vectors in the format (VectorX, VectorY, StartingpointX, StartingpointY):
data = [(-0.15304757819399128, -0.034405679205349315, -5.42877197265625, 53.412933349609375), (-0.30532995491023485, -0.21523935094046465, -63.36669921875, 91.832427978515625), (-0.15872430479453215, -0.077999419482978283, -67.805389404296875, 81.001983642578125), (-0.36415549211687903, -0.33757147194808113, -59.015228271484375, 82.976226806640625), (0.0, 0.0, 0.0, 0.0), (-0.052973530805275004, 0.098212384392411423, 19.02667236328125, -13.72125244140625), (-0.34318724086483599, 0.17123742336019632, 80.0394287109375, 108.58499145507812), (0.19410169197834648, -0.17635303976555861, -55.603790283203125, -76.298828125), (-0.38774018337716143, -0.0824692384322816, -44.59942626953125, 68.402496337890625), (0.062202543524108478, -0.37219011831012949, -79.828826904296875, -10.764404296875), (-0.56582988168383963, 0.14872365390732512, 39.67657470703125, 97.303192138671875), (0.12496832467900276, -0.12216653754859408, 24.65948486328125, -30.92584228515625)]
I plot the vector field with the following code:
import math
import numpy as np
import matplotlib.pyplot as plt

def main():
    # Format Data...
    numdata = len(data)
    x = np.zeros(numdata)
    y = np.zeros(numdata)
    z = np.zeros(numdata)  # was missing: z is assigned in the loop below
    u = np.zeros(numdata)
    v = np.zeros(numdata)
    for i, el in enumerate(data):
        x[i] = el[2]
        y[i] = el[3]
        # length of vector
        z[i] = math.sqrt(el[0] ** 2 + el[1] ** 2)
        u[i] = el[0]
        v[i] = el[1]
    # Plot
    plt.quiver(x, y, u, v)
    # showing the length with color
    plt.scatter(x, y, c=z)
    plt.show()

main()
I want to create a polynomial function that gives a continuous vector field over the whole area. After some research I found the following functions for fitting polynomials in two dimensions. The problem is that they only accept a single scalar value to be fitted.
def polyfit2d(x, y, z, order=3):
    ncols = (order + 1) ** 2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order + 1), range(order + 1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x ** i * y ** j
    m, _, _, _ = np.linalg.lstsq(G, z)
    return m

def polyval2d(x, y, m):
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order + 1), range(order + 1))
    z = np.zeros_like(x)
    for a, (i, j) in zip(m, ij):
        z += a * x ** i * y ** j
    return z
Also, when I tried to fit the one-dimensional lengths of the vectors, the values returned from polyval2d were completely off. Does anybody know a method to get a fitted function that will return a vector (x, y) for any point in the grid?
Thank you!
A polynomial fit to a 2-d vector field consists of two bivariate polynomials: one for the x-component and one for the y-component. In other words, your final polynomial fit will look something like:
P(x,y) = ( x + x*y, 1 + x + y )
So you will have to call polyfit2d twice. Here is an example:
import numpy as np
import itertools

def polyfit2d(x, y, z, order=3):
    ncols = (order + 1) ** 2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order + 1), range(order + 1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x ** i * y ** j
    m, _, _, _ = np.linalg.lstsq(G, z)
    return m

def fmt1(x, i):
    if i == 0:
        return ""
    elif i == 1:
        return x
    else:
        return x + '^' + str(i)

def fmt2(i, j):
    if i == 0:
        return fmt1('y', j)
    elif j == 0:
        return fmt1('x', i)
    else:
        return fmt1('x', i) + fmt1('y', j)

def fmtpoly2(m, order):
    for (i, j), c in zip(itertools.product(range(order + 1), range(order + 1)), m):
        yield ("%f %s" % (c, fmt2(i, j)))

xs = np.array([0, 1, 2, 3])
ys = np.array([0, 1, 2, 3])
zx = np.array([0, 2, 6, 12])
zy = np.array([1, 3, 5, 7])

mx = polyfit2d(xs, ys, zx, 2)
print "x-component(x,y) = ", ' + '.join(fmtpoly2(mx, 2))
my = polyfit2d(xs, ys, zy, 2)
print "y-component(x,y) = ", ' + '.join(fmtpoly2(my, 2))
In this example our vector field is:
at (0,0): (0,1)
at (1,1): (2,3)
at (2,2): (6,5)
at (3,3): (12,7)
Also, I think I found a bug in polyval2d; this version gives more accurate results:
def polyval2d(x, y, m):
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order + 1), range(order + 1))
    z = np.zeros_like(x)
    for a, (i, j) in zip(m, ij):
        z = z + a * x ** i * y ** j
    return z
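To get an (x, y) vector back at an arbitrary point, evaluate both fitted components there; a short sketch continuing the example above (reuses the mx and my computed by the fit):

gx, gy = np.meshgrid(np.linspace(0, 3, 4), np.linspace(0, 3, 4))
U = polyval2d(gx, gy, mx)  # fitted x-component on the grid
V = polyval2d(gx, gy, my)  # fitted y-component on the grid
# (U[i, j], V[i, j]) is the fitted vector at the point (gx[i, j], gy[i, j])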
