I am trying to use a Lagrange multiplier to optimize a function, and I want to loop over an array of values to get a list of results, but I get the error
ValueError: setting an array element with a sequence.
Here is my code. Where am I going wrong? If n is not an array I get the correct result.
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000, 100000, 10000)

def func(X):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX) - func(X-dX)) / (2*h)
    return dLambda

X1 = fsolve(dfunc, [1, 1, 0])
print(X1)
Any help would be appreciated, thank you very much.
First, check what you are passing to fsolve: it expects a function that returns one value (or an array of the same size) per unknown.
Second, try print(func([1,1,0])) - the result is not a single number but [2 2 2 2 2 2 2 2 2], because n is an array. If you want to iterate over n, try:
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000, 100000, 10000)

def func(X, n):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X, n):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX, n) - func(X-dX, n)) / (2*h)
    return dLambda

for iter_n in n:
    print("for n = {0} dfunc = {1}".format(iter_n, dfunc([0.8, 0.4, 0.3], iter_n)))
I don't know how to feed this array into an ordinary differential equation, i.e. how to select, for each time, the corresponding item.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

u = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29]

def f(z, t):
    dzdt = np.zeros(2,)
    x = z[0]
    y = z[1]
    dzdt[0] = (-x + u) / 2.0
    dzdt[1] = (-y + x) / 5.0
    return dzdt

z0 = [0, 0]
n = 30
t = np.linspace(0, 15, n)
u = np.zeros(n)
u[0] = 1
x = np.zeros(n)
y = np.zeros(n)
z = odeint(f, z0, t)
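One common way to handle this (a minimal sketch, not part of the original post) is to interpolate the u samples at the current time inside f, so each call picks the u value corresponding to t:

import numpy as np
from scipy.integrate import odeint

n = 30
t = np.linspace(0, 15, n)
u = np.zeros(n)
u[0] = 1

def f(z, t_now):
    # look up the input value corresponding to the current time
    u_now = np.interp(t_now, t, u)
    x, y = z
    dxdt = (-x + u_now) / 2.0
    dydt = (-y + x) / 5.0
    return [dxdt, dydt]

z0 = [0, 0]
z = odeint(f, z0, t)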
I'm trying to solve some ODEs using different methods and then print and plot my results. When I try to run it I get the error IndexError: index 2 is out of bounds for axis 0 with size 2.
I know it has to do with the dimensions, but I thought all of my dimensions were correct. Here is an example of each way I'm trying to solve the ODEs:
import numpy as np

def f(t, x, y):
    xprime = x - y + (2*t) - (t**2) - (t**3)
    return xprime

def g(t, x, y):
    yprime = x + y - (4*(t**2)) + (t**3)
    return yprime

# Exact Solution
def exact(t):
    y = np.zeros(len(t))
    x = np.zeros(len(t))
    for i in range(n):
        cos_arr = np.cos(t)
        sin_arr = np.sin(t)
        y = np.exp(t) * cos_arr + t**2
        x = np.exp(t) * sin_arr - t**3
    return x, y

# Explicit Euler
def Eulerx(t0, tmax, x0, n):
    t, dt = np.linspace(t0, tmax, n, retstep=True)
    x = np.zeros(n)
    y = np.zeros(n)
    x[0] = x0
    y[0] = y0
    for i in range(n-1):
        x[i+1] = x[i] + (dt/2) * f(t[i], x[i], y[i])
    return t, x

# RK2
def RK2x(t0, tmax, x0, n):
    t, dt = np.linspace(t0, tmax, n, retstep=True)
    x = np.zeros(n)
    y = np.zeros(n)
    x[0] = x0
    y[0] = y0
    for i in range(n-1):
        xK1 = f(t[i], x[i], y[i])
        xK2 = f(t[i] + dt, x[i] + dt*xK1, y[i])
        x[i+1] = x[i] + (dt * (1/2) * (xK1 + xK2))
    return t, x

# Classical RK4
def RK4x(t0, tmax, x0, n):
    t, dt = np.linspace(t0, tmax, n, retstep=True)
    x = np.zeros(n)
    y = np.zeros(n)
    x[0] = x0
    y[0] = y0
    for i in range(n-1):
        x4K1 = f(t[i], x[i], y[i])
        x4K2 = f(t[i] + ((1/2)*dt), x[i] + ((1/2)*dt*x4K1), y[i])
        x4K3 = f(t[i] + ((1/2)*dt), x[i] + ((1/2)*dt*x4K2), y[i])
        x4K4 = f(t[i] + dt, x[i] + dt*x4K3, y[i])
        x[i+1] = x[i] + (dt * (1/6) * (x4K1 + (2*x4K2) + (2*x4K3) + x4K4))
    return t, x

if __name__ == '__main__':
    t0 = 0
    tmax = 1
    x0 = 1
    y0 = 0
    n = 50
    [t, X1] = Eulerx(t0, tmax, x0, n)
    [t, Y1] = Eulery(t0, tmax, y0, n)
    [t, X2] = RK2x(t0, tmax, x0, n)
    [t, Y2] = RK2y(t0, tmax, y0, n)
    [t, X3] = RK4x(t0, tmax, x0, n)
    [t, Y3] = RK4y(t0, tmax, y0, n)
    x = exact(t)
    y = exact(t)
    abs_errx1 = abs(x - X1)
    abs_errx2 = abs(x - X2)
    abs_errx3 = abs(x - X3)
    print("=========================================================================")
    print("  n    Eulerx    Eulery    RK2x    RK2y    RK4x    RK4y", end='\n')
    for i in range(n):
        print(abs_errx1[i], abs_erry1[i], abs_errx2[i], abs_erry2[i], abs_errx3[i], abs_erry3[i])
    print("=========================================================================")
Your arrays abs_errx1, etc. are all of shape (2, 50). In the print loop you index abs_errx1[i] with i running from 0 to 49, so the loop index is applied to the first dimension (size 2) when you need it applied to the second. The extra first dimension comes from exact(t) returning the tuple (x, y), which broadcasts to two rows when you subtract X1 from it.
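A minimal sketch of a fix along those lines (assuming the intent was to compare the x and y components separately): unpack the tuple returned by exact so that x and y are each 1-D arrays of length n:

x, y = exact(t)          # x and y each have shape (n,)
abs_errx1 = abs(x - X1)  # shape (n,), so abs_errx1[i] is a scalar
abs_erry1 = abs(y - Y1)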
I am using maximum entropy to find a distribution over positive integer vectors. I can estimate the mean and variance, and I have three equations from which I am trying to find a and b.
The equations:
integral(exp(a*x^2 + b*x + c)) - 1
integral(x*exp(a*x^2 + b*x + c)) - mean
integral(x^2*exp(a*x^2 + b*x + c)) - mean^2 - var
(all integrals are over [0, ∞))
The problem:
I am trying to use a numerical solver; I used scipy's fsolve together with sympy for the integrals, but I guess I am missing some knowledge.
My code:
import numpy as np
import sympy as sym
from scipy.optimize import *

def myFunction(x, *data):
    y = sym.symbols('y')
    m, v = data
    F = [0]*3
    x[0] = -abs(x[0])
    print(x)
    F[0] = (sym.integrate(sym.exp(x[0] * y**2 + x[1] * y + x[2]), (y, 0, sym.oo)) - 1).evalf()
    F[1] = (sym.integrate(y * sym.exp(x[0] * y**2 + x[1] * y + x[2]), (y, 0, sym.oo)) - m).evalf()
    F[2] = (sym.integrate((y**2) * sym.exp(x[0] * y**2 + x[1] * y + x[2]), (y, 0, sym.oo)) - v - m).evalf()
    print(F)
    return F

data = (10, 3.5)  # mean and var for example
xGuess = [1, 1, 1]
z = fsolve(myFunction, xGuess, args=data)
print(z)
My results are not that accurate; is there a better way to solve this? The residuals I get are:
integral(exp(a*x^2 + b*x + c)) - 1 = 5.67659292676884
integral(x*exp(a*x^2 + b*x + c)) - mean = −1.32123173796713
integral(x^2*exp(a*x^2 + b*x + c)) - mean^2 - var = −2.20825624606312
Thanks
I have rewritten the problem, replacing sympy with numpy and lambdas (inline functions).
Also note that in your problem statement you subtract $mean^2$ in the third equation, but in your code you only subtract $mean$.
import numpy as np
from scipy.optimize import minimize
from scipy.integrate import quad

def myFunction(x, data):
    m, v = data
    F = np.zeros(3)  # use a numpy array
    # use scipy.integrate.quad for integration of lambda functions
    # quad returns (result, error), so we select the result value with [0]
    F[0] = quad(lambda y: np.exp(x[0] * y**2 + x[1] * y + x[2]), 0, np.inf)[0] - 1
    F[1] = quad(lambda y: y * np.exp(x[0] * y**2 + x[1] * y + x[2]), 0, np.inf)[0] - m
    F[2] = quad(lambda y: (y**2) * np.exp(x[0] * y**2 + x[1] * y + x[2]), 0, np.inf)[0] - v - m**2
    # minimize the squared error
    return np.sum(F**2)

data = (10, 3.5)  # mean and var for example
xGuess = [-1, 1, 1]
z = minimize(lambda x: myFunction(x, data), x0=xGuess,
             bounds=((None, 0), (None, None), (None, None)))  # bound the first coefficient to be negative
print(z)
# x: array([-0.99899311,  2.18819689,  1.85313181])
Does this seem more reasonable?
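As a quick sanity check (a sketch under the same assumptions, with variable names of my choosing), the fitted coefficients can be plugged back into the integrals to see how closely they reproduce the normalization and the target mean:

a, b, c = z.x  # fitted coefficients from the minimize result
norm_res = quad(lambda y: np.exp(a*y**2 + b*y + c), 0, np.inf)[0]    # target: 1
mean_res = quad(lambda y: y*np.exp(a*y**2 + b*y + c), 0, np.inf)[0]  # target: m = 10
print(norm_res, mean_res)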
I am trying to plot the error of this algorithm against h and I have run into a problem: the error calculation can't use the first value, because it divides 0/0. How do I ignore the first value, where x = 0? I basically need to start the summation at i = 2 on line 46 (the absolute error line). Any help is much appreciated.
import numpy
import matplotlib.pyplot as pyplot
from scipy.optimize import fsolve
from matplotlib import rcParams

rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
rcParams['figure.figsize'] = (12, 6)
printing = False

def rk3(A, bvector, y0, interval, N):
    h = (interval[1] - interval[0]) / N
    x = numpy.linspace(interval[0], interval[1], N+1)
    y = numpy.zeros((len(y0), N+1))
    y[:, 0] = y0
    b = bvector
    for i in range(N):
        y_1 = y[:, i] + h*(numpy.dot(A, y[:, i]) + b(x[i]))
        y_2 = (3/4)*y[:, i] + 0.25*y_1 + 0.25*h*(numpy.dot(A, y_1) + b(x[i]+h))
        y[:, i+1] = (1/3)*y[:, i] + (2/3)*y_2 + (2/3)*h*(numpy.dot(A, y_2) + b(x[i]+h))
    return x, y

def exact(interval, N):
    w = numpy.linspace(interval[0], interval[1], N+1)
    z = numpy.array([numpy.exp(-1000*w), (1000/999)*(numpy.exp(-w) - numpy.exp(-1000*w))])
    return w, z

A = numpy.array([[-1000, 0], [1000, -1]])

def bvector(x):
    return numpy.zeros(2)

y0 = numpy.array([1, 0])
interval = numpy.array([0, 0.1])
N = numpy.arange(40, 401, 40)
h = numpy.zeros(len(N))
abs_err = numpy.zeros(len(N))

for i in range(len(N)):
    interval = numpy.array([0, 0.1])
    h[i] = (interval[1] - interval[0]) / N[i]
    x, y = rk3(A, bvector, y0, interval, N[i])
    w, z = exact(interval, N[i])
    abs_err[i] = h[i]*numpy.sum(numpy.abs((y[1,:] - z[1,:]) / z[1,:]))

p = numpy.polyfit(numpy.log(h), numpy.log(abs_err), 1)

fig = pyplot.figure(figsize=(12, 8), dpi=50)
pyplot.loglog(h, abs_err, 'kx')
pyplot.loglog(h, numpy.exp(p[1]) * h**(p[0]), 'b-')
pyplot.xlabel('$h$', size=16)
pyplot.ylabel('$|$Error$|$', size=16)
pyplot.show()
Simply add an if check for the value that is zero. For example, if the dividing variable is x:
if x > 0:
    # code here for the calculation
The above uses only positive, non-zero values. To skip only zero, use:
if x != 0:
You can also use the three arguments of range() in a for loop:
for a in range(start_value, end_value, increment):
so this:
for a in range(2, 10, 2):
    print(a)
will give you the result below:
2
4
6
8
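Applied to the posted code, NumPy slicing gives the same effect without an explicit if or a changed loop range: drop the first point (where the exact solution is 0) from the error sum. A sketch of the absolute-error line rewritten that way:

# skip index 0, where z[1, 0] == 0 and the relative error would be 0/0
abs_err[i] = h[i] * numpy.sum(numpy.abs((y[1, 1:] - z[1, 1:]) / z[1, 1:]))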
I'm trying to maximize/minimize a function of two variables using the Lagrange multiplier method; below is my code.
import numpy as np
from scipy.optimize import fsolve

Sa = 200
Sm = 100
n = 90
mu1 = 500
mu2 = 150
sigma1 = 25
sigma2 = 10
f = 0.9

def func(X):
    u1 = X[0]
    u2 = X[1]
    L = X[2]  # this is the multiplier; lambda is a reserved keyword in python
    # function   --> f(u1,u2) = u1**2 + u2**2
    # constraint --> g(u1,u2) = (Snf/a)**1/b - n = 0
    Snf = Sa/(1 - Sm/(sigma1*u1 + mu1))
    a = (f*(sigma1*u1 + mu1)**2)/(sigma2*u2 + mu2)
    b = -1/3*(f*(sigma1*u1 + mu1))/(sigma2*u2 + mu2)
    return (u1**2 + u2**2 - L * ((Snf/a)**1/b) - n)

def dfunc(X):
    dLambda = np.zeros(len(X))
    h = 1e-3  # this is the step size used in the finite difference
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX) - func(X-dX))/(2*h)
    return dLambda

# this is the max
X1 = fsolve(dfunc, [1, 1, 0])
print(X1, func(X1))

# this is the min
X2 = fsolve(dfunc, [-1, -1, 0])
print(X2, func(X2))
When I try a simple constraint such as u1+u2=4 or u1^2+u2^2=20, it works just fine, but when I try my actual constraint function it always gives the error below. Is there a reason why? Thanks for the help.
C:\Python34\lib\site-packages\scipy\optimize\minpack.py:161:
RuntimeWarning: The iteration is not making good progress, as measured by the
improvement from the last five Jacobian evaluations.
warnings.warn(msg, RuntimeWarning)