Scipy optimize.minimize function - python

I am trying to solve a nonlinear programming task using scipy.optimize.minimize:
max r
x1**2 + y1**2 <= (1-r)**2
(x1-x2)**2 + (y1-y2)**2 >= 4*r**2
0 <= r <= 1
So I wrote the following code:
r = np.linspace(0, 1, 100)
x1 = np.linspace(0, 1, 100)
y1 = np.linspace(0, 1, 100)
x2 = np.linspace(0, 1, 100)
y2 = np.linspace(0, 1, 100)
fun = lambda r: -r
cons = ({'type': 'ineq',
'fun': lambda x1, r: [x1[0] ** 2 + x1[1] ** 2 - (1 - r) ** 2],
'args': (r,)},
{'type': 'ineq',
'fun': lambda x2, r: [x2[0] ** 2 + x2[1] ** 2 - (1 - r) ** 2],
'args': (r,)},
{'type': 'ineq',
'fun': lambda x1, x2, r: [(x1[0] - x2[0]) ** 2 + (x1[1] - x2[1]) ** 2 - 4 * r ** 2],
'args': (x2, r,)})
bnds = ((0, 1), (-1, 1), (-1, 1), (-1, 1), (-1, 1))
x0 = [0, 0, 0, 0, 0]
minimize(fun, x0, bounds=bnds, constraints=cons)
But I got the following error:
File "C:\Anaconda2\lib\site-packages\scipy\optimize\slsqp.py", line 377, in _minimize_slsqp
c = concatenate((c_eq, c_ieq))
ValueError: all the input arrays must have same number of dimensions
Please help me find my mistakes and write correct code.
UPD:
Thanks to @unutbu I've understood how to build it correctly.
fun = lambda x: -x[0]
cons = ({'type': 'ineq',
'fun': lambda x: -x[1] ** 2 - x[2] ** 2 + (1 - x[0]) ** 2},
{'type': 'ineq',
'fun': lambda x: -x[3] ** 2 - x[4] ** 2 + (1 - x[0]) ** 2},
{'type': 'ineq',
'fun': lambda x: (x[1] - x[3]) ** 2 + (x[2] - x[4]) ** 2 - 4 * x[0] ** 2})
bnds = ((0, 1), (-1, 1), (-1, 1), (-1, 1), (-1, 1))
x0 = [0.5, 0.3, 0.5, 0.3, 0.5]
answer = minimize(fun, x0, bounds=bnds, constraints=cons)
In a minimization task, the inequality constraints have to be brought to the form:
g(x) >= 0
which is why the constraints are written that way.
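For example, the first constraint x1**2 + y1**2 <= (1-r)**2 rearranges to
(1-r)**2 - x1**2 - y1**2 >= 0
which is exactly the value the first 'ineq' lambda above returns.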

Your parameter space appears to be 5-dimensional. A point in your parameter
space would be z = (r, x1, y1, x2, y2). Therefore the function to be minimized
-- and also the constraint functions -- should accept a point z and
return a scalar value.
Thus instead of
fun = lambda r: -r
use
def func(z):
    r, x1, y1, x2, y2 = z
    return -r
and instead of
lambda x1, r: [x1[0] ** 2 + x1[1] ** 2 - (1 - r) ** 2]
use
def con1(z):
    r, x1, y1, x2, y2 = z
    return x1**2 + y1**2 - (1-r)**2
and so on.
Note that simple constraints such as 0 <= r <= 1 can be handled by setting the bounds parameter instead of defining a constraint. And if the bounds for x1, y1, x2, y2 are from -1 to 1, then you might also want to change
x1 = np.linspace(0, 1, 100)
...
to
x1 = np.linspace(-1, 1, 100)
...
However, the arrays r, x1, y1, x2, y2 are not needed to minimize func, so you could just as well eliminate them from the script entirely.
import numpy as np
import scipy.optimize as optimize
"""
max r
x1**2 + y1**2 <= (1-r)**2
(x1-x2)**2 + (y1-y2)**2 >= 4*r**2
0 <= r <= 1
"""
def func(z):
    r, x1, y1, x2, y2 = z
    return -r

def con1(z):
    r, x1, y1, x2, y2 = z
    return x1**2 + y1**2 - (1-r)**2

def con2(z):
    r, x1, y1, x2, y2 = z
    return 4*r**2 - (x1-x2)**2 - (y1-y2)**2

cons = ({'type': 'ineq', 'fun': con1}, {'type': 'ineq', 'fun': con2})
bnds = ((0, 1), (-1, 1), (-1, 1), (-1, 1), (-1, 1))
guess = [0, 0, 0, 0, 0]
result = optimize.minimize(func, guess, bounds=bnds, constraints=cons)
print(result)
yields
fun: -1.0
jac: array([-1., 0., 0., 0., 0., 0.])
message: 'Optimization terminated successfully.'
nfev: 14
nit: 2
njev: 2
status: 0
success: True
x: array([ 1., 0., 0., 0., 0.])
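As a quick sanity check (a minimal sketch, not part of the original answer), you can evaluate the constraint functions at the solution; minimize treats an 'ineq' constraint as satisfied when its function returns a non-negative value (up to tolerance):
z = result.x
print(con1(z), con2(z))  # both values should be >= 0 at a feasible point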


Calculate the angle between two lines (2 options) and efficiency

The task: calculate the angle between two lines
input:
float x1, y1 (starting point)
float x2, y2 (end point)
output:
float phi (angle in deg, between the lines)
additionally:
one line is parallel to the x-axis, and phi satisfies 0° ≤ phi < 360°
option 1:
import math

rad_to_deg = lambda x: 180.0/math.pi * x
# start-point
x1 = float(input("x1: "))
y1 = float(input("y1: "))
# end-point
x2 = float(input("x2: "))
y2 = float(input("y2: "))
# slope of one line (parallel to the x-axis)
m1 = 0
# slope between the start-point and end-point
# special case: division by zero
if x1 == x2:
    m2 = "Not defined!"
    if y2 > y1:    # end-point above start-point
        phi = 90
    elif y2 < y1:  # end-point below start-point
        phi = 270
else:
    m2 = (y2 - y1) / (x2 - x1)
    # angle between the two lines (smaller angle: 0° < angle < 90°)
    angle = rad_to_deg(math.atan(abs((m1 - m2) / (1 + m1 * m2))))
    if x1 < x2 and y1 < y2:      # 1st quadrant
        phi = angle
    elif x1 > x2 and y1 < y2:    # 2nd quadrant
        phi = 180 - angle
    elif x1 > x2 and y1 > y2:    # 3rd quadrant
        phi = 180 + angle
    elif x1 < x2 and y1 > y2:    # 4th quadrant
        phi = 360 - angle
    elif y1 == y2 and x1 > x2:   # end-point left of start-point
        phi = 180
    elif y1 == y2 and x1 < x2:   # end-point right of start-point
        phi = 0
if x1 == x2 and y1 == y2:
    phi = "Error, start-point is end-point"
print("angle phi: " + str(phi))
Or should the special case be handled with try/except?
try:
    m2 = (y2 - y1) / (x2 - x1)
except ZeroDivisionError:  # x1 == x2
    m2 = "Not defined!"
    if y2 > y1:    # end-point above start-point
        phi = 90
    elif y2 < y1:  # end-point below start-point
        phi = 270
option 2: with vectors
from math import sqrt, acos, pi

rad_to_deg = lambda x: 180.0/pi * x
x1 = float(input("x1: "))
y1 = float(input("y1: "))
x2 = float(input("x2: "))
y2 = float(input("y2: "))
# vectors
u = [1, 0]  # from the start-point to a point directly right of it
v = [x2 - x1, y2 - y1]
# calculation
u_scalar_v = u[0] * v[0] + u[1] * v[1]
u_amount = sqrt(u[0] * u[0] + u[1] * u[1])
v_amount = sqrt(v[0] * v[0] + v[1] * v[1])
# angle from the scalar product
phi = round(rad_to_deg(acos(u_scalar_v / (u_amount * v_amount))), 5)
if y2 < y1:
    phi = 360 - phi
print(phi)
option 3:
treat the start point, the end point, and a point directly to the right of the start point as a triangle, and compute the angle via the law of cosines (see the sketch below).
What is the most efficient way to calculate this, and how do I decide?
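A minimal sketch of option 3 (the helper name is illustrative, assuming the same input convention as above):
from math import sqrt, acos, degrees

def angle_option3(x1, y1, x2, y2):
    # triangle sides: b = start->end, c = start->p with p = (x1 + 1, y1), a = p->end
    b = sqrt((x2 - x1)**2 + (y2 - y1)**2)
    c = 1.0
    a = sqrt((x2 - (x1 + 1))**2 + (y2 - y1)**2)
    # law of cosines: a**2 = b**2 + c**2 - 2*b*c*cos(phi)
    phi = degrees(acos((b**2 + c**2 - a**2) / (2 * b * c)))
    return phi if y2 >= y1 else 360 - phi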
Find the Angle between three points from 2D using python provides a simple solution.
import math

def getAngle(a, b, c):
    ang = math.degrees(math.atan2(c[1]-b[1], c[0]-b[0]) - math.atan2(a[1]-b[1], a[0]-b[0]))
    return ang + 360 if ang < 0 else ang
# starting-point
x1 = float(input("x1: "))
y1 = float(input("y1: "))
# middle-point (intersection point)
x2 = float(input("x2: "))
y2 = float(input("y2: "))
# ending point of horizontal line
x3 = x2 + 1 # i.e. horizontal offset from mid-point (x2, y2)
y3 = y2
a = (x1, y1)
b = (x2, y2)
c = (x3, y3)
angle = getAngle(a, b, c)
Example
a = (5, 0)
b = (0, 0)
c = (0, 5)
print(getAngle(a, b, c)) # result 90.0
Example 2: test with random points
from random import randrange, sample

radius = 10
points = []
for i1 in range(10):
    for i2 in range(10):
        points.append((randrange(-radius, radius+1), randrange(-radius, radius+1)))

x1y1 = sample(points[:50], 10)
x2y2 = sample(points[50:], 10)
x3y3 = [(x+1, y) for x, y in x2y2]

for i in range(len(x1y1)):
    a, b, c = x1y1[i], x2y2[i], x3y3[i]
    angle = getAngle(a, b, c)
    print(i, ": ", a, b, c, '=> ', angle)
Result
0 : (10, -6) (8, -10) (9, -10) => 296.565051177078
1 : (0, -9) (-4, -3) (-3, -3) => 56.309932474020215
2 : (-6, 10) (5, 9) (6, 9) => 185.1944289077348
3 : (0, 1) (-2, 1) (-1, 1) => 0.0
4 : (2, -1) (-3, 7) (-2, 7) => 57.9946167919165
5 : (2, -3) (-10, -8) (-9, -8) => 337.3801350519596
6 : (2, -6) (-10, 5) (-9, 5) => 42.510447078000844
7 : (7, 8) (7, 3) (8, 3) => 270.0
8 : (2, -2) (-4, 4) (-3, 4) => 45.0
9 : (1, -2) (-2, 7) (-1, 7) => 71.56505117707799
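Note (not part of the original answer): since c is always b shifted one unit to the right, the reference ray contributes atan2(0, 1) == 0, and the whole computation collapses to a single call. A sketch of the equivalent shortcut:
import math

def getAngle2(a, b):
    # same result as getAngle(a, b, (b[0] + 1, b[1])), normalized to [0, 360)
    return -math.degrees(math.atan2(a[1]-b[1], a[0]-b[0])) % 360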

About "Scipy.optimize": message: 'Desired error not necessarily achieved due to precision loss

I am implementing threshold regression with the SCAD penalty function. SCAD has its own defined derivative, so I implemented that as well and pass it to minimize through the jac parameter.
I first minimized the target function without the SCAD penalty to produce an initial guess, and that works.
However, when solving the problem with the SCAD penalty included, the iterator cannot give correct results and returns 'Desired error not necessarily achieved due to precision loss.'
What shall I do?
import pandas as pd
import scipy as sc
from scipy import stats as st
import numpy as np
from scipy import optimize as opt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
def loss_SCAD(beta_matrix):
    M_MSE = np.dot(x, beta_matrix[:tmp_lt])[:, None]
    M_c = np.array(beta_matrix[-M:], ndmin=1)
    scad = 0
    for idx, c in enumerate(M_c):
        nor_distri_x = (x[:, 1][:, None] - c) / tmp_h
        nor_distri_CDF = st.norm.cdf(nor_distri_x)  # smoothed indicator function
        M_MSE += np.dot(x, beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)])[:, None] * nor_distri_CDF
        beta_param = np.sum(np.abs(beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)]))
        if beta_param > a * tmp_lam:
            scad += 0.5 * ((a + 1) * tmp_lam ** 2)
        elif beta_param > tmp_lam:
            scad += ((a ** 2 - 1) * tmp_lam ** 2 - (beta_param - a * tmp_lam) ** 2) / (2 * (a - 1))
        else:
            scad += tmp_lam * beta_param
    MSE = 0.5 * np.average((y - M_MSE) ** 2) + scad
    return MSE

def loss_derivative(beta_matrix):
    deriv_list = []
    other_M_MSE, nor_distri_CDF = 0, 0
    first_M_MSE = y - np.dot(x, beta_matrix[:tmp_lt])[:, None]
    M_c = np.array(beta_matrix[-M:], ndmin=1)
    for idx, c in enumerate(M_c):
        nor_distri_x = (x[:, 1][:, None] - c) / tmp_h
        nor_distri_CDF = st.norm.cdf(nor_distri_x)  # smoothed indicator function
        other_M_MSE += np.dot(x, beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)])[:, None] * nor_distri_CDF
    square_part = first_M_MSE - other_M_MSE
    for idx, beta_param in enumerate(beta_matrix[:tmp_lt]):  # gradient update: first segment
        deriv_list.append(np.average(square_part * -x[:, idx][:, None]))
    for idx, c in enumerate(M_c):
        beta_param = np.sum(np.abs(beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)]))
        if beta_param == 0:  # gradient update: SCAD term
            scad = 0
        elif beta_param > tmp_lam:
            scad = np.max(((a * tmp_lam - beta_param) / (a - 1), 0))
        else:
            scad = tmp_lam
        for i in range(tmp_lt):  # gradient update: segment coefficients
            deriv_list.append(np.average(square_part * -x[:, i][:, None] * nor_distri_CDF) + scad)
    for idx, c in enumerate(M_c):  # gradient update: threshold parameters
        nor_distri_PDF = st.norm.pdf((tmp_x - c) / tmp_h)
        deriv_list.append(np.average(square_part * np.dot(x, beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)])[:, None] * nor_distri_PDF / tmp_h))
    return np.array(deriv_list)

def loss(beta_matrix):
    M_MSE = np.dot(x, beta_matrix[:tmp_lt])[:, None]
    M_c = np.array(beta_matrix[-M:], ndmin=1)
    for idx, c in enumerate(M_c):
        nor_distri_x = (x[:, 1][:, None] - c) / tmp_h
        nor_distri_CDF = st.norm.cdf(nor_distri_x)  # smoothed indicator function
        M_MSE += np.dot(x, beta_matrix[tmp_lt * (idx + 1):tmp_lt * (idx + 2)])[:, None] * nor_distri_CDF
    MSE = 0.5 * np.average((y - M_MSE) ** 2)
    return MSE
np.random.seed(1171359)
a = 3.7
tmp_x = np.random.normal(loc=0, scale=2, size=(400, 1))
tmp_beta = [2, -2, 5, 2, 9, 7]
tmp_c = [-np.sqrt(2), np.sqrt(2)]
M = np.shape(tmp_c)[0]
x1, x2, x3 = tmp_x[tmp_x < tmp_c[0]], tmp_x[(tmp_x >= tmp_c[0]) & (tmp_x < tmp_c[1])], tmp_x[tmp_x >= tmp_c[1]]
y1, y2, y3 = tmp_beta[0] + tmp_beta[1] * x1, tmp_beta[2] + tmp_beta[3] * x2, tmp_beta[4] + tmp_beta[5] * x3
x = np.reshape(np.concatenate((x1, x2, x3)), (-1, 1))
y = np.reshape(np.concatenate((y1, y2, y3)) + np.random.randn(400), (-1, 1))
x = np.concatenate((np.ones_like(x), x), axis=1)
tmp_lt = x.shape[1]
tmp_h = np.log(400) / 400 * tmp_x.std(ddof=1)
tmp_lam = 0.5
tmp_res = opt.minimize(fun=loss, x0=np.append(np.ones(7), 2))
tmp_result = opt.minimize(fun=loss_SCAD, x0=tmp_res.x, jac=loss_derivative)
# print(tmp_result.x, tmp_res.x, sep='\n')
k = tmp_result.x[:-2].reshape((2, -1), order='F')
y_hat_1 = np.dot(x[x[:, 1] < tmp_result.x[-2]], k[:, 0].reshape((-1, 1)))
y_hat_2 = np.dot(x[(x[:, 1] >= tmp_result.x[-2]) & (x[:, 1] < tmp_result.x[-1])], k[:, 0].reshape((-1, 1)) + k[:, 1].reshape((-1, 1)))
y_hat_3 = np.dot(x[x[:, 1] >= tmp_result.x[-1]], k[:, 0].reshape((-1, 1)) + k[:, 1].reshape((-1, 1)) + k[:, 2].reshape((-1, 1)))
print(k, tmp_result.fun, tmp_result, sep='\n')
fig = plt.figure(111, figsize=(6, 6))
plt.plot(x[:, 1][x[:, 1] < tmp_result.x[-2]], y_hat_1, c='orange', alpha=0.6, linewidth=4)
plt.plot(x[:, 1][(x[:, 1] >= tmp_result.x[-2]) & (x[:, 1] < tmp_result.x[-1])], y_hat_2, c='blue', alpha=0.6, linewidth=4)
plt.plot(x[:, 1][x[:, 1] >= tmp_result.x[-1]], y_hat_3, c='green', alpha=0.6, linewidth=4)
plt.plot(x1, y1, c='black', alpha=0.6, linewidth=4)
plt.plot(x2, y2, c='black', alpha=0.6, linewidth=4)
plt.plot(x3, y3, c='black', alpha=0.6, linewidth=4)
plt.scatter(x[:, 1], y, s=12, alpha=0.5, c='gray')
plt.axis([tmp_x.min() * 1.05, tmp_x.max() * 1.05, y.min() * 1.05, y.max() * 1.05])
plt.show()
The returned result is:
fun: 1.6869804934437402
hess_inv: array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
jac: array([ 1.98384534e-07, -2.81814249e-07, -1.51727486e-08, -2.17460279e-08,
-1.51727486e-08, -2.17460279e-08, -1.15503569e+00, -6.99892939e-01])
message: 'Desired error not necessarily achieved due to precision loss.'
nfev: 84
nit: 0
njev: 72
status: 2
success: False
x: array([ 1.93751362, -2.04506983, 3.09129046, 4.13404934, 4.32073797,
4.87646363, -1.391239 , 1.4099789 ])
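Not part of the original post, but a standard first step when a gradient-based method stops with this message is to compare the analytic gradient against a finite-difference approximation, for example with scipy.optimize.check_grad (a small sketch, assuming the definitions above):
err = opt.check_grad(loss_SCAD, loss_derivative, tmp_res.x)
print(err)  # a large value suggests the hand-written jac does not match the objective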

How to loop through lists from pandas dataframe in a function

Here is my dataframe,
df = pd.DataFrame({'Id': [102,103,104,303,305],'ExpG_Home':[1.8,1.5,1.6,1.8,2.9],
'ExpG_Away':[2.2,1.3,1.2,2.8,0.8],
'HomeG_Time':[[93, 109, 187],[169], [31, 159],[176],[16, 48, 66, 128]],
'AwayG_Time':[[90, 177],[],[],[123,136],[40]]})
First, I need to create an array y: for a given Id number, it takes its values from the same row (ExpG_Home and ExpG_Away).
y = [1 - (ExpG_Home + ExpG_Away), ExpG_Home, ExpG_Away]
Second (this I found much harder): for the Id used in creating y, the function below takes the corresponding lists from HomeG_Time and AwayG_Time and creates an array. Unfortunately, my function handles one row at a time, and I need to do this for a large dataset.
x1 = [1,0,0]
x2 = [0,1,0]
x3 = [0,0,1]
total_timeslot = 200  # number of timeslots per game
k = 1  # constant

# For Id=102 with ExpG_Home=2.2 and ExpG_Away=1.8
HomeG_Time = [93, 109, 187]
AwayG_Time = [90, 177]
y = np.array([1-(2.2 + 1.8)/k, 2.2/k, 1.8/k])
# output of y = [0.98 , 0.011, 0.009]

def squared_diff(x1, x2, x3, y):
    ssd = []
    for k in range(total_timeslot):
        if k in HomeG_Time:
            ssd.append(sum((x2 - y) ** 2))
        elif k in AwayG_Time:
            ssd.append(sum((x3 - y) ** 2))
        else:
            ssd.append(sum((x1 - y) ** 2))
    return ssd

sum(squared_diff(x1, x2, x3, y))
Out[37]: 7.880400000000012
This output is for the first row only.
Here is the complete snippet:
>>> import numpy as np
>>> x1 = np.array( [1,0,0] )
>>> x2 = np.array( [0,1,0] )
>>> x3 = np.array( [0,0,1] )
>>> total_timeslot = 200
>>> HomeG_Time = [93, 109, 187]
>>> AwayG_Time = [90, 177]
>>> ExpG_Home=2.2
>>> ExpG_Away=1.8
>>> y = np.array( [1 - (ExpG_Home + ExpG_Away), ExpG_Home, ExpG_Away] )
>>> def squared_diff(x1, x2, x3, y):
...     ssd = []
...     for k in range(total_timeslot):
...         if k in HomeG_Time:
...             ssd.append(sum((x2 - y) ** 2))
...         elif k in AwayG_Time:
...             ssd.append(sum((x3 - y) ** 2))
...         else:
...             ssd.append(sum((x1 - y) ** 2))
...     return ssd
...
>>> sum(squared_diff(x1, x2, x3, y))
4765.599999999989
Assuming this, calculate y as an (N, 3) array using pandas.DataFrame.apply:
>>> y = np.array( df.apply(lambda row: [1 - (row.ExpG_Home + row.ExpG_Away),
... row.ExpG_Home, row.ExpG_Away ],
... axis=1).tolist() )
>>> y.shape
(5, 3)
Now calculate the squared error for a given x:
>>> def squared_diff(x, y):
...     return np.sum( np.square(x - y), axis=1)
In your case, if error2 is squared_diff(x2, y), you are adding it as many times as there are entries in HomeG_Time, so count the occurrences:
>>> n3 = df.AwayG_Time.apply(len)
>>> n2 = df.HomeG_Time.apply(len)
>>> n1 = 200 - (n2 + n3)
The final sum of squared error is (as per your calculation)
>>> squared_diff(x1, y) * n1 + squared_diff(x2, y) * n2 + squared_diff(x3, y) * n3
0 4766.4
1 2349.4
2 2354.4
3 6411.6
4 4496.2
dtype: float64
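To attach this directly to the dataframe (a small follow-up sketch, not in the original answer):
df['res'] = squared_diff(x1, y) * n1 + squared_diff(x2, y) * n2 + squared_diff(x3, y) * n3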
try this,
import pandas as pd
import numpy as np
df = pd.DataFrame({'Id': [102,103,104,303,305],'ExpG_Home':[1.8,1.5,1.6,1.8,2.9],
'ExpG_Away':[2.2,1.3,1.2,2.8,0.8],
'HomeG_Time':[[93, 109, 187],[169], [31, 159],[176],[16, 48, 66, 128]],
'AwayG_Time':[[90, 177],[],[],[123,136],[40]]})
x1 = [1,0,0]
x2 = [0,1,0]
x3 = [0,0,1]
k=1
total_timeslot = 200 # number of timeslot per game.
def squared_diff(x1, x2, x3, AwayG_Time, HomeG_Time, y):
    ssd = []
    for k in range(total_timeslot):
        if k in HomeG_Time:
            ssd.append(sum((x2 - y) ** 2))
        elif k in AwayG_Time:
            ssd.append(sum((x3 - y) ** 2))
        else:
            ssd.append(sum((x1 - y) ** 2))
    return ssd
s=pd.DataFrame( pd.concat([df,1-(df['ExpG_Home']+df['ExpG_Away'])/k,df['ExpG_Home']/k,df['ExpG_Away']/k],axis=1).values)
df['res']=s.apply(lambda x: sum(squared_diff(x1,x2,x3,x[0],x[3],np.array([x[5],x[6],x[7]]))),axis=1)
del s
print(df)
Output:
AwayG_Time ExpG_Away ExpG_Home HomeG_Time Id res
0 [90, 177] 2.2 1.8 [93, 109, 187] 102 4766.4
1 [] 1.3 1.5 [169] 103 2349.4
2 [] 1.2 1.6 [31, 159] 104 2354.4
3 [123, 136] 2.8 1.8 [176] 303 6411.6
4 [40] 0.8 2.9 [16, 48, 66, 128] 305 4496.2
def squared_diff(row):
    y = np.array([1 - (row.ExpG_Home + row.ExpG_Away), row.ExpG_Home, row.ExpG_Away])
    HomeG_Time = row.HomeG_Time
    AwayG_Time = row.AwayG_Time
    x1 = np.array([1, 0, 0])
    x2 = np.array([0, 1, 0])
    x3 = np.array([0, 0, 1])
    total_timeslot = 200
    ssd = []
    for k in range(total_timeslot):
        if k in HomeG_Time:
            ssd.append(sum((x2 - y) ** 2))
        elif k in AwayG_Time:
            ssd.append(sum((x3 - y) ** 2))
        else:
            ssd.append(sum((x1 - y) ** 2))
    return sum(ssd)

df.apply(squared_diff, axis=1)
Out[]:
0 4766.4
1 2349.4
2 2354.4
3 6411.6
4 4496.2

Solving a system of non linear equations and inequalities at once in SymPy

I'm new to SymPy and Python, and I ran into a problem.
I'm trying to solve a system 'kunSys':
>>> kunSys
[-w0 + w1 - 8*x1 + 20,
-2*w0 + w2 - 8*x2 + 4,
w0*(-x1 - 2*x2 + 2),
w1*x1,
w2*x2,
w0 >= 0,
w1 >= 0,
w2 >= 0]
With a list of variables 'lagVars':
>>> lagVars
(x1, x2, w0, w1, w2)
As you can see, my system contains both equations and inequalities.
Trying:
>>> solve(kunSys,lagVars)
Get:
NotImplementedError:
inequality has more than one symbol of interest
But it works fine when solving the equations and inequalities separately:
>>> kunSys[:5]
[-w0 + w1 - 8*x1 + 20,
-2*w0 + w2 - 8*x2 + 4,
w0*(-x1 - 2*x2 + 2),
w1*x1,
w2*x2]
>>> solve(kunSys[:5],lagVars)
[(0, 0, 0, -20, -4),
(0, 1/2, 0, -20, 0),
(0, 1, -2, -22, 0),
(2, 0, 4, 0, 4),
(11/5, -1/10, 12/5, 0, 0),
(5/2, 0, 0, 0, -4),
(5/2, 1/2, 0, 0, 0)]
>>> kunSys[5:]
[w0 >= 0, w1 >= 0, w2 >= 0]
>>> solve(kunSys[5:],lagVars)
(0 <= w0) & (0 <= w1) & (0 <= w2) & (w0 < oo) & (w1 < oo) & (w2 < oo)
But this is not the wanted result.
I tried to use solveset(), but it doesn't seem to work either.
I googled a lot, but failed to find the answer.
Question:
How do I solve this system?
SymPy presently doesn't know how to handle mixed inequalities and equalities, but since your inequalities are just variable >= 0, you can work around this by defining those symbols as nonnegative. solve will then filter the solutions based on that:
>>> w0, w1, w2 = symbols('w0:3', nonnegative=True)
>>> x1, x2 = symbols("x1 x2")
>>> solve([-w0 + w1 - 8*x1 + 20,
... -2*w0 + w2 - 8*x2 + 4,
... w0*(-x1 - 2*x2 + 2),
... w1*x1,
... w2*x2], (w0, w1, w2, x1, x2))
[(0, 0, 0, 5/2, 1/2), (12/5, 0, 0, 11/5, -1/10), (4, 0, 4, 2, 0)]
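Equivalently (a sketch, not from the original answer), you can solve the equations without assumptions and filter the resulting tuples yourself; recall lagVars = (x1, x2, w0, w1, w2), so the multipliers are the last three entries of each tuple:
>>> sols = solve(kunSys[:5], lagVars)
>>> [s for s in sols if all(v >= 0 for v in s[2:])]
[(2, 0, 4, 0, 4), (11/5, -1/10, 12/5, 0, 0), (5/2, 1/2, 0, 0, 0)]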

Functional Constrained Optimization Python Incorrect Answer

I'm trying to minimize a functional (the integral of a function), subject to constraints:
import numpy as np
from scipy import integrate
from scipy.optimize import minimize

def s(y, a, b, c, d):
    v = [1, y, y**2, y**3]
    alpha = [a, b, c, d]
    q = np.inner(v, alpha)
    return -q*np.exp(-q)

def p(y, a, b, c, d):
    v = [1, y, y**2, y**3]
    alpha = [a, b, c, d]
    q = np.inner(v, alpha)
    return np.exp(-q)

def Q(u):
    a, b, c, d = u
    d = integrate.quad(lambda y: s(y, a, b, c, d), 0, 1)
    return d[0]

cons = ({'type': 'eq', 'fun': integrate.quad(lambda y: p(y,a,b,c,d), 0, 1)[0]-1},
        {'type': 'eq', 'fun': integrate.quad(lambda y: (p(y,a,b,c,d)*y), 0, 1)[0]-0.483523521402009},
        {'type': 'eq', 'fun': integrate.quad(lambda y: (p(y,a,b,c,d)*y**2), 0, 1)[0]-0.300458990347083},
        {'type': 'eq', 'fun': integrate.quad(lambda y: (p(y,a,b,c,d)*y**3), 0, 1)[0]-0.209996591802522})

res = minimize(Q, x0=(0, 0, 0, 0), method='BFGS', constraints=cons)
print(res)
I get this output:
fun: -0.36787942624169967
hess_inv: array([[ 17.98311921, -49.74794121, 2.50822967, 36.21942131],
[ -49.74794121, 191.70720321, -23.14586623, -158.65310285],
[ 2.50822967, -23.14586623, 8.1640543 , 25.72129091],
[ 36.21942131, -158.65310285, 25.72129091, 142.59127393]])
jac: array([ -3.54647636e-06, -1.94460154e-06, -1.75461173e-06,
3.24100256e-07])
message: 'Optimization terminated successfully.'
nfev: 126
nit: 19
njev: 21
status: 0
success: True
x: array([ 0.99920744, 0.0092224 , -0.02276881, 0.0150456 ])
However, this array x does not satisfy the constraint:
x = (0.99920744, 0.0092224, -0.02276881, 0.0150456)
integrate.quad(lambda y: p(y, x[0], x[1], x[2], x[3]), 0, 1)[0]
0.3678829742546207
which is not 1, as the constraint specifically requires. How is it claiming convergence when it clearly has not converged?
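A likely explanation (not part of the original post): minimize ignores the constraints argument for method='BFGS' (recent SciPy versions emit a RuntimeWarning that the method cannot handle constraints), so the problem was solved unconstrained. Separately, each constraint's 'fun' must be a callable of the parameter vector, not a number computed once up front. A sketch of both fixes, reusing p and Q from above:
moments = [1, 0.483523521402009, 0.300458990347083, 0.209996591802522]
cons = tuple({'type': 'eq',
              'fun': lambda u, n=n, m=m: integrate.quad(lambda y: p(y, *u) * y**n, 0, 1)[0] - m}
             for n, m in enumerate(moments))
res = minimize(Q, x0=(0, 0, 0, 0), method='SLSQP', constraints=cons)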
