Not iterable Error with constraints in minimize from scipy.optimize - python

I just started studying optimization with Python and I am facing an issue.
I have a problem where I want to minimize my objective function (obj_fun) using minimize from scipy.optimize.
I will share an example:
import numpy as np

def analysis(A):
    N = []
    for i in A:
        N.append(i*3)
    return N

def cons(A):
    N = analysis(A)
    C = []
    for i in len(N):
        if N[i] < 2:
            C.append({'type': 'ineq', 'fun': lambda x: x[0]*N[i]})
        else:
            C.append({'type': 'ineq', 'fun': lambda x: x[0]-N[i]})
    return C

def obj_fun(A):
    """Objective function returns the weight of the structure"""
    w = 0.5*[1*A[0]+2*A[1]+3*A[2]]
    return w

# Initial values
A0 = np.array([0.001 for i in range(0, 3)])
N = analysis(A0)

## Optimization
bnds = [(1e-6, None) for i in range(len(A0))]
from scipy.optimize import minimize
sol = minimize(obj_fun, x0=A0, method='trust-constr', bounds=bnds,
               constraints=cons)
print(sol)
The full error I get is:
runfile('C:/Users/Myc/Documents/Python Scripts/example stack.py', wdir='C:/Users/Myc/Documents/Python Scripts')
Traceback (most recent call last):
  File "C:\Users\Myc\Documents\Python Scripts\example stack.py", line 40, in <module>
    sol = minimize(obj_fun, x0=A0, method='trust-constr', bounds=bnds, constraints=cons)
  File "C:\Users\Myc\anaconda3\lib\site-packages\scipy\optimize\_minimize.py", line 605, in minimize
    constraints = standardize_constraints(constraints, x0, meth)
  File "C:\Users\Myc\anaconda3\lib\site-packages\scipy\optimize\_minimize.py", line 825, in standardize_constraints
    constraints = list(constraints)  # ensure it's a mutable sequence
TypeError: 'function' object is not iterable
I know the main problem is how I define the constraints, and I could replace constraints=cons with constraints=Cons1 if I defined Cons1 = cons(A0) before the optimization.
However, that wouldn't help me, because I need the analysis function to be executed on every iteration of the optimization in order to update the parameters N used by the constraints.
How can I define the constraints?

The original script:
def obj_fun(A):
    return 7*A[0] + 3*A[1] + 7*A[2]

def analysis(A):
    N = []
    for i in A:
        N.append(i*3)
    return N

def cons(A):
    n = analysis(A)
    C = []
    for i in range(len(A)):
        if n[i] < 4:
            C.append({'type': 'ineq', 'fun': lambda x: x[i]**2 / n[i]})
        else:
            C.append({'type': 'ineq', 'fun': lambda x: x[i] - n[i]})
    return C

A0 = [1, 2, 3]
C = cons(A0)
bnds = [(1e-6, None) for i in range(len(A0))]
from scipy.optimize import minimize
sol = minimize(obj_fun, x0=A0, method='trust-constr', bounds=bnds, constraints=C)
print(sol)
runs with:
/usr/local/lib/python3.8/dist-packages/scipy/optimize/_hessian_update_strategy.py:182: UserWarning: delta_grad == 0.0. Check if the approximated function is linear. If the function is linear better results can be obtained by defining the Hessian as zero instead of using quasi-Newton approximations.
warn('delta_grad == 0.0. Check if the approximated '
barrier_parameter: 0.00016000000000000007
barrier_tolerance: 0.00016000000000000007
cg_niter: 15
cg_stop_cond: 1
constr: [array([9.00009143]), array([4.57149698e-05]), array([4.57149698e-05]), array([2.38571416e-05, 5.43334162e-05, 9.00004571e+00])]
constr_nfev: [40, 40, 40, 0]
constr_nhev: [0, 0, 0, 0]
constr_njev: [0, 0, 0, 0]
constr_penalty: 1.0
constr_violation: 0.0
execution_time: 0.0873115062713623
fun: 63.00065000502843
grad: array([7. , 3. , 6.99999999])
jac: [array([[0. , 0. , 2.00001017]]), array([[0., 0., 1.]]), array([[0., 0., 1.]]), array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])]
lagrangian_grad: array([1.77635684e-15, 1.55431223e-14, 5.67948534e-14])
message: '`gtol` termination condition is satisfied.'
method: 'tr_interior_point'
nfev: 40
nhev: 0
nit: 14
niter: 14
njev: 10
optimality: 5.679485337974424e-14
status: 1
success: True
tr_radius: 18734.614693588483
v: [array([-1.77775972e-05]), array([-3.49997333]), array([-3.49997333]), array([-7.00000000e+00, -3.00000000e+00, -1.77776895e-05])]
x: array([2.38571416e-05, 5.43334162e-05, 9.00004571e+00])
Here's what C looks like:
In [36]: C
Out[36]:
[{'type': 'ineq', 'fun': <function __main__.cons.<locals>.<lambda>(x)>},
{'type': 'ineq', 'fun': <function __main__.cons.<locals>.<lambda>(x)>},
{'type': 'ineq', 'fun': <function __main__.cons.<locals>.<lambda>(x)>}]
A0 is used to create the 3 constraint functions.
The analysis function just multiplies A by 3.
In [38]: analysis(A0)
Out[38]: [3, 6, 9]
In [39]: A0
Out[39]: [1, 2, 3]
In [40]: analysis(A0)
Out[40]: [3, 6, 9]
In [41]: np.array(A0)*3
Out[41]: array([3, 6, 9])
In your latest cons you dropped the range, and you pass cons itself rather than cons(A0). The constraints parameter is supposed to be a list of dicts, as shown in C.
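As a side note (a sketch of my own, not part of the answer above, and using a made-up requirement n[i] >= 2 purely to illustrate the pattern): if the goal is for analysis to be re-run on every iteration, each constraint function can call analysis(x) itself, with the loop index bound through a default argument (i=i) so that each closure keeps its own i. This reuses obj_fun and analysis from the script above.
from scipy.optimize import minimize

def cons_live(A0):
    """Constraint dicts whose functions re-run analysis(x) on every call."""
    C = []
    for i in range(len(A0)):
        def g(x, i=i):                  # i=i freezes the index for this constraint
            n = analysis(x)             # recomputed at every iterate
            return n[i] - 2             # hypothetical requirement: n[i] >= 2
        C.append({'type': 'ineq', 'fun': g})
    return C

A0 = [1, 2, 3]
sol = minimize(obj_fun, x0=A0, method='trust-constr',
               bounds=[(1e-6, None)] * len(A0), constraints=cons_live(A0))
print(sol.x)   # each x[i] is pushed toward 2/3, where 3*x[i] == 2
This way the constraint values are rebuilt from the current x every time they are evaluated, while constraints is still the concrete list of dicts that minimize expects.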

Related

AttributeError: 'float' object has no attribute 'gradient' scipy python

I am optimizing a linear function with scipy,
def func(weights):
    var = ['x1', 'x2', 'x3', 'x4']
    if weights is None:
        weights = np.ones(len(var)) / len(var)
    return len(set([var[i] for i in range(len(weights)) if weights[i] > 0])) / len(var)
res = minimize(lambda x: func(x), x0=[0.25, 0.25, 0.25, 0.25], method='SLSQP',
               jac=ad.gh(lambda x: func(x))[0], bounds=((0., 1.),)*4,
               options={'disp': True, 'ftol': 1e-20, 'maxiter': 1000},
               constraints={'type': 'eq', 'fun': lambda x: sum(x) - 1.0})
I am getting the following error.
Traceback (most recent call last):
File "D:/applicatio/Sub Applicatio/main.py", line 338, in <module>
constraints= {'type': 'eq', 'fun': lambda x: sum(x) - 1.0})
File "C:\Users\hp\Downloads\WinPython-64bit-3.5.1.2\python-3.5.1.amd64\lib\site-packages\scipy\optimize\_minimize.py", line 455, in minimize
constraints, callback=callback, **options)
File "C:\Users\hp\Downloads\WinPython-64bit-3.5.1.2\python-3.5.1.amd64\lib\site-packages\scipy\optimize\slsqp.py", line 383, in _minimize_slsqp
g = append(fprime(x),0.0)
File "C:\Users\hp\Downloads\WinPython-64bit-3.5.1.2\python-3.5.1.amd64\lib\site-packages\scipy\optimize\optimize.py", line 289, in function_wrapper
return function(*(wrapper_args + args))
File "C:\Users\hp\Downloads\WinPython-64bit-3.5.1.2\python-3.5.1.amd64\lib\site-packages\ad\__init__.py", line 1090, in grad
return numpy.array(ans.gradient(list(xa)))
AttributeError: 'float' object has no attribute 'gradient'
How can I optimize this kind of simple linear function ? Any suggestions ? Thanks.
What on earth is your func doing?
def func(weights):
    ....
    return len(set())/len(var)
You get a set object, and then its length, the number of terms. What does that represent? That's not linear; it takes integer jumps.
In [318]: x0=[0.25,0.25,0.25,0.25]
In [319]: def func(weights):
     ...:     var = ['x1', 'x2', 'x3', 'x4']
     ...:     if weights is None:
     ...:         weights = np.ones(len(var)) / len(var)
     ...:     return len(set([var[i] for i in range(len(weights)) if weights[i]>0]))/len(var)
     ...:
In [320]: func(x0)
Out[320]: 1.0
In [321]: x0=np.array(x0)
In [322]: func(x0)
Out[322]: 1.0
In [323]: func(x0+.1)
Out[323]: 1.0
In [324]: func(x0-.1)
Out[324]: 1.0
In [325]: func(x0-1)
Out[325]: 0.0
In fact all it does is count how many of the x0 values are >0 and divide by 4 - so it generates 0,.25,.5,.75 or 1.
minimize is going to start with x0 and figure out how func(x0) varies with small changes in x0.
And your jac is based on this same func as well: jac=ad.gh(lambda x: func(x))[0].
==============
I don't think you need to use the lambda
`lambda x: func(x)`
Just give func as the argument. It takes the correct number of arguments (e.g. the initial x0).
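For instance (a minimal sketch reusing func from the question; the jac argument is left out because I don't know what ad.gh is):
from scipy.optimize import minimize

res = minimize(func, x0=[0.25, 0.25, 0.25, 0.25], method='SLSQP',
               bounds=((0., 1.),) * 4,
               options={'disp': True, 'ftol': 1e-20, 'maxiter': 1000},
               constraints={'type': 'eq', 'fun': lambda x: sum(x) - 1.0})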
===================
Running your code, but without the jac parameter (I don't know what ad.gh is):
In [543]: def func(weights):
     ...:     var = ['x1', 'x2', 'x3', 'x4']
     ...:     if weights is None:
     ...:         weights = np.ones(len(var)) / len(var)
     ...:     return len(set([var[i] for i in range(len(weights)) if weights[i]>0]))/len(var)
     ...:
In [544]: optimize.minimize(lambda x: func(x), x0=[0.25,0.25,0.25,0.25], method='SLSQP',
     ...:     bounds=((0.,1.),)*4, options={'disp':True, 'ftol': 1e-20, 'maxiter': 1000},
     ...:     constraints={'type': 'eq', 'fun': lambda x: sum(x) - 1.0})
Optimization terminated successfully. (Exit mode 0)
Current function value: 1.0
Iterations: 1
Function evaluations: 6
Gradient evaluations: 1
Out[544]:
fun: 1.0
jac: array([ 0., 0., 0., 0., 0.])
message: 'Optimization terminated successfully.'
nfev: 6
nit: 1
njev: 1
status: 0
success: True
x: array([ 0.25, 0.25, 0.25, 0.25])
It looks like it's trying small changes around x0, and finds that there isn't any variation (small changes don't make any elements go to 0). To put it another way, your func is already at a local minimum, a flat region.
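One way to see this numerically (a small check of my own, not part of the original answer): a finite-difference gradient of func at x0 comes back zero in every component, because a perturbation of 1e-8 never pushes any weight across 0:
import numpy as np
from scipy.optimize import approx_fprime

x0 = np.array([0.25, 0.25, 0.25, 0.25])
grad = approx_fprime(x0, func, 1e-8)   # finite-difference gradient of func at x0
print(grad)                            # [0. 0. 0. 0.] -> the optimizer sees a flat region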

Scipy minimize constrained function

I am solving the following optimization problem: minimize log(x1^2 + 1) + x2^4 + x1*x3 subject to x1^3 - x2^2 - 1 >= 0, x1 >= 0 and x3 >= 0, with this Python code:
from scipy.optimize import minimize
import math
def f(x):
    return math.log(x[0]**2 + 1) + x[1]**4 + x[0]*x[2]

x0 = [0, 0, 0]

cons = ({'type': 'ineq', 'fun': lambda x: x[0]**3 - x[1]**2 - 1},
        {'type': 'ineq', 'fun': lambda x: x[0]},
        {'type': 'ineq', 'fun': lambda x: x[2]})

res = minimize(f, x0, constraints=cons)
print(res)
I am getting an error
message: 'Inequality constraints incompatible'
What can cause this error?
The issue seems to be with your initial guess. If I change your starting values to
x0 = [1.0, 1.0, 1.0]
then your code will execute fine (at least on my machine):
Python 3.5.1 (v3.5.1:37a07cee5969, Dec 6 2015, 01:54:25) [MSC v.1900 64 bit (AMD64)] on win32
message: 'Optimization terminated successfully.'
njev: 10
jac: array([ 1., 0., 1., 0.])
fun: 0.6931471805582502
nit: 10
status: 0
x: array([ 1.00000000e+00, -1.39724765e-06, 1.07686548e-14])
success: True
nfev: 51
Scipy's optimize module has lots of options. See the documentation or this tutorial. Since you didn't specify the method here, it will use Sequential Least SQuares Programming (SLSQP). Alternatively, you could use the Trust-Region Constrained Algorithm (trust-constr).
For this problem, I found that trust-constr seemed much more robust to starting values than SLSQP, handling starting values from [-2,-2,-2] to [10,10,10], although negative initial values resulted in increased iterations, as you'd expect. Negative values below -2 exceeded the maximum iterations, although I suspect they might still converge if you increased the iteration limit. Specifying negative values at all for x1 and x3 is of course kind of silly; I just did it to get a sense of how robust the method was to a range of starting values.
The specifications for SLSQP and trust-constr are conceptually the same, but the syntax is a little different (in particular, note the use of NonlinearConstraint).
import math
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint, SR1

def f(x):
    return math.log(x[0]**2 + 1) + x[1]**4 + x[0]*x[2]

constr_func = lambda x: np.array([x[0]**3 - x[1]**2 - 1,
                                  x[0],
                                  x[2]])

x0 = [0., 0., 0.]
nonlin_con = NonlinearConstraint(constr_func, 0., np.inf)

res = minimize(f, x0, method='trust-constr',
               jac='2-point', hess=SR1(),
               constraints=nonlin_con)
Here are the results, edited for conciseness:
fun: 0.6931502233468916
message: '`gtol` termination condition is satisfied.'
x: array([1.00000063e+00, 8.21427026e-09, 2.40956900e-06])
Note that the function value and x values are the same as in @CoryKramer's answer. The x array may look superficially different at first glance, but both answers round to [1, 0, 0].

Ineq and eq constraints with scipy.optimize.minimize()

I am attempting to understand the behavior of the constraints in scipy.optimize.minimize:
First, I create 4 assets and 100 scenarios of returns. Ranked by average return, the funds are, from best to worst: D > B > A > C.
import numpy as np
import pandas as pd

# seed first
np.random.seed(1)
df_returns = pd.DataFrame(np.random.rand(100, 4) - 0.25, columns=list('ABCD'))
df_returns.head()
A B C D
0 0.167022 0.470324 -0.249886 0.052333
1 -0.103244 -0.157661 -0.063740 0.095561
2 0.146767 0.288817 0.169195 0.435220
3 -0.045548 0.628117 -0.222612 0.420468
4 0.167305 0.308690 -0.109613 -0.051899
and a set of weights
weights = pd.Series([0.25, 0.25, 0.25, 0.25], index=list('ABCD'))
0
A 0.25
B 0.25
C 0.25
D 0.25
we create an objective function:
def returns_objective_function(weights, df_returns):
    result = -1. * (df_returns * weights).mean().sum()
    return result
and constraints and bounds
cons = ({'type': 'eq', 'fun': lambda weights: np.sum(weights) -1 })
bnds = ((0.01, .8), (0.01, .8), (0.01, .8), (0.01, .75))
Let's optimize
optimize.minimize(returns_objective_function, weights, (df_returns),
bounds=bnds, constraints=cons, method= 'SLSQP')
And we get success.
status: 0
success: True
njev: 8
nfev: 48
fun: -0.2885398923185326
x: array([ 0.01, 0.23, 0.01, 0.75])
message: 'Optimization terminated successfully.'
jac: array([-0.24384782, -0.2789166 , -0.21977262, -0.29300382, 0. ])
nit: 8
Now I wish to add constraints starting with a basic inequality:
scipy.optimize.minimize documentation states
Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative.
cons = (
    {'type': 'eq', 'fun': lambda weights: np.sum(weights) - 1},
    {'type': 'ineq', 'fun': lambda weights: np.sum(weights) + x}
)
Depending on x, I get unexpected behavior. Take x = -100.
Based on the bounds, the weights can sum to at most 3.15 and, of course, must sum to 1 by the first equality constraint np.sum(weights) - 1, so np.sum(weights) + x would always be negative. I believe no solution should be found, yet scipy.optimize.minimize returns success.
With a simpler model I get the same behavior:
x = [1, 2]
optimize.minimize(
    lambda x: x[0]**2 + x[1]**2,
    x,
    constraints=(
        {'type': 'eq', 'fun': lambda x: x[0] + x[1] - 1},
        {'type': 'ineq', 'fun': lambda x: x[0] - 2}
    ),
    bounds=((0, None), (0, None)),
    method='SLSQP')
with results:
nfev: 8
fun: 2.77777777777712
nit: 6
jac: array([ 3.33333334e+00, 2.98023224e-08, 0.00000000e+00])
x: array([ 1.66666667e+00, 1.39888101e-14])
success: True
message: 'Optimization terminated successfully.'
status: 0
njev: 2
There should be some flag that this is an infeasible solution.
SLSQP is also available from R:
> slsqp(c(1,2),
+ function(x) {x[1]^2+x[2]^2},
+ heq=function(x){x[1]+x[2]-1},
+ hin=function(x){x[1]-2},
+ lower=c(0,0))
$par
[1] 1.666667e+00 4.773719e-11
$value
[1] 2.777778
$iter
[1] 105
$convergence
[1] -4
$message
[1] "NLOPT_ROUNDOFF_LIMITED: Roundoff errors led to a breakdown of the optimization algorithm. In this case, the returned minimum may still be useful. (e.g. this error occurs in NEWUOA if one tries to achieve a tolerance too close to machine precision.)"
At least we see some warning signals here.
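A practical check (a small sketch of my own, not taken from either answer): evaluate the constraint functions at the x that scipy returns. For the simpler model above, both the equality and the inequality come back clearly violated even though SLSQP reports success:
import numpy as np
from scipy import optimize

cons = ({'type': 'eq',   'fun': lambda x: x[0] + x[1] - 1},
        {'type': 'ineq', 'fun': lambda x: x[0] - 2})

res = optimize.minimize(lambda x: x[0]**2 + x[1]**2, [1, 2],
                        constraints=cons, bounds=((0, None), (0, None)),
                        method='SLSQP')

tol = 1e-8
for c in cons:
    val = np.atleast_1d(c['fun'](res.x))
    violated = np.any(val < -tol) if c['type'] == 'ineq' else np.any(np.abs(val) > tol)
    print(c['type'], val, 'VIOLATED' if violated else 'ok')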

scipy.optimize show all iteration input and output values

I am using scipy.optimize.minimize to find the optimum value from a function. Here is the simplest example, using the built-in Rosenbrock function:
>>> from scipy.optimize import minimize, rosen
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> # Minimize returns a scipy.optimize.OptimizeResult object...
>>> res = minimize(rosen, x0, method='Nelder-Mead')
>>> print res
status: 0
nfev: 243
success: True
fun: 6.6174817088845322e-05
x: array([ 0.99910115, 0.99820923, 0.99646346, 0.99297555, 0.98600385])
message: 'Optimization terminated successfully.'
nit: 141
x is just the final, optimum input vector. Can I get a list of all iterations (i.e. the objective function value with the corresponding input vector) from the returned scipy.optimize.OptimizeResult object?
Yes, you can pass the optional 'return_all' flag through the options dict.
Example:
from scipy.optimize import minimize
def f(params):
    x1, x2 = params
    f = 4 * ((x1**2 + (10 - x2)**2)**0.5 - 10)**2 \
        + (1/2) * ((x1**2 + (10 + x2)**2)**0.5 - 10)**2 \
        - 5 * (x1 + x2)
    return f
x0 = [-4, 4]
res = minimize(f, x0, method='CG', options={'return_all':True})
# This example returns all iterations.
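With return_all, the per-iteration solution vectors typically end up on the result object as res.allvecs (for methods such as CG, BFGS or Nelder-Mead). If you also want the objective value at every iterate, a callback is a simple way to record both for most methods; a minimal sketch reusing f and x0 from above:
import numpy as np

history = []

def record(xk):
    # minimize calls this after each iteration with the current parameter vector
    history.append((np.copy(xk), f(xk)))

res = minimize(f, x0, method='CG', callback=record)
for x_i, f_i in history:
    print(x_i, f_i)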

Quadratic Program (QP) Solver that only depends on NumPy/SciPy?

I would like students to solve a quadratic program in an assignment without them having to install extra software like cvxopt etc. Is there a python implementation available that only depends on NumPy/SciPy?
I'm not very familiar with quadratic programming, but I think you can solve this sort of problem just using scipy.optimize's constrained minimization algorithms. Here's an example:
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D

# minimize
#     F = x[1]^2 + 4x[2]^2 - 32x[2] + 64
# subject to:
#      x[1] + x[2] <= 7
#     -x[1] + 2x[2] <= 4
#      x[1] >= 0
#      x[2] >= 0
#      x[2] <= 4
# in matrix notation:
#     F = (1/2)*x.T*H*x + c*x + c0
# subject to:
#     Ax <= b
# where:
#     H = [[2, 0],
#          [0, 8]]
#     c = [0, -32]
#     c0 = 64
#     A = [[ 1,  1],
#          [-1,  2],
#          [-1,  0],
#          [ 0, -1],
#          [ 0,  1]]
#     b = [7, 4, 0, 0, 4]

H = np.array([[2., 0.],
              [0., 8.]])
c = np.array([0, -32])
c0 = 64
A = np.array([[ 1., 1.],
              [-1., 2.],
              [-1., 0.],
              [ 0., -1.],
              [ 0., 1.]])
b = np.array([7., 4., 0., 0., 4.])

x0 = np.random.randn(2)

def loss(x, sign=1.):
    return sign * (0.5 * np.dot(x.T, np.dot(H, x)) + np.dot(c, x) + c0)

def jac(x, sign=1.):
    return sign * (np.dot(x.T, H) + c)

cons = {'type': 'ineq',
        'fun': lambda x: b - np.dot(A, x),
        'jac': lambda x: -A}

opt = {'disp': False}

def solve():
    res_cons = optimize.minimize(loss, x0, jac=jac, constraints=cons,
                                 method='SLSQP', options=opt)
    res_uncons = optimize.minimize(loss, x0, jac=jac, method='SLSQP',
                                   options=opt)

    print '\nConstrained:'
    print res_cons
    print '\nUnconstrained:'
    print res_uncons

    x1, x2 = res_cons['x']
    f = res_cons['fun']
    x1_unc, x2_unc = res_uncons['x']
    f_unc = res_uncons['fun']

    # plotting
    xgrid = np.mgrid[-2:4:0.1, 1.5:5.5:0.1]
    xvec = xgrid.reshape(2, -1).T
    F = np.vstack([loss(xi) for xi in xvec]).reshape(xgrid.shape[1:])

    ax = plt.axes(projection='3d')
    ax.hold(True)
    ax.plot_surface(xgrid[0], xgrid[1], F, rstride=1, cstride=1,
                    cmap=plt.cm.jet, shade=True, alpha=0.9, linewidth=0)
    ax.plot3D([x1], [x2], [f], 'og', mec='w', label='Constrained minimum')
    ax.plot3D([x1_unc], [x2_unc], [f_unc], 'oy', mec='w',
              label='Unconstrained minimum')
    ax.legend(fancybox=True, numpoints=1)
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('F')
Output:
Constrained:
status: 0
success: True
njev: 4
nfev: 4
fun: 7.9999999999997584
x: array([ 2., 3.])
message: 'Optimization terminated successfully.'
jac: array([ 4., -8., 0.])
nit: 4
Unconstrained:
status: 0
success: True
njev: 3
nfev: 5
fun: 0.0
x: array([ -2.66453526e-15, 4.00000000e+00])
message: 'Optimization terminated successfully.'
jac: array([ -5.32907052e-15, -3.55271368e-15, 0.00000000e+00])
nit: 3
This might be a late answer, but I found CVXOPT (http://cvxopt.org/) to be the commonly used free Python library for quadratic programming. However, it is not easy to install, as it requires the installation of other dependencies.
I ran across a good solution and wanted to get it out there. There is a python implementation of LOQO in the ELEFANT machine learning toolkit out of NICTA (http://elefant.forge.nicta.com.au as of this posting). Have a look at optimization.intpointsolver. This was coded by Alex Smola, and I've used a C-version of the same code with great success.
mystic provides a pure python implementation of nonlinear/non-convex optimization algorithms with advanced constraints functionality that typically is only found in QP solvers. mystic actually provides more robust constraints than most QP solvers. However, if you are looking for optimization algorithmic speed, then the following is not for you. mystic is not slow, but it's pure python as opposed to python bindings to C. If you are looking for flexibility and QP constraints functionality in a nonlinear solver, then you might be interested.
"""
Maximize: f = 2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2
Subject to: -2*x[0] + 2*x[1] <= -2
2*x[0] - 4*x[1] <= 0
x[0]**3 -x[1] == 0
where: 0 <= x[0] <= inf
1 <= x[1] <= inf
"""
import numpy as np
import mystic.symbolic as ms
import mystic.solvers as my
import mystic.math as mm
# generate constraints and penalty for a nonlinear system of equations
ieqn = '''
-2*x0 + 2*x1 <= -2
2*x0 - 4*x1 <= 0'''
eqn = '''
x0**3 - x1 == 0'''
cons = ms.generate_constraint(ms.generate_solvers(ms.simplify(eqn,target='x1')))
pens = ms.generate_penalty(ms.generate_conditions(ieqn), k=1e3)
bounds = [(0., None), (1., None)]
# get the objective
def objective(x, sign=1):
x = np.asarray(x)
return sign * (2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
# solve
x0 = np.random.rand(2)
sol = my.fmin_powell(objective, x0, constraint=cons, penalty=pens, disp=True,
bounds=bounds, gtol=3, ftol=1e-6, full_output=True,
args=(-1,))
print 'x* = %s; f(x*) = %s' % (sol[0], -sol[1])
One thing to note is that mystic can generically apply LP, QP, and higher-order equality and inequality constraints to any given optimizer, not just a special QP solver. Secondly, mystic can digest symbolic math, so the ease of defining/entering the constraints is a bit nicer than working with the matrices and derivatives of functions. mystic depends on numpy, and will use scipy if it is installed (however, scipy is not required). mystic utilizes sympy to handle symbolic constraints, but it's also not required for optimization in general.
Output:
Optimization terminated successfully.
Current function value: -2.000000
Iterations: 3
Function evaluations: 103
x* = [ 2. 1.]; f(x*) = 2.0
Get mystic here: https://github.com/uqfoundation
The qpsolvers package also seems to fit the bill. It only depends on NumPy and can be installed by pip install qpsolvers. Then, you can do:
from numpy import array, dot
from qpsolvers import solve_qp
M = array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
P = dot(M.T, M) # quick way to build a symmetric matrix
q = dot(array([3., 2., 3.]), M).reshape((3,))
G = array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
h = array([3., 2., -2.]).reshape((3,))
# min. 1/2 x^T P x + q^T x with G x <= h
print "QP solution:", solve_qp(P, q, G, h)
You can also try different QP solvers (such as CVXOPT mentioned by Curious) by changing the solver keyword argument, for example solver='cvxopt' or solver='osqp'.
