I'm receiving the following error from scipy.optimize.minimize:
RuntimeWarning: overflow encountered in double_scalars
inner = x[k]*((x[0]**int(cAnde[ses][sub][strats][0]))*(1.0-x[0])**int(cAnde[ses][sub][strats][1]))
Traceback (most recent call last):
  File "MLEestimation.py", line 39, in <module>
    constraints=MLEfunction.MLEconstraint(x), method = 'SLSQP', options = {'disp': True})
  File "C:\ProgramData\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 458, in minimize
    constraints, callback=callback, **options)
  File "C:\ProgramData\Anaconda2\lib\site-packages\scipy\optimize\slsqp.py", line 370, in _minimize_slsqp
    raise ValueError("Objective function must return a scalar")
ValueError: Objective function must return a scalar
But when I check whether the output of the objective function is a scalar using np.isscalar, it reports that it is, so I'm unsure what I'm doing wrong. Below is all the code for reference.
import math
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import MLEfunction
x = [0.9,0.2,0.1,0.2,0.1,0.2,0.2]
def MLE(x, cAnde):
    est = 0.0
    for ses in cAnde:
        for sub in cAnde[ses]:
            k = 0
            for strats in cAnde[ses][sub]:
                k += 1
                inner = x[k] * x[0] ** int(cAnde[ses][sub][strats][0]) \
                    * (1.0 - x[0]) ** int(cAnde[ses][sub][strats][1])
                est += math.log(inner)
    est = -1.0 * est
    return est
def MLE_deriv(x, cAnde):
    der = np.zeros_like(x)
    d0 = 0
    for ses in cAnde:
        for sub in cAnde[ses]:
            for strat in cAnde[ses][sub]:
                inner = float(cAnde[ses][sub][strat][0]) / x[0]
                inner += float(cAnde[ses][sub][strat][1]) / (1 - x[0])
                d0 += inner
    der[0] = -1.0 * d0
    for k in range(len(x) - 1):
        d = 0
        for ses in cAnde:
            for sub in cAnde[ses]:
                d += 1.0 / x[k + 1]
        der[k + 1] = -1.0 * d
    return der
def MLEconstraint(x):
    cons = []
    jacs = []
    for j in range(len(x)):
        if j == 0:
            jacs.append(0.0)
        else:
            jacs.append(1.0)
    sum1 = {'type': 'eq', 'fun': lambda x: np.array([sum(x[1:]) - 1]),
            'jac': lambda x: np.array(jacs)}
    cons.append(sum1)
    for k in range(len(x)):
        jacu = []
        jacd = []
        for l in range(len(x)):
            if l == k:
                jacu.append(-1.0)
                jacd.append(1.0)
            else:
                jacu.append(0.0)
                jacd.append(0.0)
        up = {'type': 'ineq', 'fun': lambda x: np.array([1 - x[k]]),
              'jac': lambda x: np.array(jacu)}
        low = {'type': 'ineq', 'fun': lambda x: np.array([x[k]]),
               'jac': lambda x: np.array(jacd)}
        cons.append(up)
        cons.append(low)
    return tuple(cons)
res = minimize(
    MLEfunction.MLE,
    x,
    args=CEs['48,0.75'],
    jac=MLEfunction.MLE_deriv,
    constraints=MLEfunction.MLEconstraint(x),
    method='SLSQP',
    options={'disp': True},
)
print res.x
Note: the minimization is done in a different .py file from the one where the functions are defined, and I import the module that defines them.
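For anyone reproducing the check, here is a minimal standalone sketch (my illustration, not part of the original post; cAnde stands in for the dictionary passed via args). It evaluates the objective once, outside the optimizer, and applies the same float coercion the SLSQP wrapper applies to the objective's return value:

import numpy as np
import MLEfunction

val = MLEfunction.MLE(x, cAnde)
print np.isscalar(val)        # the check from the question
print float(np.asarray(val))  # SLSQP coerces the objective value like this

If the direct call itself raises (for example, math.log of a non-positive inner once the power term underflows to zero), the failure is in evaluating the objective rather than in the type of its return value.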
Related
Say that I want to solve a parametric constrained optimization problem. Is there any method I can use to avoid looping over the parameters?
I would use code (adapted to my problem) such as the following (credit):
import numpy as np
from scipy.optimize import minimize
import random
import pandas as pd

pd.options.display.float_format = '{:.5f}'.format

def uncovered(x):
    ## note x[0] --> x ; x[1] --> y
    first_prod = x[0]**2 - 2*x[0] - 4*x[1] + 1
    second_prod_first = x[0]**3*(8*a**2*(b-1)*b - 4*a*(b-2) - 3)
    second_prod_second = x[0]**2*(-8*a**2*(b-1)*b - 8*a + 5)
    second_prod_third = x[0]*(4*x[1]*(4*a*(b-1)+1) + 4*a*b - 1) - 4*x[1] - 1
    denom = 8*(x[0]-1)*(x[0]*(2*a*(b-1)+1) - 1)**2
    if (x[0] - 1 == 0):
        final_func = 0
    else:
        final_func = first_prod*(second_prod_first + second_prod_second + second_prod_third)/denom
    return -final_func

def constraint_strict(x):
    return x[1] - 0.5*a*x[0]*(x[0]-1)*(1-b) - 0.0001

def constraint_nonstrict(x):
    return 0.25*(1 - 2*x[0] + x[0]**2) - x[1]

def constraint_1(x):
    return 1 - x[0]

def constraint_2(x):
    return x[0]

def run_uncovered_market_maximization(alpha, xi):
    a = alpha
    b = xi
    # initial guesses
    n = 2
    x0 = np.zeros(n)
    x0[0] = 1
    x0[1] = 1.0
    success = False
    # show initial objective
    print("We are maximizing uncovered welfare:")
    print('Initial Objective: ' + str(uncovered(x0)))
    while success == False:
        # optimize
        bnds = ((0,1),(-np.inf,np.inf))
        con1 = {'type': 'ineq', 'fun': constraint_strict}
        con2 = {'type': 'ineq', 'fun': constraint_nonstrict}
        cons = ([con1, con2])
        solution = minimize(uncovered, x0, method='SLSQP', bounds=bnds, constraints=cons, options={'disp': False})
        x = solution.x
        success = solution.success
        if success == False:
            x0[0] = x0[0] - random.uniform(0, 1)
            x0[1] = 5.0 - random.uniform(0, 1)
    indcons = (x[1] + a*(1-x[0])*x[0]*(1-b)/2)/((((1-x[0])**2)/4) + a*(1-x[0])*(1-b)*x[0]/2)
    # show final objective
    print("Success:" + str(solution.success))
    print('Final Objective: ' + str(uncovered(x)))
    print('First Constraint: ' + str(constraint_strict(x)))
    print('Second Constraint: ' + str(constraint_nonstrict(x)))
    # print solution
    print('Solution')
    print('d = ' + str(x[0]))
    print('p = ' + str(x[1]))
    print('market demand:' + str(1 - indcons))
    return (solution.success, str(x[0]), str(x[1]), str(uncovered(x)), indcons)
I am unsure how to handle a and b, given that the maximization should not be over them; they only represent parameters. I want to have a pair (x, y) for every acceptable value of (a, b). How do I set the boundaries, and how do I pass in a and b, if not by looping over them?
For now I do:
optlist = []
for b in np.linspace(start=0, stop=1, num=2000):
    for a in np.linspace(start=0, stop=1, num=2000):
        print('-------------Optimization for a:{} and b:{}-------------------'.format(a, b))
        print('-------------Uncovered market')
        (usuccess, du, pu, ufinal, uindcons) = run_uncovered_market_maximization(a, b)
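One pattern worth noting (a sketch of my own, not from the credited code): scipy.optimize.minimize threads fixed parameters through its args keyword, and constraint dictionaries accept an 'args' key as well, so the objective and constraints can take a and b explicitly instead of reading globals that run_uncovered_market_maximization only sets locally:

import numpy as np
from scipy.optimize import minimize

def objective(x, a, b):
    # toy stand-in for uncovered(x); a and b enter explicitly
    return (x[0] - a)**2 + (x[1] - b)**2

def constraint_strict(x, a, b):
    return x[1] - 0.5*a*x[0]*(x[0] - 1)*(1 - b) - 0.0001

a, b = 0.3, 0.7
res = minimize(objective, np.array([0.5, 0.5]), args=(a, b),
               method='SLSQP', bounds=((0, 1), (-np.inf, np.inf)),
               constraints=[{'type': 'ineq', 'fun': constraint_strict, 'args': (a, b)}])
print(res.x)

This removes the global-variable coupling, though it does not by itself avoid the loop: each (a, b) pair is still a separate optimization.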
How do I solve this error?
TypeError: NumPy boolean subtract, the `-` operator, is not supported, use the bitwise_xor, the `^` operator, or the logical_xor function instead.
I have programmed an optimizer that must minimize the cost of a wall design. The wall is based on 3 parameters: x, k and m. There are constraints on the sizes of x, k and m, as shown. Another constraint is that z (the deflection) must be kept under 100 mm. The equation for the deflection changes based on the time t at which the blast wall experiences the blast: if t is below a certain time value, itself calculated from x, k and m, the equation is as shown; if t is above that value, the equation for z changes.
Here is the program. Please help, many thanks :)
import numpy as np
from numpy import linspace
from math import cos
from math import sin
from scipy.optimize import minimize

#Function for minimising
def calcCost(c):
    k = c[0]
    m = c[1]
    x = c[2]
    Cost = (900 + 825*k**2 - 1725) + (10*m - 200) + ((2400*x**2)/4)
    return Cost

#Objective function
def objective(c):
    return calcCost(c)

#Defining Variables
def calck(c):
    k = c[0]
    k = k
    k.resize(12,)
    return k

def calcm(c):
    m = c[1]
    m = m
    m.resize(12,)
    return m

def calcx(c):
    x = c[2]
    x = x
    x.resize(12,)
    return x

def calcz(c):
    k = c[0]
    x = c[1]
    m = c[2]
    l = linspace(0,140,141)
    for t in l:
        if t <= ((20 - 0.12*x**2 + 4.2*x)/1000):
            deflection = ((((1000+9*x**2-183*x)*1000)/k)*(1-cos(t*((k/m)**0.5))) + (((1000+9*x**2-183*x)*1000)/k*((20 - 0.12*x**2 + 4.2*x)/1000))*((sin(t*((k/m)**0.5))/((k/m)**0.5))-t))*1000
        else:
            deflection = ((((1000+9*x**2-183*x)*1000)/(k*((k/m)**0.5)*((20 - 0.12*x**2 + 4.2*x)/1000)))*(sin(((k/m)**0.5)*t))-(sin(((k/m)**0.5)*(t-((20 - 0.12*x**2 + 4.2*x)/1000))))-(((1000+9*x**2-183*x)*1000)/k)*cos(((k/m)**0.5)*t))*1000
    deflection.resize(12,)
    return deflection

#Constraint functions
def kconstraint1(c):
    k = c[0]
    return k-(1*10**6) >= 0

def kconstraint2(c):
    k = c[0]
    return k-(7*10**6) <= 0

def mconstraint1(c):
    m = c[0]
    return m-200 >= 0

def mconstraint2(c):
    m = c[0]
    return m-1200 <= 0

def xconstraint1(c):
    x = c[0]
    return x >= 0

def xconstraint2(c):
    x = c[0]
    return x <= 10

def zconstraint1(c):
    k = c[0]
    x = c[1]
    m = c[2]
    l = linspace(0,140,141)
    for t in l:
        if t <= ((20 - 0.12*x**2 + 4.2*x)/1000):
            deflection = ((((1000+9*x**2-183*x)*1000)/k)*(1-cos(t*((k/m)**0.5))) + (((1000+9*x**2-183*x)*1000)/k*((20 - 0.12*x**2 + 4.2*x)/1000))*((sin(t*((k/m)**0.5))/((k/m)**0.5))-t))*1000
        else:
            deflection = ((((1000+9*x**2-183*x)*1000)/(k*((k/m)**0.5)*((20 - 0.12*x**2 + 4.2*x)/1000)))*(sin(((k/m)**0.5)*t))-(sin(((k/m)**0.5)*(t-((20 - 0.12*x**2 + 4.2*x)/1000))))-(((1000+9*x**2-183*x)*1000)/k)*cos(((k/m)**0.5)*t))*1000
    return deflection <= 99.99999999

b = (0.5,1)
be = (0.5,10)
bb = (0.1,2.0)
bnds = (b,be,bb,bb)

con1 = ({'type':'ineq','fun':kconstraint1})
con2 = ({'type':'ineq','fun':kconstraint2})
con3 = ({'type':'ineq','fun':mconstraint1})
con4 = ({'type':'ineq','fun':mconstraint2})
con5 = ({'type':'ineq','fun':xconstraint1})
con6 = ({'type':'ineq','fun':xconstraint2})
con7 = ({'type':'ineq','fun':zconstraint1})
cons = [con1,con2,con3,con4,con5,con6,con7]

xGUESS = 5
kGUESS = 3*10**6
mGUESS = 700
zGUESS = 90
x0 = np.array([xGUESS,kGUESS,mGUESS,zGUESS])

sol = minimize(objective,x0,method='SLSQP',bounds=bnds,constraints=cons,options={'disp':True})
xOpt = sol.x
CostOPT = sol.fun
kOPT = calck(xOpt)
xOPT = calcx(xOpt)
mOPT = calcm(xOpt)
zOPT = calcz(xOpt)
print(str(CostOPT))
print(str(calcx))
print(str(calcm))
print(str(calck))
print(str(calcz))
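One detail worth flagging here (my sketch, not from the original post): SLSQP expects each 'ineq' constraint function to return a number that is non-negative when the constraint is satisfied, not a boolean. Expressions such as k-(1*10**6) >= 0 return a NumPy bool, and subtracting booleans is exactly what the solver's finite-difference step then attempts, which is a likely source of this TypeError. Written numerically, the first two constraints would look like:

def kconstraint1(c):
    k = c[0]
    return k - 1*10**6   # non-negative exactly when k >= 1e6

def kconstraint2(c):
    k = c[0]
    return 7*10**6 - k   # non-negative exactly when k <= 7e6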
I am running scipy.optimize.minimize to minimize the following function,
def gas_midas_ll(params, low_freq, hi_freq, lag='beta', dist='normal', outer=False):
    T = low_freq[0]
    nx = hi_freq[1]
    omega = params[0]
    A1 = params[1]
    A2 = params[2]
    B = params[3]
    phi1 = params[4]
    phi2 = params[5]
    f = np.ones((T+1,1))  ## <--- this is where I think the Traceback ends
    s1 = np.ones((T+1,1))
    s2 = np.ones((nx,T+1))
    f[0] = omega/(1-B)
    low_freq = np.append(0,low_freq).reshape((T+1,1))
    hi_freq = np.c_[np.zeros((nx,1)),hi_freq]
    s2[:,0:1] = hi_freq[:,0:1]**2 - f[0]
    if dist == 'normal':
        s1[0] = low_freq[0]**2 - f[0]
        if lag == 'beta':
            for t in range(1,T+1):
                f[t] = omega + A1*s1[t-1] + A2*np.sum(beta_lag(phi1,phi2,hi_freq)*s2[:,t-1:t]) + B*f[t-1]
                s1[t] = low_freq[t]**2 - f[t]
                s2[:,t:t+1] = hi_freq[:,t:t+1]**2 - f[t]
        elif lag == 'almon':
            phi = np.array((phi1,phi2)).reshape((2,1))
            for t in range(1,T+1):
                f[t] = omega + A1*s1[t-1] + A2*np.sum(almon_q(phi,hi_freq)*s2[:,t-1:t]) + B*f[t-1]
                s1[t] = low_freq[t]**2 - f[t]
                s2[:,t:t+1] = hi_freq[:,t:t+1]**2 - f[t]
        f = f[1:]
        s1 = s1[1:]
        s2 = s2[:,1:]
        low_freq = low_freq[1:]
        hi_freq = hi_freq[:,1:]
        constants = (1/2)*np.log(2*np.pi)
        ll = -((low_freq**2)/(2*f)) - constants - (1/2)*np.log(f)
    elif dist == 't':
        v = params[6]
        s1[0] = ((v+1)*low_freq[0]**2)/((v-2)+(low_freq[0]**2)/f[0]) - f[0]
        if lag == 'beta':
            for t in range(1,T+1):
                f[t] = omega + B*f[t-1] + A1*s1[t-1] + A2*np.sum(beta_lag(phi1,phi2,hi_freq)*s2[:,t-1:t])
                s1[t] = ((v+1)*low_freq[t]**2)/((v-2)+(low_freq[t]**2)/f[t]) - f[t]
                s2[:,t:t+1] = hi_freq[:,t:t+1]**2 - f[t]
        elif lag == 'almon':
            phi = np.array((phi1,phi2)).reshape((2,1))
            for t in range(1,T+1):
                f[t] = omega + B*f[t-1] + A1*s1[t-1] + A2*np.sum(almon_q(phi,hi_freq)*s2[:,t-1:t])
                s1[t] = ((v+1)*low_freq[t]**2)/((v-2)+(low_freq[t]**2)/f[t]) - f[t]
                s2[:,t:t+1] = hi_freq[:,t:t+1]**2 - f[t]
        f = f[1:]
        s1 = s1[1:]
        s2 = s2[:,1:]
        low_freq = low_freq[1:]
        hi_freq = hi_freq[:,1:]
        delta = (1/2)*(np.log(v-2)+np.log(np.pi))
        G1 = np.log(special.gamma((v+1)/2))
        G2 = np.log(special.gamma(v/2))
        constants = G1 - G2 - delta
        ll = constants - (1/2)*np.log(f) - ((v+1)/2)*np.log(1+(low_freq**2)/(f*(v-2)))
    if outer == True:
        return -ll
    else:
        return -np.sum(ll)
I get the error message,
Traceback (most recent call last):
  File "C:\Users\***\SpyderProjects\SeminarW1920\draft5.py", line 320, in <module>
    bounds=gas_midas_nbounds_beta)
  File "C:\Users\***\Anaconda3\lib\site-packages\scipy\optimize\_minimize.py", line 618, in minimize
    constraints, callback=callback, **options)
  File "C:\Users\***\Anaconda3\lib\site-packages\scipy\optimize\slsqp.py", line 399, in _minimize_slsqp
    fx = func(x)
  File "C:\Users\***\Anaconda3\lib\site-packages\scipy\optimize\optimize.py", line 327, in function_wrapper
    return function(*(wrapper_args + args))
  File "C:\Users\***\SpyderProjects\SeminarW1920\likelihoodFunctions2.py", line 153, in gas_midas_ll
    f = np.ones((T+1,1))
  File "C:\Users\***\Anaconda3\lib\site-packages\numpy\core\numeric.py", line 207, in ones
    a = empty(shape, dtype, order)
TypeError: only integer scalar arrays can be converted to a scalar index
To me this says there is an error when calling np.ones(), but I don't understand why. I have similar functions that I also minimize with scipy.optimize.minimize, and none of them throws errors like this. Maybe the error is somewhere else and the error message is not pointing in the right direction?
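That exact TypeError can be reproduced by passing an array, rather than an int, as a dimension in a shape tuple (a sketch of mine; the low_freq name mirrors the question's variable). If low_freq is 2-D, low_freq[0] is a row, not a length, and low_freq.shape[0] may be what was intended:

import numpy as np

low_freq = np.random.randn(10, 1)   # hypothetical 2-D input
T = low_freq[0]                     # a length-1 array, not an int
try:
    f = np.ones((T + 1, 1))         # TypeError: only integer scalar arrays ...
except TypeError as e:
    print(e)

T = low_freq.shape[0]               # an int; this shape is fine
f = np.ones((T + 1, 1))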
My program evaluates the error in solving a linear differential equation. It uses only NumPy arrays. When I try to use numba's jit decorator on the functions I define, I just get errors. Can you please help me use it properly?
My code:
import numpy as np
from numba import jit

def rk4(t_prev, x_prev, derivs, dt):
    k1 = dt * derivs(t_prev, x_prev)
    k2 = dt * derivs(t_prev + 1/2*dt, x_prev + 1/2*k1)
    k3 = dt * derivs(t_prev + 1/2*dt, x_prev + 1/2*k2)
    k4 = dt * derivs(t_prev + dt, x_prev + k3)
    x_next = x_prev + 1/6*k1 + 1/3*k2 + 1/3*k3 + 1/6*k4
    return x_next

global k, x_0, v_0, t_0, t_f
k = 1
x_0 = 0
v_0 = np.sqrt(k)
t_0 = 0
t_f = 10

dtList = np.logspace(0, -5, 1000)

def derivs(t, X):
    deriv = np.zeros([2])
    deriv[0] = X[1]
    deriv[1] = -k * X[0]
    return deriv

def err(dt):
    tList = np.arange(t_0, t_f + dt, dt)
    N = tList.shape[0]
    XList = np.zeros([N,2])
    XList[0][0], XList[0][1] = x_0, v_0
    for i in range(N-1):
        XList[i+1] = rk4(tList[i], XList[i], derivs, dt)
    error = np.abs(XList[-1][0] - np.sin(10))
    return error

print(err(.001))
The following works for me:
import numpy as np
from numba import jit

@jit(nopython=True)
def rk4(t_prev, x_prev, derivs, dt):
    k1 = dt * derivs(t_prev, x_prev)
    k2 = dt * derivs(t_prev + 1/2*dt, x_prev + 1/2*k1)
    k3 = dt * derivs(t_prev + 1/2*dt, x_prev + 1/2*k2)
    k4 = dt * derivs(t_prev + dt, x_prev + k3)
    x_next = x_prev + 1/6*k1 + 1/3*k2 + 1/3*k3 + 1/6*k4
    return x_next

global k, x_0, v_0, t_0, t_f
k = 1
x_0 = 0
v_0 = np.sqrt(k)
t_0 = 0
t_f = 10

dtList = np.logspace(0, -5, 1000)

@jit(nopython=True)
def derivs(t, X):
    deriv = np.zeros(2)
    deriv[0] = X[1]
    deriv[1] = -k * X[0]
    return deriv

@jit(nopython=True)
def err(dt):
    tList = np.arange(t_0, t_f + dt, dt)
    N = tList.shape[0]
    XList = np.zeros((N,2))
    XList[0][0], XList[0][1] = x_0, v_0
    for i in range(N-1):
        XList[i+1] = rk4(tList[i], XList[i], derivs, dt)
    error = np.abs(XList[-1][0] - np.sin(10))
    return error

print(err(.001))
Note: the only two changes I made to your code were to replace the calls to np.zeros that passed in lists with either a tuple in the 2-D case or the bare integer in the 1-D case. See the following issue for an explanation of why this is:
https://github.com/numba/numba/issues/3993
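In isolation, the distinction looks like this (a quick illustration, assuming the linked issue still reflects numba's behavior):

import numpy as np
from numba import njit

@njit
def shapes_ok():
    a = np.zeros((3, 2))  # tuple shape: accepted in nopython mode
    b = np.zeros(5)       # bare integer: accepted
    return a, b

# np.zeros([3, 2]) inside an @njit function fails to compile,
# per https://github.com/numba/numba/issues/3993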
I'm attempting to minimize the function f(x) = x[0] * x[1] over a system of inequality constraints using scipy.optimize.minimize, and the solver returns values which do not respect all of the constraints. For example:
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import fmin_slsqp

# small constant for enforcing strict inequalities
c = 0.000001

# Objective Function to Optimize
def objective(x):
    return x[0] * x[1]

# Constraints
def con1(x):
    return (x[0]*(1/(7**x[2]))) + 6*x[1] - c

def con2(x):
    return (x[0]*(1/(2**x[2]))) + x[1] - c

def con3(x):
    return (x[0]*(1/(3**x[2]))) + 2*x[1] - c

def con4(x):
    return (x[0]*(1/(4**x[2]))) + 3*x[1] - c

def con5(x):
    return (x[0]*(1/(7**x[2]))) - (x[0]*(1/(1**x[2]))) + 7*x[1] - c

def con6(x):
    return (x[0]*(1/(2**x[2]))) - (x[0]*(1/(1**x[2]))) + 2*x[1] - c

def con7(x):
    return (x[0]*(1/(3**x[2]))) - (x[0]*(1/(1**x[2]))) + 3*x[1] - c

def con8(x):
    return -(x[0]*(1/(1**x[2]))) + 4*x[1] - c

def con9(x):
    return (x[0]*(1/(1**x[2]))) + 2*x[1] - c

def con10(x):
    return (x[0]*(1/(7**x[2]))) + 4*x[1] - c

def con11(x):
    return (x[0]*(1/(2**x[2]))) - x[1] - c

def con12(x):
    return (x[0]*(1/(4**x[2]))) + x[1] - c

def con13(x):
    return x[0] - 1

def con14(x):
    return x[1] - 1

def con15(x):
    return x[2] - c

# Initial Guesses
x0 = [1,1,1]

# Constraint Objects
constr1 = {'type':'ineq', 'fun':con1}
constr2 = {'type':'ineq', 'fun':con2}
constr3 = {'type':'ineq', 'fun':con3}
constr4 = {'type':'ineq', 'fun':con4}
constr5 = {'type':'ineq', 'fun':con5}
constr6 = {'type':'ineq', 'fun':con6}
constr7 = {'type':'ineq', 'fun':con7}
constr8 = {'type':'ineq', 'fun':con8}
constr9 = {'type':'ineq', 'fun':con9}
constr10 = {'type':'ineq', 'fun':con10}
constr11 = {'type':'ineq', 'fun':con11}
constr12 = {'type':'ineq', 'fun':con12}
constr13 = {'type':'ineq', 'fun':con13}
constr14 = {'type':'ineq', 'fun':con14}
constr15 = {'type':'ineq', 'fun':con15}
cons = [constr1,constr2,constr3,constr4,constr5,constr6,constr7,constr8,constr9,constr10,constr11,constr12,constr13,constr14,constr15]

solution = minimize(objective, x0, method='SLSQP', constraints=cons)

for con in cons:
    print(str(con) + str(con['fun'](solution.x)))
Looping over the constraints with the solution values shows that some of them evaluate as negative, even though the constraints are all of the form g(x) >= 0.
Is this due to some error on my part in the specifications? An issue with numerical precision? Or is this an issue with SLSQP? (see, for example: https://github.com/scipy/scipy/issues/7618).
If this isn't an issue with my specification, I would also accept suggestions for formulating this constraint-solving problem in other frameworks (preferably in Python).
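As a first check, one could quantify how large the violations actually are and tighten SLSQP's convergence tolerance (a sketch with hypothetical option values; violations on the order of the default ftol suggest a precision artifact rather than a wrong solution):

# Quantify the worst violation at the returned point.
violations = [con['fun'](solution.x) for con in cons]
print('worst violation:', min(violations))

# Re-run with a tighter tolerance and a larger iteration budget.
solution = minimize(objective, x0, method='SLSQP', constraints=cons,
                    options={'ftol': 1e-12, 'maxiter': 1000})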