I have a fairly basic model that I am trying to run and I keep getting the following error message:
ERROR: Unexpected exception while running model: The NL writer has detected multiple active objective functions on model unknown, but currently only handles a single objective.
Given that error, I think CPLEX is interpreting my objective as a multi-objective problem. What I am trying to do is minimize model.obj, which is defined over the indices (i,j,t), but I don't see why that would mean I have multiple objective functions. The objective function is meant to implement equation 11 below:
from __future__ import division
from pyomo.environ import *
from MPBFunctions import *
# Variable Initialization Matrices
susceptible_init = mpbdata(1,3,1,3)
inf_b4treat_init = mpbdata(1,3,13,15)
##########################################################################################################################
# Set Declaration
##########################################################################################################################
model = ConcreteModel()
Imax = 3
Jmax = 3
Tmax = 2
Kmax = 2
model.Iset = RangeSet(1,Imax) # e.g. i = {1, 2, 3}
model.Jset = RangeSet(1,Jmax)
model.Tset = RangeSet(1,Tmax)
model.Kset = RangeSet(1,Kmax)
##########################################################################################################################
# Parameter Declaration
##########################################################################################################################
##########################################################################################################################
# Variable Declaration
##########################################################################################################################
model.susceptible = Var(model.Iset,model.Jset,model.Tset, initialize=initial_values(3,2,susceptible_init))
model.inf_b4treat = Var(model.Iset,model.Jset,model.Tset, initialize=initial_values(3,2,inf_b4treat_init))
model.inf_treated = Var(model.Iset,model.Jset,model.Tset)
model.level1 = Var(model.Iset,model.Jset,model.Tset,within=Binary)
##########################################################################################################################
# Objective Function
##########################################################################################################################
def objective_rule(model,i,j,t):
    return model.obj[i,j,t] == sum(2*model.inf_b4treat[i,j,t] for i in model.Iset for j in model.Jset for t in model.Tset)
model.damages = Objective(model.Iset, model.Jset, model.Tset, rule=objective_rule)
##########################################################################################################################
# Constraint Declaration w/ Imax=Jmax=Tmax = 3 and Kmax = 3
##########################################################################################################################
# Constraint 4: Susceptible recruitment
def susceptible_advance_rule(model, i, j, t):
    if t == Tmax:
        return Constraint.Skip
    else:
        return model.susceptible[i, j, t + 1] == model.susceptible[i, j, t] - model.inf_b4treat[i, j, t]
model.susceptible_advance = Constraint(model.Iset, model.Jset, model.Tset, rule=susceptible_advance_rule)
# Constraint 9: Treated Infestation
def treatment_rule(model, i, j, t):
    return model.inf_treated[i, j, t] == 0.20 * model.susceptible[i, j,t] * (1 - 0.15 * model.level1[i, j, t])
model.treated_pop = Constraint(model.Iset, model.Jset, model.Tset, rule=treatment_rule)
The objective_rule function should return an expression; you are returning an equality, as if it were a constraint:
def objective_rule(model,i,j,t):
    return sum(2*model.inf_b4treat[i,j,t] for i in model.Iset for j in model.Jset for t in model.Tset)
Also double check that your input data are correct.
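Note that the error message itself comes from the Objective being declared over model.Iset, model.Jset and model.Tset, which makes Pyomo build one objective per (i, j, t) combination. A minimal sketch of a single, unindexed objective with the same summed expression (sense=minimize is assumed from the description):
def objective_rule(model):
    return sum(2*model.inf_b4treat[i,j,t] for i in model.Iset for j in model.Jset for t in model.Tset)
model.damages = Objective(rule=objective_rule, sense=minimize)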
I'm writing a (not yet fully implemented) Python function using the SymPy library which looks for the critical points of a mathematical function f through the KKT conditions, as follows:
def KKT(f: str, h=[], g=[], max=True):
    # NOTE: The expressions contained in g must be such that g <= 0 and the ones contained in h must be such that h = 0. Both g and h are string lists
    import sympy as sp # Importing the SymPy library
    n = len(h) # Quantity of equality constraints
    m = len(g) # Quantity of inequality constraints
    f = sp.parse_expr(f) # Constructing the f function by passing a string as an argument
    vars = f.free_symbols # Getting the variables set
    pars = list(vars) # Parameters list
    if n > 0:
        for i in range(n):
            exec(f'h{i+1} = sp.Symbol("h_{i+1}")')
            exec(f'h{i+1} = sp.parse_expr(h[{i}])') # Create the equality constraint h_i
            exec(f'l{i+1} = sp.Symbol("\\lambda_{i+1}")') # Create the parameter lambda_i
            exec(f'pars.append(l{i+1})') # Adding lambda_i to parameters list
    if m > 0:
        for j in range(m):
            exec(f'g_{j+1} = sp.Symbol("g_{j+1}")')
            exec(f'g{j+1} = sp.parse_expr(g[{j}])') # Create the inequality constraint g_i
            exec(f'u{j+1} = sp.Symbol("\\mu_{j+1}", negative=False)') # Create the parameter mu_i
            exec(f'pars.append(u{j+1})') # Add mu_i to parameters list
            exec(f'p{j+1} = sp.Symbol("p_{j+1}", negative=False)') # Create fill portion p_i
            exec(f'pars.append(p{j+1})') # Add p_i to parameters list
    # Creating the Lagrangean
    L = f
    if n > 0:
        for i in range(n):
            exec(f'L = L - l{i+1} * h{i+1}') # Adding lambda_i * h_i to the Lagrangean
    if m > 0:
        for j in range(m):
            exec(f'L = L - u{j+1} * g{j+1}') # <- THIS LINE IS NOT WORKING
            # Adding mu_i * g_i to the Lagrangean
            print(f'j: {j}')
            print(f'{L}\n')
    # Creating the KKT condition from Lagrangean
    R = [] # Constraint's set
    for var in vars:
        R.append(sp.diff(L, var)) # Add the Lagrangean's partial derivative with respect to var
    if n > 0:
        for i in range(n):
            exec(f'R.append(h{i+1})')
    if m > 0:
        for j in range(m):
            exec(f'R.append(u{j+1} * g{j+1})')
            exec(f'R.append(g{j+1} + p{j+1})')
    # Solving KKT conditions
    sols_lagr = sp.solve(R, pars, dict=True) # Lagrangian solutions
    critical_points = [{var: sol.get(var) for var in sol if var in vars} for sol in sols_lagr]
    return critical_points

KKT('24*x_1 - x_1**2 + 10*x_2 - 2*x_2**2',
    [],
    ['x_1 - 8', 'x_2 - 7', '-x_1', '-x_2']
    )
However, for some reason, the following line of code doesn't work:
exec(f'L = L - u{j+1} * g{j+1}')
because I get this result when I execute this block of code:
j: 0
-x_1**2 + 24*x_1 - 2*x_2**2 + 10*x_2
j: 1
-x_1**2 + 24*x_1 - 2*x_2**2 + 10*x_2
j: 2
-x_1**2 + 24*x_1 - 2*x_2**2 + 10*x_2
j: 3
-x_1**2 + 24*x_1 - 2*x_2**2 + 10*x_2
This shows that the Lagrangean is not accumulating the terms. I'd be grateful if someone could help me.
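A likely explanation (an assumption on my part, not confirmed in the thread): in Python 3, exec() called inside a function cannot rebind the function's local variables, so an assignment such as L = L - ... performed through exec is silently discarded and the local L that gets printed never changes. A minimal sketch that avoids exec altogether by keeping the multipliers in a dictionary:
import sympy as sp

f = sp.parse_expr('24*x_1 - x_1**2 + 10*x_2 - 2*x_2**2')
g = [sp.parse_expr(s) for s in ['x_1 - 8', 'x_2 - 7', '-x_1', '-x_2']]

# One multiplier mu_j per inequality constraint, stored in a dict instead of exec-created names.
mu = {j: sp.Symbol(f'mu_{j+1}', negative=False) for j in range(len(g))}

L = f
for j, gj in enumerate(g):
    L = L - mu[j] * gj  # ordinary assignment, so the rebinding actually sticks

print(L)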
The problem occurs in line 29; it is a TypeError. I can't figure out where I went wrong with my parameters. The code should assign a value to every a[i][k], but it just ends up with the following error message:
a[i][k].append(g * m[i] * dr[k]/d3)
TypeError: 'int' object is not subscriptable
Here is the full code:
import numpy as np
from numpy import absolute
from numpy import power
r = [[1,1,1],[1,1,1],[0,0,0]]
v = [[0,0,0],[0,0,0],[0,0,0]]
a = [[0,0,0],[0,0,0],[0,0,0]]
m = [1,1,1]
O = -1
N = 3
def beschleunigung(O, N, m, r, a):
    i = 0
    k = 0
    dr = [0,0,0]
    d3 = 0
    g = 1
    for k in range(1,3):
        a[i][k] = 0
    for i in range(1,N):
        if i != O:
            for k in range(1,3):
                a = (r[i][k])
                b = (r[0][k])
                dr[k] = a - b
            d3 = np.power(np.absolute(dr),3)
            for k in range(1,3):
                a[i][k].append(g * m[i] * dr[k]/d3)
beschleunigung(O,N,m,r,a)
print(a[1])
When your code executes the line a = (r[i][k]), a becomes an integer rather than the list of lists that was passed into the function. The later a[i][k].append(...) then tries to subscript that integer, which is exactly the 'int' object is not subscriptable error you see.
I expect that you intended to create another variable to use in your subtraction with b; make sure to use a name that is not already defined in your scope.
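A fragment sketching how the inner loops could look with the name clash removed, keeping the posted loop ranges; the switch to plain assignment and to indexing d3 with k are my assumptions about the intent, not something stated in the post:
for k in range(1,3):
    dr[k] = r[i][k] - r[0][k]  # read from r directly, so a keeps its original meaning
d3 = np.power(np.absolute(dr), 3)  # an array with one entry per component
for k in range(1,3):
    a[i][k] = g * m[i] * dr[k] / d3[k]  # plain assignment, since a[i][k] holds a number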
When I use a fairly straightforward cost function for my optimization objective, Gurobi gives back an answer, but when I complicate things with math.log() or even with i**2 instead of i*i, it produces an error similar to one of the following:
GurobiError: Divisor must be a constant
TypeError: a float is required
TypeError: unsupported operand type(s) for ** or pow(): 'Var' and 'int'
I tried to reformulate math.log((m-i)/i) to math.log(m-i) - math.log(i); this produces the 'a float is required' error, and changing i*i to i**2 produces the 'unsupported operand' error.
Now my question is: is it simply impossible to formulate a more complex function within Gurobi, or am I making a mistake elsewhere?
Here is a snippet of my model:
from gurobipy import *
import pandas as pd
import numpy as np
import time
import math
start_time = time.time()
# example NL (i, 20, 0.08, -6.7, 301)
def cost(i, j, k, l, m):
    cost = (j - l)*i + k*i*i - l*(m - i) * (math.log((m - i) / i ))
    return cost

def utility(i, j, k, l):
    utility = j + k*i + l*i*i
    return utility

"""
def cost(i, j, k, l):
    cost = j + k*i + .5*l*i*i
    return cost
"""
# assign files to use as input and as output
outputfile = 'model1nodeoutput.csv'
inputfile = 'marketclearinginput.xlsx'
# define dataframes
dfdemand = pd.read_excel(inputfile, sheetname="demand", encoding='utf8')
dfproducer = pd.read_excel(inputfile, sheetname="producer", encoding='utf8')
m = Model("1NodeMultiPeriod")
dofprod = [m.addVar(lb=3.0, ub=300, name=h) for h in dfproducer['name']]
dofdem = [m.addVar(lb=3.0, ub=300, name=h) for h in dfdemand['name']]
# Integrate new variables
m.update()
# Set objective
m.setObjective(quicksum([utility(i, j, k, l) for i, j, k, l
                         in zip(dofdem, dfdemand['c'], dfdemand['a'], dfdemand['b'])]) -
               quicksum([cost(i, j, k, l, m) for i, j, k, l, m
                         in zip(dofprod, dfproducer['c'], dfproducer['a'], dfproducer['b'], dfproducer['Pmax'])]),
               GRB.MAXIMIZE)
# Set constraints
# Set constraints for producers
for i, j, k in zip(dofprod, dfproducer['Pmin'], dfproducer['Pmax']):
    m.addConstr(i >= j)
    m.addConstr(i <= k)
# Set constraints for demand
for i, j, k in zip(dofdem, dfdemand['Pmin'], dfdemand['Pmax']):
    m.addConstr(i >= j)
    m.addConstr(i <= k)
# Build the timestamp list, pd or np unique both possible, pd faster and preserves order
# Timestamps skips the first 3 symbols (example L1T2034 becomes 2034)
timestamps = pd.unique([i.varName[3:] for i in dofprod])
# Set constraint produced >= demanded (this should be the last constraint added, for shadow variables)
for h in timestamps:
    m.addConstr(quicksum([i for i in dofprod if i.varName.endswith(h)]) >=
                quicksum([i for i in dofdem if i.varName.endswith(h)]))
m.optimize()
Your problem might have to do with the Gurobi quicksum() function. Perhaps try sum().
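A note on where those messages come from: dividing by a Var raises 'Divisor must be a constant', math.log() only accepts plain numbers (not gurobipy Var objects), and the gurobipy version used here does not overload ** for Var, which is why i*i works but i**2 does not. If the log term is essential, one possible route, assuming Gurobi 9.0 or later where general function constraints are available, is to introduce auxiliary variables and let addGenConstrLog approximate the logarithm. The snippet below is only an illustrative sketch of that idea; the variable names and bounds are made up, and it models just the log((m - x)/x) = log(m - x) - log(x) piece:
from gurobipy import Model, GRB

mdl = Model("log_sketch")                      # illustrative model, not the one in the post
m_cap = 301.0                                  # illustrative constant standing in for Pmax
x = mdl.addVar(lb=3.0, ub=300.0, name="x")

# Auxiliary variables for m_cap - x and for the two logarithms.
u = mdl.addVar(lb=0.001, ub=m_cap - 3.0, name="u")
log_u = mdl.addVar(lb=-GRB.INFINITY, name="log_u")
log_x = mdl.addVar(lb=-GRB.INFINITY, name="log_x")

mdl.addConstr(u == m_cap - x)                  # u = m - x
mdl.addGenConstrLog(u, log_u)                  # log_u ~ log(u), piecewise-linear approximation
mdl.addGenConstrLog(x, log_x)                  # log_x ~ log(x)

# log((m - x)/x) = log(m - x) - log(x) can now appear linearly in the objective.
mdl.setObjective(log_u - log_x, GRB.MAXIMIZE)
mdl.optimize()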
I'm a new learner of Python programming. Recently I've been trying to write a small "tool" program for a dynamic programming algorithm. However, the last part of my program, a while loop, fails to loop. The code is like this:
import numpy as np
beta, rho, B, M = 0.5, 0.9, 10, 5
S = range(B + M + 1) # State space = 0,...,B + M
Z = range(B + 1) # Shock space = 0,...,B
def U(c):
    "Utility function."
    return c**beta

def phi(z):
    "Probability mass function, uniform distribution."
    return 1.0 / len(Z) if 0 <= z <= B else 0

def Gamma(x):
    "The correspondence of feasible actions."
    return range(min(x, M) + 1)

def T(v):
    """An implementation of the Bellman operator.
    Parameters: v is a sequence representing a function on S.
    Returns: Tv, a list."""
    Tv = []
    for x in S:
        # Compute the value of the objective function for each
        # a in Gamma(x), and store the result in vals (n*m matrix)
        vals = []
        for a in Gamma(x):
            y = U(x - a) + rho * sum(v[a + z]*phi(z) for z in Z)
            # the place v comes into play, v is array for each state
            vals.append(y)
        # Store the maximum reward for this x in the list Tv
        Tv.append(max(vals))
    return Tv

# create initial value
def v_init():
    v = []
    for i in S:
        val = []
        for j in Gamma(i):
            # deterministic
            y = U(i-j)
            val.append(y)
        v.append(max(val))
    return v
# Create an instance of value function
v = v_init()
# parameters
max_iter = 10000
tol = 0.0001
num_iter = 0
diff = 1.0
N = len(S)
# value iteration
value = np.empty([max_iter,N])
while (diff>=tol and num_iter<max_iter ):
    v = T(v)
    value[num_iter] = v
    diff = np.abs(value[-1] - value[-2]).max()
    num_iter = num_iter + 1
As you can see, the while loop at the bottom is used to iterate the value function and find the right answer. However, the while loop fails to iterate and just returns num_iter = 1. As far as I know, a while loop "repeats a sequence of statements until some condition becomes false"; clearly, this condition should not become false until diff converges to near 0.
The major part of the code works just fine as long as I use the following for loop instead:
value = np.empty([num_iter,N])
for x in range(num_iter):
    v = T(v)
    value[x] = v
diff = np.abs(value[-1] - value[-2]).max()
print(diff)
You define value with np.empty(...), which allocates the array without initializing it (in practice the freshly allocated rows usually come back as zeros). After the first pass through the loop only value[0] has been filled, so value[-1] and value[-2] are still those untouched rows and their difference is 0. Since 0 is not >= 0.0001, the condition is False and the loop stops after a single iteration.
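One way to fix the stopping test, sketched under the assumption that the goal is simply to iterate until successive iterates are close: compare the new iterate with the previous one directly instead of indexing into the preallocated array.
v = v_init()
num_iter, diff = 0, 1.0
while diff >= tol and num_iter < max_iter:
    v_new = T(v)
    diff = np.abs(np.array(v_new) - np.array(v)).max()  # distance between successive iterates
    v = v_new
    num_iter += 1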
I am trying to write a program that calculates the optimum amount to bet based on log utility and simultaneous dependent events.
In order to do this I am trying to use the scipy.optimize.fmin function. The function anon that I am passing to it works and produces (hopefully) correct output, but when fmin tries to optimise the function I get the following error:
s[i].append(f[i][0]*w[i][0] + f[i][1]*w[i][1])
IndexError: invalid index to scalar variable.
Since I know very little about fmin, I have no idea what is causing this error.
My code is below, hopefully not tl;dr but I wouldn't blame you.
APPENDIX
import math
import numpy as np
from scipy import optimize

def main():
    p = [[0.1,0.1,0.2, 0.2,0.1,0, 0.1,0.1,0.1]]
    w = [[5,4]]
    MaxLU(p,w,True)
def MaxLU(p, w, Push = False, maxIter = 10):
    #Maximises LU, using Scipy in built function
    if Push == True:
        anon = lambda f: -PushLogUtility(p, w, f)
    else:
        anon = lambda f: -LogUtility(p, w, f)
    #We use multiple random starts
    f = []
    LU = []
    for i in range(0,maxIter):
        start = np.random.rand(len(p))
        start = start / 5 * np.sum(start)
        f.append(optimize.fmin(anon, start)) #Error occurs in here!
        if Push == True:
            LU.append(PushLogUtility(p, w, f[-1]))
        else:
            LU.append(LogUtility(p, w, f[-1]))
    #Now find the index of the max LU and return that same index of f
    return f[LU.index(np.max(LU))]
def PushLogUtility(p,w,f):
    #Outputs log utility incorporating pushes and dependent totals, money data
    #p : 9xk length vector of joint probabilities for each of the k games, p = [[p_(W_T W_M), p_(W_T P_M), p_(W_T L_M), p_(P_T W_M) ... ]]
    #w : 2xk matrix of odds where w = [[total odds, money odds] ... ]
    #f : 2xk matrix of bankroll percentages to bet, f = [[f_T, f_M] ... ]
    utility = 0
    k = len(p)
    s = k*[[]]
    for i in range(0,k):
        s[i].append(f[i][0]*w[i][0] + f[i][1]*w[i][1])
        s[i].append(f[i][0]*w[i][0])
        s[i].append(f[i][0]*w[i][0] - f[i][1])
        s[i].append(f[i][1]*w[i][1])
        s[i].append(0)
        s[i].append(-f[i][1])
        s[i].append(-f[i][0] - f[i][1])
        s[i].append(-f[i][0] - f[i][1])
        s[i].append(-f[i][0] - f[i][1])
    for i in range(0,9 ** k):
        l = de2ni(i) #Converts number to base 9
        if i == 0:
            l += int(math.ceil(k - 1 - math.log(i + 1,9))) * [0]
        else:
            l += int(math.ceil(k - 1 - math.log(i,9))) * [0]
        productTerm = np.prod([p[i][l[i]] for i in range(0,k)])
        sumTerm = np.sum([s[i][l[i]] for i in range(0,k)])
        utility = utility + productTerm * np.log(1 + sumTerm)
    return utility
Here, where you do
s[i].append(f[i][0]*w[i][0] + f[i][1]*w[i][1])
if you look at the types, you'll find that s[i] is [], f[i] is a plain scalar such as 0.104528, and w[i] is [5, 4]. You then try to index f[i] a second time, which is not possible for a scalar and is what causes the error.
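One way to reconcile the shapes, sketched under the assumption (taken from the docstring) that each game needs two fractions f_T and f_M: hand fmin a flat vector of length 2*k and reshape it inside the wrapper before calling PushLogUtility. The names below follow the posted code; p, w, PushLogUtility and its helpers are assumed to be in scope.
import numpy as np
from scipy import optimize

k = len(p)                                 # number of games
start = np.random.rand(2 * k)              # flat start vector: fmin works with 1-D arrays

def anon(f_flat):
    f = np.asarray(f_flat).reshape(k, 2)   # rows are [f_T, f_M] for each game
    return -PushLogUtility(p, w, f)

f_opt = optimize.fmin(anon, start).reshape(k, 2)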