Below is a simple Pyomo script using the decorator syntax - I would like to understand how to use this syntax within a class - in this case inside Model.
Non-class version:
from pyomo.environ import *
import random

random.seed(1000)

model = AbstractModel()

model.N = Param(within=PositiveIntegers)
model.P = Param(within=RangeSet(1, model.N))
model.M = Param(within=PositiveIntegers)

model.Locations = RangeSet(1, model.N)
model.Customers = RangeSet(1, model.M)

model.d = Param(
    model.Locations,
    model.Customers,
    initialize=lambda n, m, model: random.uniform(1.0, 2.0),
    within=Reals,
)
model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0))
model.y = Var(model.Locations, within=Binary)

@model.Objective()
def obj(model):
    return sum(
        model.d[n, m] * model.x[n, m] for n in model.Locations for m in model.Customers
    )

@model.Constraint(model.Customers)
def single_x(model, m):
    return (sum(model.x[n, m] for n in model.Locations), 1.0)

@model.Constraint(model.Locations, model.Customers)
def bound_y(model, n, m):
    return model.x[n, m] - model.y[n] <= 0.0

@model.Constraint()
def num_facilities(model):
    return sum(model.y[n] for n in model.Locations) == model.P
Decorator version within a class that doesn't work:
from pyomo.environ import *
import random

random.seed(1000)

class Model:
    def __init__(self):
        self.model = AbstractModel()
        self.model.N = Param(within=PositiveIntegers)
        self.model.P = Param(within=RangeSet(1, self.model.N))
        self.model.M = Param(within=PositiveIntegers)
        self.model.Locations = RangeSet(1, self.model.N)
        self.model.Customers = RangeSet(1, self.model.M)
        self.model.d = Param(
            self.model.Locations,
            self.model.Customers,
            initialize=lambda n, m, model: random.uniform(1.0, 2.0),
            within=Reals,
        )
        self.model.x = Var(
            self.model.Locations, self.model.Customers, bounds=(0.0, 1.0)
        )
        self.model.y = Var(self.model.Locations, within=Binary)

        @model.Objective()
        def obj(model):
            return sum(
                model.d[n, m] * model.x[n, m]
                for n in model.Locations
                for m in model.Customers
            )

        @model.Constraint(model.Customers)
        def single_x(model, m):
            return (sum(model.x[n, m] for n in model.Locations), 1.0)

        @model.Constraint(model.Locations, model.Customers)
        def bound_y(model, n, m):
            return model.x[n, m] - model.y[n] <= 0.0

        @model.Constraint()
        def num_facilities(model):
            return sum(model.y[n] for n in model.Locations) == model.P
I'm not able to help you on this; I just have a few questions:
do you know if the use of @model.Objective() (and the same for Constraint, etc.) is documented somewhere? I didn't know it existed, and it's awesome
why do you want your "function rules" to be methods of the class? Couldn't you define them as functions within the __init__ method?
I guess what I'm missing is the benefit of using a class in the first place.
If you are just trying to wrap the model construction somehow, then a better approach is using a function:
def create_model():
    model = AbstractModel()
    ...

    @model.Constraint()
    def some_rule_function(model):
        ...

    ...
    return model
EDIT: if you really want to wrap everything into a class:
import pyomo.environ as pe

class Model:
    def __init__(self, model):
        self.model = model

    # alternative constructor:
    # def __init__(self):
    #     self.model = create_model()

    def construct(self, data):
        # get concrete model
        self.model = self.model.create_instance(data)

    def run(self, solver, **kwargs):
        with pe.SolverFactory(solver) as solver:
            solver.solve(self.model, **kwargs)

    def construct_and_run(self, data, solver, **kwargs):
        self.construct(data)
        self.run(solver, **kwargs)

    # other behavior you want to add to the class
example usage:
model = Model(create_model())
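Continuing from that usage line, a hedged sketch of driving the wrapper end to end (the data file name 'pmedian.dat' and the 'glpk' solver are placeholders for illustration, not part of the original answer):

model.construct_and_run('pmedian.dat', 'glpk')  # build a concrete instance from the data file, then solve it
model.model.display()  # the solved concrete instance now lives in model.model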
Trying to answer your direct question, here's something that seems to work for me. My interpretation is that since your model is called self.model, the decorators should also match that.
Note that I used s as the first argument in the constraint method definitions just to see if it worked, but it could also be model or whatever you want to call it.
import random
import pyomo.environ as pyo

class Model:
    def __init__(self):
        self.model = pyo.AbstractModel()
        self.model.N = pyo.Param(initialize=5, within=pyo.PositiveIntegers)
        self.model.P = pyo.Param(initialize=3, within=pyo.RangeSet(1, self.model.N))
        self.model.M = pyo.Param(initialize=3, within=pyo.PositiveIntegers)
        self.model.Locations = pyo.RangeSet(1, self.model.N)
        self.model.Customers = pyo.RangeSet(1, self.model.M)
        self.model.d = pyo.Param(
            self.model.Locations,
            self.model.Customers,
            initialize=lambda n, m, model: random.uniform(1.0, 2.0),
            within=pyo.Reals,
        )
        self.model.x = pyo.Var(
            self.model.Locations, self.model.Customers, bounds=(0.0, 1.0)
        )
        self.model.y = pyo.Var(self.model.Locations, within=pyo.Binary)

        @self.model.Objective()
        def obj(s):
            return sum(
                s.d[n, m] * s.x[n, m]
                for n in s.Locations
                for m in s.Customers
            )

        @self.model.Constraint(self.model.Customers)
        def single_x(s, m):
            return (sum(s.x[n, m] for n in s.Locations), 1.0)

        @self.model.Constraint(self.model.Locations, self.model.Customers)
        def bound_y(s, n, m):
            return s.x[n, m] - s.y[n] <= 0.0

        @self.model.Constraint()
        def num_facilities(s):
            return sum(s.y[n] for n in s.Locations) == s.P
You would then be able to instantiate the model with model = Model(), though annoyingly (at least to me), all your Pyomo model components will be within the attribute model.model (e.g., model.model.P).
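For example, a small sketch of reaching those nested components (assuming the class above and import pyomo.environ as pyo):

m = Model()
print(m.model.P.name)  # components are reached through the nested model attribute
instance = m.model.create_instance()  # every Param has an initialize value, so no data file is needed
print(pyo.value(instance.P))  # prints 3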
What I've done before to make the naming cleaner is to inherit from AbstractModel (though the other answer suggests that may not be good practice):
import random
import pyomo.environ as pyo
from pyomo.core.base.PyomoModel import AbstractModel

class Model(AbstractModel):
    def __init__(self):
        AbstractModel.__init__(self)
        self.N = pyo.Param(initialize=5, within=pyo.PositiveIntegers)
        self.P = pyo.Param(initialize=3, within=pyo.RangeSet(1, self.N))
        self.M = pyo.Param(initialize=3, within=pyo.PositiveIntegers)
        self.Locations = pyo.RangeSet(1, self.N)
        self.Customers = pyo.RangeSet(1, self.M)
        self.d = pyo.Param(
            self.Locations,
            self.Customers,
            initialize=lambda n, m, model: random.uniform(1.0, 2.0),
            within=pyo.Reals,
        )
        self.x = pyo.Var(
            self.Locations, self.Customers, bounds=(0.0, 1.0)
        )
        self.y = pyo.Var(self.Locations, within=pyo.Binary)

        @self.Objective()
        def obj(s):
            return sum(
                s.d[n, m] * s.x[n, m]
                for n in s.Locations
                for m in s.Customers
            )

        @self.Constraint(self.Customers)
        def single_x(s, m):
            return (sum(s.x[n, m] for n in s.Locations), 1.0)

        @self.Constraint(self.Locations, self.Customers)
        def bound_y(s, n, m):
            return s.x[n, m] - s.y[n] <= 0.0

        @self.Constraint()
        def num_facilities(s):
            return sum(s.y[n] for n in s.Locations) == s.P
In this case, you still instantiate as model = Model() but your Pyomo model components can be accessed as model.P.
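A hedged sketch of using the subclass end to end (assuming the glpk solver is installed; again, no data file should be needed because every Param has an initialize value):

m = Model()  # the AbstractModel subclass above
instance = m.create_instance()  # build a concrete instance
pyo.SolverFactory('glpk').solve(instance)
print(pyo.value(instance.obj))  # objective value of the solved instance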
Related
I'm trying to solve the following nonlinear system (see the equations in the attached picture), but I'm truly lost.
(equations shown in the attached image)
I need to create one method called solve_prec, which solves the system. I don't know how to use scipy.integrate.solve_ivp in this case.
Note: T(x=0) = 1, and the domain of integration is (0, -0.25). Moreover, the energy method refers to e and the pressure method refers to p. Finally, "rho" is den, e0 = energy(1, 1), and p0 = pressure(1, 1).
Thanks in advance!!
class eos_gamma_c():
    def __init__(self, gamma=5/3.0, M0=10, P0=0.0001, k=0.0001):
        self.gamma = gamma
        self.M0 = M0
        self.P0 = P0
        self.k = k
        self.cv = 1./self.gamma/(self.gamma-1.)
        return

    def energy(self, tem, den):
        return self.cv*tem

    def pressure(self, tem, den):
        return den*tem/self.gamma

    def solve_prec(self, tem, den):
        ene0 = self.energy(1, 1)
        pre0 = self.pressure(1, 1)
Now, following up on my two previous posts about ODE implementations, I am trying to refactor my code and fix some problems. I decided that it makes sense to create the following classes: Solver and Problem.
The code for the ODE_Solver and FE classes is finally done and working:
# ODS.py
import numpy as np

class ODE_Solver(object):
    def __init__(self, f):
        if not callable(f):
            raise TypeError('f is %s, not a function' % type(f))
        self.f = lambda u, x: np.asarray(f(u, x), float)
        self.err_sch = None

    def solver_st(self):
        raise NotImplementedError

    def err_st(self):
        raise NotImplementedError

    def set_initial_condition(self, u0):
        if isinstance(u0, (float, int)):
            self.neq = 1
            u0 = float(u0)
        else:
            u0 = np.asarray(u0)
            self.neq = u0.size
        self.u0 = u0
        try:
            f0 = self.f(self.u0, 0)
        except IndexError:
            raise IndexError(
                'index out of bounds f(u,x). correct index %s' % (str(range(self.neq))))
        if f0.size != self.neq:
            raise ValueError('f(u,x) returned %d elems, vector u has %d elems' % (f0.size, self.neq))

    def solve(self, coord_points, terminate=None):
        if terminate is None:
            terminate = lambda u, x, step_no: False
        if isinstance(coord_points, (float, int)):
            raise TypeError('solve: x points not numpy array or numbers.')
        self.x = np.asarray(coord_points)
        if self.x.size <= 1:
            raise ValueError('ODESolver.solve points of coords less than two')
        n = self.x.size
        if self.neq == 1:  # scalar ODE
            self.u = np.zeros(n)
            self.err_sch = np.zeros(n)
        else:
            self.u = np.zeros((n, self.neq))
            self.err_sch = np.zeros((n, self.neq))
        self.u[0] = self.u0
        self.err_sch[0] = 0
        for k in range(n - 1):
            self.k = k
            self.u[k + 1] = self.solver_st()
            self.err_sch[k + 1] = self.err_st()
            if terminate(self.u, self.x, self.k + 1):
                break
        return self.u[:k + 2], self.x[:k + 2]
# ES.py
from ODS import ODE_Solver
import numpy as np

class FE(ODE_Solver):
    def solver_st(self):
        u, f, k, x = self.u, self.f, self.k, self.x
        dx = x[k + 1] - x[k]
        u_new = u[k] + dx * f(u[k], x[k])
        return u_new

    def err_st(self):
        u, f, k, x, err_sch = self.u, self.f, self.k, self.x, self.err_sch
        dx = x[k + 1] - x[k]
        err_sch = np.max(dx)**2
        return err_sch
I tried to implement the Problem class (it returns the ODE right-hand side and holds the initial conditions):
import numpy as np

class Problem(object):
    def __init__(self, u0, End):
        self.u0 = np.asarray(u0)
        self.End = End  # end point of coords

    def __call__(self, u, x):
        return (u[1], u[2], u[3], u[4],
                - 15 * u[4] - 90 * u[3] - 270 * u[2] - 405 * u[1] - 243 * u[0])
And the Solver class, which calls the numerical scheme, plots the final result, and evaluates the error:
import numpy as np
import matplotlib.pyplot as plt
import ES
import ODS
from ADS import ABM4
from ES import FE
from MLNS import MLN
from RKS import RK4

class Solver(object):
    def __init__(self, problem, dx,
                 method=ES.FE):  # choose FE scheme for testing
        """
        """
        self.problem, self.dx = problem, dx
        self.solver = method

    @staticmethod
    def choose_sch(type):
        if type == 1:
            method = FE
            return method
        elif type == 2:
            method = RK4
            return method
        elif type == 3:
            method = ABM4
            return method
        elif type == 4:
            method = MLN
            return method
        else:
            raise ValueError('not choose numerical scheme!')

    def dsolve(self):
        solver = self.method(self.problem)
        solver.set_initial_condition(self.problem.u0)
        n = int(round(self.problem.End / self.dx))
        x_points = np.linspace(0, self.problem.End, n + 1)
        self.u, self.x = solver.solve(x_points)
        if solver.k + 1 == n:
            self.plot()
            raise ValueError('scheme did not converge by x = %s' % self.problem.End)

    def plot(self):
        plt.plot(self.x, self.u)
        plt.show()
Now, when I call this Solver and Problem
import numpy as np
from ODE_Problem import Problem
from SLV_Prob import Solver

def test():
    problem = Problem(u0=[0, 3, -9, -8, 0], End=5)
    solver = Solver(problem, dx=0.1)
    solver.dsolve()
    solver.plot()

if __name__ == '__main__':
    test()
I get the error:
Traceback (most recent call last):
  File "C:\Fin_Proj_ODE\test2.py", line 14, in <module>
    test()
  File "C:\Fin_Proj_ODE\test2.py", line 9, in test
    solver.dsolve()
  File "C:\Fin_Proj_ODE\SLV_Prob.py", line 37, in dsolve
    solver = self.method(self.problem)
AttributeError: 'Solver' object has no attribute 'method'
And I don't understand what the reason for this bug is...
So, I have 2 questions about implementing this Solver:
How do I fix this bug?
How do I correctly rewrite def choose_sch(type): so that I can call the solver with a type argument (and, depending on it, the corresponding numerical scheme is used)?
Question One:
Well, as the error states, your Solver class doesn't have an attribute called "method". Your attribute is actually "solver", so instead of calling
self.method(self.problem)
Try
self.solver(self.problem)
Question Two:
If I'm understanding you correctly, you want to know how you can call the choose_sch method from within the solver constructor and take in a type instead of a method directly. For that, simply do this:
class Solver(object):
    def __init__(self, problem, dx, solver_type=1):  # choose FE scheme for testing
        """
        """
        self.problem, self.dx = problem, dx
        self.solver = self._choose_sch(solver_type)

    @staticmethod
    def _choose_sch(solver_type):
        methods = {1: FE, 2: RK4, 3: ABM4, 4: MLN}
        if solver_type in methods:
            return methods[solver_type]
        else:
            raise ValueError('not choose numerical scheme!')
The dictionary here is much better than the if statement for these kinds of tasks.
You can also alternatively not make _choose_sch a staticmethod if you don't need to call it from a static context, and just have it set the solver directly.
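As an illustrative sketch (not from the original answer), the call site could then pass the scheme type directly, assuming the fix from Question One (self.solver instead of self.method inside dsolve) is also applied:

problem = Problem(u0=[0, 3, -9, -8, 0], End=5)
solver = Solver(problem, dx=0.1, solver_type=2)  # 2 maps to RK4 in _choose_sch
solver.dsolve()
solver.plot()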
While autograd's hvp tool seems to work very well for functions, once a model becomes involved, Hessian-vector products seem to go to 0. Some code.
First, I define the world's simplest model:
class SimpleMLP(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(in_dim, out_dim),
        )

    def forward(self, x):
        '''Forward pass'''
        return self.layers(x)
Then, a loss function:
def objective(x):
    return torch.sum(0.25 * torch.sum(x)**4)
We instantiate it:
Arows = 2
Acols = 2
mlp = SimpleMLP(Arows, Acols)
Finally, I'm going to define a "forward" function (distinct from the model's forward function) that will serve as the full model+loss that we want to analyze:
def forward(*params_list):
    for param_val, model_param in zip(params_list, mlp.parameters()):
        model_param.data = param_val
    x = torch.ones((Arows,))
    return objective(mlp(x))
This passes a ones vector through the single-layer "mlp" and feeds the output into our loss function.
Now, I attempt to compute:
v = torch.ones((6,))
v_tensors = []
idx = 0
# this code "reshapes" the v vector as needed
for i, param in enumerate(mlp.parameters()):
    numel = param.numel()
    v_tensors.append(torch.reshape(torch.tensor(v[idx:idx+numel]), param.shape))
    idx += numel
And finally:
param_tensors = tuple(mlp.parameters())
reshaped_v = tuple(v_tensors)
soln = torch.autograd.functional.hvp(forward, param_tensors, v=reshaped_v)
But, alas, the Hessian-Vector Product in soln is all 0's. What is happening?
What's happening is that strict is False by default in the hvp() function and a tensor of 0's is returned as the Hessian Vector Product instead of an error (source).
If you try with strict=True, an error RuntimeError: The output of the user-provided function is independent of input 0. This is not allowed in strict mode. is returned instead. And when I looked at the full error, I suspect that this error comes from _check_requires_grad(jac, "jacobian", strict=strict) which indicates that the jacobian jac is None.
Update:
Following is a full working example:
import torch
from torch import nn

# your loss function
def objective(x):
    return torch.sum(0.25 * torch.sum(x)**4)

# Following are utilities to make nn.Module functional
# borrowed from the link I posted in comment
def del_attr(obj, names):
    if len(names) == 1:
        delattr(obj, names[0])
    else:
        del_attr(getattr(obj, names[0]), names[1:])

def set_attr(obj, names, val):
    if len(names) == 1:
        setattr(obj, names[0], val)
    else:
        set_attr(getattr(obj, names[0]), names[1:], val)

def make_functional(mod):
    orig_params = tuple(mod.parameters())
    # Remove all the parameters in the model
    names = []
    for name, p in list(mod.named_parameters()):
        del_attr(mod, name.split("."))
        names.append(name)
    return orig_params, names

def load_weights(mod, names, params):
    for name, p in zip(names, params):
        set_attr(mod, name.split("."), p)

# your forward function with update
def forward(*new_params):
    # this line replaces your for loop
    load_weights(mlp, names, new_params)
    x = torch.ones((Arows,))
    out = mlp(x)
    loss = objective(out)
    return loss

# your simple MLP model
class SimpleMLP(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(in_dim, out_dim),
        )

    def forward(self, x):
        '''Forward pass'''
        return self.layers(x)

if __name__ == '__main__':
    # your model instantiation
    Arows = 2
    Acols = 2
    mlp = SimpleMLP(Arows, Acols)

    # your vector computation
    v = torch.ones((6,))
    v_tensors = []
    idx = 0
    # this code "reshapes" the v vector as needed
    for i, param in enumerate(mlp.parameters()):
        numel = param.numel()
        v_tensors.append(torch.reshape(torch.tensor(v[idx:idx+numel]), param.shape))
        idx += numel
    reshaped_v = tuple(v_tensors)

    # make the model's parameters functional
    params, names = make_functional(mlp)
    params = tuple(p.detach().requires_grad_() for p in params)

    # compute hvp
    soln = torch.autograd.functional.vhp(forward, params, reshaped_v, strict=True)
    print(soln)
Did you try it with doubles instead of floats? I did some tests of my own that showed a fairly large error when backpropagating with 32-bit floats (on the order of 1e-5) compared to doubles.
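If you want to try that comparison, here is a hedged sketch of repeating the check in float64, reusing SimpleMLP, objective, make_functional, load_weights, and reshaped_v from the full example above (illustrative only, not part of the original answer):

# promote the model to float64 and recompute the vhp
mlp = SimpleMLP(Arows, Acols).double()

def forward64(*new_params):
    load_weights(mlp, names, new_params)
    x = torch.ones((Arows,), dtype=torch.float64)  # inputs must match the float64 weights
    return objective(mlp(x))

params, names = make_functional(mlp)
params = tuple(p.detach().requires_grad_() for p in params)
reshaped_v64 = tuple(t.double() for t in reshaped_v)

soln64 = torch.autograd.functional.vhp(forward64, params, reshaped_v64, strict=True)
print(soln64)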
I was following this tutorial and after creating the classes I still cannot import the desired module. The code I used:
import numpy as np
import sys

class ForwardEuler:
    def __init__(self, f):
        # test that f is a function
        if not callable(f):
            raise TypeError('f is %s, not a function' % type(f))
        self.f = f

    def set_initial_condition(self, U0):
        self.U0 = float(U0)

    def solve(self, time_points):
        """Compute u for t values in time_points list."""
        self.t = np.asarray(time_points)
        self.u = np.zeros(len(time_points))
        self.u[0] = self.U0
        for k in range(len(self.t)-1):
            self.k = k
            self.u[k+1] = self.advance()
        return self.u, self.t

    def advance(self):
        """Advance the solution one time step."""
        u, f, k, t = self.u, self.f, self.k, self.t
        dt = t[k+1] - t[k]
        unew = u[k] + dt*f(u[k], t[k])
        return unew

class ODESolver:
    def __init__(self, f):
        self.f = f

    def advance(self):
        """Advance solution one time step."""
        raise NotImplementedError  # implement in subclass

    def set_initial_condition(self, U0):
        self.U0 = float(U0)

    def solve(self, time_points):
        self.t = np.asarray(time_points)
        self.u = np.zeros(len(self.t))
        # Assume that self.t[0] corresponds to self.U0
        self.u[0] = self.U0
        # Time loop
        n = len(self.t)
        for k in range(n-1):
            self.k = k
            self.u[k+1] = self.advance()
        return self.u, self.t

    def advance(self):
        raise NotImplementedError  # to be impl. in subclasses

class ForwardEuler(ODESolver):
    def advance(self):
        u, f, k, t = self.u, self.f, self.k, self.t
        dt = t[k+1] - t[k]
        unew = u[k] + dt*f(u[k], t[k])
        return unew
Now, I want from ODESolver import ForwardEuler, but there is no module named ODESolver. How do I create it? I suppose there must be something with if __name__ == '__main__': and then the classes underneath, but that didn't work either.
The name of the .py file where you are writing this code is the name of the module.
You must name it ODESolver.py.
Then you can do:
from ODESolver import ForwardEuler
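For instance, a minimal sketch of the layout (the script name and the example right-hand side are just illustrative):

# ODESolver.py  -> contains the ODESolver and ForwardEuler classes above

# main.py, in the same directory
from ODESolver import ForwardEuler

solver = ForwardEuler(lambda u, t: -u)  # simple test problem: u' = -u
solver.set_initial_condition(1.0)
u, t = solver.solve([0.0, 0.1, 0.2, 0.3])
print(u)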
I am trying to implement the KMeans algorithm as a class, and after finishing the code I tested it by creating an object and then fitting it on the dataset. But I get the following error:
self.distances = np.zeros((self.N, self.n_clusters))
TypeError: only integer scalar arrays can be converted to a scalar index
import numpy as np
from abc import ABC, abstractmethod

class KMeansInterface(ABC):
    @abstractmethod
    def fit(self, X):
        pass

    @abstractmethod
    def predict(self, X):
        pass

class Kmeans(KMeansInterface):
    def __init__(self, n_clusters):
        self.n_clusters = n_clusters
        self.labels = None
        self.distances = None
        self.N = X.shape[0]
        self.data_points = X
        self.prev_labels = None

    def choose_clusters(self, k, X):
        self.n_clusters = k
        size = X.shape[0]
        numbers = np.random.choice(size, k, replace=False)
        return X[numbers]

    def calculate_sum_squared_distance(self, data_points, clusters):
        self.distances = np.zeros((self.N, self.n_clusters))
        for i, centroid in enumerate(clusters):
            distance = np.sum(np.square(data_points-centroid), axis=1)
            self.distances[:, i] = distance
        return self.distances

    def calculate_closest_clusters(self, distances):
        self.closest_clusters = np.argmin(self.distances, axis=1)
        return self.closest_clusters

    def update_clusters(self, labels, data_points):
        new_clusters = []
        for i in range(self.n_clusters):
            points_in_cluster = data_points[labels==i]
            values = np.mean(points_in_cluster, axis=0)
            new_clusters.append(values)
        return np.array(new_clusters)

    def fit(self, X):
        self.n_clusters = self.choose_clusters(self.n_clusters, X)
        for i in range(20000):
            self.distances = self.calculate_sum_squared_distance(X, self.n_clusters)
            self.labels = self.calculate_closest_clusters(self.distances)
            self.new_cluster = self.update_clusters(self.labels, X)
            if self.prev_labels is not None:
                if np.all(self.prev_labels == self.labels):
                    break
            self.prev_labels = self.labels
        fitted = True
        return self.labels

    def predict(self, X, clusters):
        self.distances = self.calculate_sum_squared_distance(X, clusters)
        self.labels = self.calculate_closest_clusters(self.distances)
        return self.labels
kmean_object = KMeans(2)
kmean_object.fit(X)