Why isn't the output recognized with promotion? - python

I am trying to switch from 'connect' to 'promotes' in the paraboloid example (without the constraint), and I get the following error:
Output not found for response 'parab.f_xy' in system ''.
Feel free to have a look at the code below; I tried to make it similar to the approach explained in "Linking Variables with Promotion vs. Connection". The code is self-contained and does not depend on the released Paraboloid component.
from openmdao.api import Problem, ScipyOptimizeDriver, IndepVarComp
from openmdao.core.explicitcomponent import ExplicitComponent

class Paraboloid(ExplicitComponent):
    def setup(self):
        self.add_input('x', val=0.0)
        self.add_input('y', val=0.0)
        self.add_output('f_xy', val=0.0)
        self.declare_partials('*', '*')

    def compute(self, inputs, outputs):
        x = inputs['x']
        y = inputs['y']
        outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0

    def compute_partials(self, inputs, partials):
        x = inputs['x']
        y = inputs['y']
        partials['f_xy', 'x'] = 2.0*x - 6.0 + y
        partials['f_xy', 'y'] = 2.0*y + 8.0 + x

# build the model
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', 3.0)
indeps.add_output('y', -4.0)

prob.model.add_subsystem('parab', Paraboloid(),
                         promotes_inputs=['x', 'y'], promotes_outputs=['f_xy'])

# setup the optimization
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'

prob.model.add_design_var('indeps.x', lower=-50, upper=50)
prob.model.add_design_var('indeps.y', lower=-50, upper=50)
prob.model.add_objective('parab.f_xy')

prob.setup()
prob.run_driver()

Because you promoted 'f_xy' up from the 'parab' component, your objective should now be named 'f_xy' instead of 'parab.f_xy', and because you promoted 'x' and 'y' up from the 'indeps' component, your design variables should be named 'x' and 'y' instead of 'indeps.x' and 'indeps.y'.
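In other words, with everything promoted to the top level, the driver setup becomes:

prob.model.add_design_var('x', lower=-50, upper=50)
prob.model.add_design_var('y', lower=-50, upper=50)
prob.model.add_objective('f_xy')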

Related

PyTorch RuntimeError about in-place operation

I am developing a policy-gradient NN with PyTorch (version 1.10.1) and I am getting the following runtime error:
RuntimeError: one of the variables needed for gradient computation has been modified by an in-place operation: [torch.FloatTensor [1, 15]] is at version 1; expected version 0 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
I have read some similar discussions: people suggest avoiding in-place operations such as a += 1, downgrading PyTorch, or using clone() instead of modifying a tensor in place. I have tried them all, but I still get this error. The error does not appear every time update() is called; sometimes it works fine. Why does this happen?
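For background, here is a minimal repro of this class of error (illustrative only, not taken from the question's code):

import torch

w = torch.ones(3, requires_grad=True)
y = w * w           # intermediate tensor
z = y.sin()         # sin saves y for its backward pass (d sin(y)/dy = cos(y))
y += 1              # in-place edit bumps y's version counter
z.sum().backward()  # raises "modified by an inplace operation"; y = y + 1 would be safe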
My related code is below. There are some odd variable names, such as pre_R; I used them to avoid in-place operations such as a += 1 (writing a = a + 1 instead).
NN class:

# imports needed for the snippets below
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

class NN(nn.Module):
    """
    Feel free to change the architecture for different tasks!
    """
    def __init__(self, env):
        super(NN, self).__init__()
        # 15 in this case
        self.state_size = 15
        # 31 (1 and -1 for each task-server pair, and void) (m*n*2 + 1)
        self.action_size = 31
        self.linear1 = nn.Linear(self.state_size, 128)
        self.linear2 = nn.Linear(128, 256)
        self.linear3 = nn.Linear(256, self.action_size)

    def forward(self, state):
        output1 = F.relu(self.linear1(state))
        output2 = F.relu(self.linear2(output1.clone()))
        output3 = self.linear3(output2.clone())
        # Note the conversion to a PyTorch distribution.
        distribution = Categorical(F.softmax(output3.clone(), dim=-1))
        return distribution
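For context, this is how such a distribution is typically consumed in REINFORCE-style training; the sketch is illustrative and not part of the question's code:

policy = NN(env=None)              # env is accepted but unused in __init__
state = torch.randn(15)
dist = policy(state)
action = dist.sample()             # sampled action index
log_prob = dist.log_prob(action)   # differentiable; collected into log_probs for update()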
Reinforcement-learning agent code:
class Agent():
    def __init__(self, env, lr, gamma):
        self.env = env
        self.NN = NN(env)
        self.lr = lr
        self.optim_NN = optim.Adam(self.NN.parameters(), lr=self.lr)
        self.gamma = gamma

    def update(self, log_probs, returns):
        with torch.autograd.set_detect_anomaly(True):
            print("updating")
            baselines = self.compute_baselines(returns.clone())
            loss = self.compute_loss(log_probs.clone(), returns, baselines)
            self.optim_NN.zero_grad()
            loss.backward()
            self.optim_NN.step()

    def compute_returns(self, rewards):
        R = 0
        returns = []
        for r in rewards[::-1]:
            pre_R = R
            R = r + self.gamma * pre_R
            returns.insert(0, R)
        returns = torch.tensor(returns)
        return returns

    def compute_baselines(self, returns):
        baselines = []
        baselines.append(returns[0])
        for v in returns:
            t = len(baselines)
            b = (baselines[t-1]*t + v) / (t+1)
            baselines.append(b)
        return baselines

    def compute_loss(self, log_probs, returns, baselines):
        with torch.autograd.set_detect_anomaly(True):
            loss = 0
            for i in range(0, len(returns)):
                l = log_probs[i].clone()
                r = returns[i].clone()
                b = baselines[i].clone()
                pre_loss = loss
                loss = pre_loss + (-l*(r-b))
                # losses.append(loss)
                # losses.append(-log_probs[i].clone()*(returns[i].clone()-baselines[i].clone()))
            policy_loss = loss
            return policy_loss

How to solve the "non-numeric type in numeric context" error in Pyomo (Python)

This is the code. I think the solver could be glpk instead of gurobi; I get the error before it even tries to solve the problem.
from pyomo.environ import *
from pyomo.opt import SolverFactory, SolverStatus

PrintSolverOutput = False  ###

model = ConcreteModel()
model.dual = Suffix(direction=Suffix.IMPORT)

model.X = Var(I, T, within=NonNegativeReals)
model.In = Var(I, T, within=NonNegativeReals)
model.S = Var(T, within=NonNegativeReals)

def Objetivo(model):
    return (sum(C[i]*model.X[i,t]*((1+tau[i])**(t-1)) + Ch[i]*model.In[i,t]
                for i in I for t in T)
            + sum(Cs*model.S[t] for t in T))

model.Total_Cost = Objective(rule=Objetivo, sense=minimize)

def Balance(model, i, t):
    if t == 1:
        return model.X[i,t] + I0[i] - model.In[i,t] == D[i,t]
    elif t >= 2:
        return model.X[i,t] + model.IN[i,t-1] - model.In[i,t] == D[i,t]

def Horas(model, t):
    return sum(r[i]*model.X[i,t] for i in I) <= H + model.S[t]

def Limite(model, t):
    return model.S[T] <= LS

model.RBalance = Constraint(I, T, rule=Balance)
model.RHoras = Constraint(T, rule=Horas)
model.RLimiteoras = Constraint(T, rule=Limite)

opt = SolverFactory("gurobi")
This is the data (I forgot to include it before):
T = [1,2,3,4,5,6]
I = ['A','B']
D = {('A',1):300, ('A',2):400, ('A',3):500, ('A',4):600, ('A',5):800, ('A',6):700,
     ('B',1):700, ('B',2):600, ('B',3):500, ('B',4):400, ('B',5):300, ('B',6):400}
tau = {'A':0.02, 'B':0.01}
r = {'A':5, 'B':3}
H = 3520
LS = 800
C = {'A':150, 'B':120}
Ch = {'A':8, 'B':4}
Cs = 6
I0 = {'A':100, 'B':250}
The code is from a YouTube tutorial, and it worked for him but not for me. Why?
Aside from two typos in your code (model.IN in the Balance rule should be model.In, and model.S[T] in the Limite rule should be model.S[t]), this model builds and solves for me without that error. Make those fixes and run it again; if you're still stuck, re-post the exact error with the line number and the exact code that produces it...
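For concreteness, here is a sketch of those two rules with the fixes applied (using the data definitions posted above):

def Balance(model, i, t):
    if t == 1:
        return model.X[i,t] + I0[i] - model.In[i,t] == D[i,t]
    # model.In (not model.IN): carry the inventory from period t-1
    return model.X[i,t] + model.In[i,t-1] - model.In[i,t] == D[i,t]

def Limite(model, t):
    # index with the scalar t, not the whole set T
    return model.S[t] <= LS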

How can I incorporate PReLU in a quantized model?

I'm trying to quantize a model which uses PReLU. Replacing PReLU with ReLU is not possible, as it drastically degrades the network's performance, to the point that the model is useless.
As far as I know, PReLU is not supported in PyTorch quantization. So I tried to rewrite this module manually and implement the multiplications and additions using nn.quantized.FloatFunctional() to get around this limitation.
This is what I have come up with so far:
class PReLU_Quantized(nn.Module):
    def __init__(self, prelu_object):
        super().__init__()
        self.weight = prelu_object.weight
        self.quantized_op = nn.quantized.FloatFunctional()
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, inputs):
        # inputs = torch.max(0, inputs) + self.weight * torch.min(0, inputs)
        self.weight = self.quant(self.weight)
        weight_min_res = self.quantized_op.mul(self.weight, torch.min(inputs)[0])
        inputs = self.quantized_op.add(torch.max(inputs)[0], weight_min_res).unsqueeze(0)
        self.weight = self.dequant(self.weight)
        return inputs
and for the replacement:

class model(nn.Module):
    def __init__(self):
        super().__init__()
        ....
        self.prelu = PReLU()
        self.prelu_q = PReLU_Quantized(self.prelu)
        ....
Basically, I read the learned parameter of the existing PReLU module and run the calculation myself in a new module. The module seems to be working, in the sense that it is not failing the whole application.
However, in order to assess whether my implementation is actually correct and yields the same result as the original module, I tried to test it with a counterpart for normal models (i.e. not quantized; shown further below).
For some reason, the error between the actual PReLU and my implementation is very large!
Here are sample diffs in different layers:
diff : 1.1562038660049438
diff : 0.02868632599711418
diff : 0.3653906583786011
diff : 1.6100226640701294
diff : 0.8999372720718384
diff : 0.03773299604654312
diff : -0.5090572834014893
diff : 0.1654307246208191
diff : 1.161868691444397
diff : 0.026089997962117195
diff : 0.4205571115016937
diff : 1.5337920188903809
diff : 0.8799554705619812
diff : 0.03827812895178795
diff : -0.40296515822410583
diff : 0.15618863701820374
and the diff is calculated like this in the forward pass:
def forward(self, x):
    residual = x
    out = self.bn0(x)
    out = self.conv1(out)
    out = self.bn1(out)
    out = self.prelu(out)
    out2 = self.prelu2(out)
    print(f'diff : {( out - out2).mean().item()}')
    out = self.conv2(out)
    ...
This is the normal implementation which I used on an ordinary model (i.e. not quantized!) to assess whether it produces the correct result, before moving on to the quantized version:
class PReLU_2(nn.Module):
    def __init__(self, prelu_object):
        super().__init__()
        self.prelu_weight = prelu_object.weight
        self.weight = self.prelu_weight

    def forward(self, inputs):
        x = self.weight
        tmin, _ = torch.min(inputs, dim=0)
        tmax, _ = torch.max(inputs, dim=0)
        weight_min_res = torch.mul(x, tmin)
        inputs = torch.add(tmax, weight_min_res)
        inputs = inputs.unsqueeze(0)
        return inputs
What am I missing here?
I figured it out! I made a huge mistake at the very beginning. I needed to calculate

PReLU(x) = max(0, x) + a * min(0, x)

elementwise, and not use the actual torch.min or torch.max reductions, which make no sense here: for x = [-2.0, 3.0] and a = 0.25, elementwise PReLU gives [-0.5, 3.0], whereas torch.min(x) and torch.max(x) collapse the whole tensor to the scalars -2.0 and 3.0.
Here is the final solution for normal models (i.e. not quantized):
class PReLU_2(nn.Module):
    def __init__(self, prelu_object):
        super().__init__()
        self.prelu_weight = prelu_object.weight
        self.weight = self.prelu_weight

    def forward(self, inputs):
        pos = torch.relu(inputs)
        neg = -self.weight * torch.relu(-inputs)
        inputs = pos + neg
        return inputs
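As a quick sanity check (a minimal sketch; the shapes are illustrative), this agrees with the built-in nn.PReLU:

import torch
import torch.nn as nn

prelu = nn.PReLU()
prelu2 = PReLU_2(prelu)
x = torch.randn(4, 8)
# the maximum absolute disagreement should be ~0
print((prelu(x) - prelu2(x)).abs().max().item())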
and this is the quantized version:

class PReLU_Quantized(nn.Module):
    def __init__(self, prelu_object):
        super().__init__()
        self.prelu_weight = prelu_object.weight
        self.weight = self.prelu_weight
        self.quantized_op = nn.quantized.FloatFunctional()
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, inputs):
        # inputs = max(0, inputs) + alpha * min(0, inputs)
        self.weight = self.quant(self.weight)
        weight_min_res = self.quantized_op.mul(-self.weight, torch.relu(-inputs))
        inputs = self.quantized_op.add(torch.relu(inputs), weight_min_res)
        inputs = self.dequant(inputs)
        self.weight = self.dequant(self.weight)
        return inputs
Side note:
I also had a typo where I was calculating the diff:
out = self.prelu(out)
out2 = self.prelu2(out)
print(f'diff : {( out - out2).mean().item()}')
out = self.conv2(out)
needs to be
out1 = self.prelu(out)
out2 = self.prelu2(out)
print(f'diff : {( out1 - out2).mean().item()}')
out = self.conv2(out1)
Update:
In case you face issues in quantization, you may try this version:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.quantized as nnq
from torch.quantization import fuse_modules

class QPReLU(nn.Module):
    def __init__(self, num_parameters=1, init: float = 0.25):
        super(QPReLU, self).__init__()
        self.num_parameters = num_parameters
        self.weight = nn.Parameter(torch.Tensor(num_parameters).fill_(init))
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.f_mul_neg_one1 = nnq.FloatFunctional()
        self.f_mul_neg_one2 = nnq.FloatFunctional()
        self.f_mul_alpha = nnq.FloatFunctional()
        self.f_add = nnq.FloatFunctional()
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()
        self.quant2 = torch.quantization.QuantStub()
        self.quant3 = torch.quantization.QuantStub()
        # self.dequant2 = torch.quantization.QuantStub()
        self.neg_one = torch.Tensor([-1.0])

    def forward(self, x):
        x = self.quant(x)
        # PReLU, with modules only
        x1 = self.relu1(x)
        neg_one_q = self.quant2(self.neg_one)
        weight_q = self.quant3(self.weight)
        x2 = self.f_mul_alpha.mul(
            weight_q, self.f_mul_neg_one2.mul(
                self.relu2(
                    self.f_mul_neg_one1.mul(x, neg_one_q),
                ),
                neg_one_q)
        )
        x = self.f_add.add(x1, x2)
        x = self.dequant(x)
        return x

m1 = nn.PReLU()
m2 = QPReLU()

# check correctness in fp
for i in range(10):
    data = torch.randn(2, 2) * 1000
    assert torch.allclose(m1(data), m2(data))

# toy model
class M(nn.Module):
    def __init__(self):
        super(M, self).__init__()
        self.prelu = QPReLU()

    def forward(self, x):
        x = self.prelu(x)
        return x

# quantize it
m = M()
m.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(m, inplace=True)

# calibrate
m(torch.randn(4, 4))

# convert
torch.quantization.convert(m, inplace=True)

# run some data through
res = m(torch.randn(4, 4))
print(res)
and make sure to read the related notes here.

OpenMDAO/ScipyOptimizer "UnboundLocalError: local variable 'f_new' referenced before assignment"

I'm trying to run the paraboloid example from the OpenMDAO documentation, but using the ScipyOptimizer, and I get the error in the title. Not sure what I'm doing wrong. I've attached an MWE below.
OpenMDAO 1.7.3, Python 3.6.0.
# -*- coding: utf-8 -*-
"""
Paraboloid tutorial from OpenMDAO documentation:
https://openmdao.readthedocs.io/en/latest/usr-guide/tutorials/paraboloid-tutorial.html
"""

# import print function from Python 3 if we are using Python 2
from __future__ import print_function

# import all the components we need from openmdao module
from openmdao.api import IndepVarComp, Component, Problem, Group, ScipyOptimizer

class Paraboloid(Component):
    """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """
    def __init__(self):
        super(Paraboloid, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y', val=0.0)
        self.add_output('f_xy', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3"""
        x = params['x']
        y = params['y']
        unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0

    def linearize(self, params, unknowns, resids):
        """ Jacobian for our paraboloid."""
        x = params['x']
        y = params['y']
        J = {}
        J['f_xy', 'x'] = 2.0*x - 6.0 + y
        J['f_xy', 'y'] = 2.0*y + 8.0 + x
        return J

# This if statement executes only if the script is run as a script, not if the
# script is imported as a module. It's not necessary for this demo, but it
# is good coding practice.
if __name__ == "__main__":
    # initialize the overall problem
    top = Problem()

    # each problem has a root "group" that contains the component evaluating
    # the objective function
    root = top.root = Group()

    # define components with the independent variables and their initial guesses
    root.add('p1', IndepVarComp('x', 3.0))
    root.add('p2', IndepVarComp('y', -4.0))

    # add the component that defines our objective function
    root.add('p', Paraboloid())

    # connect the components together
    root.connect('p1.x', 'p.x')
    root.connect('p2.y', 'p.y')

    # specify our driver for the problem (optional)
    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['disp'] = True  # display optimizer output

    # set up the problem
    top.setup()

    # run the optimization
    top.run()

    print(top['p.f_xy'])
I figured it out. In a classic moment of brain-fartiness, I forgot to add my design variables and objective function to the driver. Something like this is what I was missing:
top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'
top.driver.add_desvar('p1.x', lower=-50, upper=50)
top.driver.add_desvar('p2.y', lower=-50, upper=50)
top.driver.add_objective('p.f_xy')

Python milk library: object weights issue

I'm trying to use a one_vs_one composition of decision trees for multiclass classification. The problem is that when I pass different object weights to a classifier, the result stays the same.
Do I misunderstand something about weights, or do they just work incorrectly?
Thanks for your replies!
Here is my code:
# log and exp are used in the boosting update below
from math import log, exp

class AdaLearner(object):
    def __init__(self, in_base_type, in_multi_type):
        self.base_type = in_base_type
        self.multi_type = in_multi_type

    def train(self, in_features, in_labels):
        model = AdaBoost(self.base_type, self.multi_type)
        model.learn(in_features, in_labels)
        return model

class AdaBoost(object):
    CLASSIFIERS_NUM = 100

    def __init__(self, in_base_type, in_multi_type):
        self.base_type = in_base_type
        self.multi_type = in_multi_type
        self.classifiers = []
        self.weights = []

    def learn(self, in_features, in_labels):
        labels_number = len(set(in_labels))
        self.weights = self.get_initial_weights(in_labels)
        for iteration in xrange(AdaBoost.CLASSIFIERS_NUM):
            classifier = self.multi_type(self.base_type())
            self.classifiers.append(classifier.train(in_features,
                                                     in_labels,
                                                     weights=self.weights))
            answers = []
            for obj in in_features:
                answers.append(self.classifiers[-1].apply(obj))
            err = self.compute_weighted_error(in_labels, answers)
            print err
            if abs(err - 0.) < 1e-6:
                break
            alpha = 0.5 * log((1 - err)/err)
            self.update_weights(in_labels, answers, alpha)
            self.normalize_weights()

    def apply(self, in_features):
        answers = {}
        for classifier in self.classifiers:
            answer = classifier.apply(in_features)
            if answer in answers:
                answers[answer] += 1
            else:
                answers[answer] = 1
        ranked_answers = sorted(answers.iteritems(),
                                key=lambda (k, v): (v, k),
                                reverse=True)
        return ranked_answers[0][0]

    def compute_weighted_error(self, in_labels, in_answers):
        error = 0.
        w_sum = sum(self.weights)
        for ind in xrange(len(in_labels)):
            error += (in_answers[ind] != in_labels[ind]) * self.weights[ind] / w_sum
        return error

    def update_weights(self, in_labels, in_answers, in_alpha):
        for ind in xrange(len(in_labels)):
            self.weights[ind] *= exp(in_alpha * (in_answers[ind] != in_labels[ind]))

    def normalize_weights(self):
        w_sum = sum(self.weights)
        for ind in xrange(len(self.weights)):
            self.weights[ind] /= w_sum

    def get_initial_weights(self, in_labels):
        weight = 1 / float(len(in_labels))
        result = []
        for i in xrange(len(in_labels)):
            result.append(weight)
        return result
As you can see, it is just a simple AdaBoost (I instantiated it with in_base_type = tree_learner, in_multi_type = one_against_one), and it worked the same way no matter how many base classifiers were engaged; it just acted as one multiclass decision tree.
Then I made a hack: on each iteration I chose a random sample of objects with respect to their weights, and trained classifiers on that random subset without any weights. That worked as it was supposed to.
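That hack amounts to weighted bootstrap resampling; a minimal sketch of the idea (numpy-based, with illustrative names):

import numpy as np

def weighted_resample(features, labels, weights):
    # draw a bootstrap sample where each object is picked with
    # probability proportional to its boosting weight
    probs = np.asarray(weights, dtype=float)
    probs /= probs.sum()
    idx = np.random.choice(len(labels), size=len(labels), replace=True, p=probs)
    return [features[i] for i in idx], [labels[i] for i in idx]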
The default tree criterion, namely information gain, does not take the weights into account. If you know of a formula which would do it, I'll implement it.
In the meantime, using neg_z1_loss will do it correctly. By the way, there was a slight bug in that implementation, so you will need to use the most current github master.
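That would look something like the sketch below; the module paths and the criterion keyword are assumptions based on a reading of the milk source, so verify them against the current github master:

from milk.supervised import tree
from milk.supervised.multi import one_against_one

# assumption: tree_learner accepts a split criterion, and neg_z1_loss
# (the weight-aware criterion suggested above) lives in milk.supervised.tree
base = tree.tree_learner(criterion=tree.neg_z1_loss)
learner = one_against_one(base)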
