Pyomo ValueError: Error retrieving component - python

My code is the following:
from coopr.pyomo import *
import numpy as np
from scipy.optimize import minimize
import math
model = ConcreteModel()
model.days = RangeSet(1, 31) #model.time)
T = model.days
M_b1_O_stored_T = Var(T,bounds=(0, None))
def obj_rule(model):
    return sum( M_b1_O_stored_T[i] for i in model.days )
model.funcobj = Objective( rule =obj_rule , sense=maximize)
It shows the following error: ValueError: Error retrieving component IndexedVar[1]: The component has not been constructed.
Can anyone please help me with this? The constraints don't raise any problem, but the objective function does...

Welcome to the site...
You neglected to put your variable "into the model" with the model. prefix. Note my fix below in both the declaration and in your objective function.
from pyomo.environ import *
# from coopr.pyomo import *
# import numpy as np
# from scipy.optimize import minimize
# import math
model = ConcreteModel()
model.days = RangeSet(1, 31) #model.time)
# T = model.days
model.M_b1_O_stored_T = Var(model.days,bounds=(0, None))
def obj_rule(model):
    return sum( model.M_b1_O_stored_T[i] for i in model.days )
model.funcobj = Objective( rule =obj_rule , sense=maximize)
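As a hedged sketch only (not part of the original question): this is where the unshown constraints and a solve would slot in. The cap below is purely illustrative, and a solver such as GLPK is assumed to be installed; note that without some upper bound the maximize objective is unbounded, so the constraints the question mentions are what make the model solvable.
# Illustrative only: the question's real constraints are not shown.
def storage_cap_rule(model, i):
    return model.M_b1_O_stored_T[i] <= 100.0   # hypothetical daily upper bound
model.storage_cap = Constraint(model.days, rule=storage_cap_rule)

SolverFactory('glpk').solve(model)             # any installed LP solver works
print(value(model.funcobj))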

Related

How to use tfp.density.Mixture with JointDistributionCoroutine

I'm trying to define a model function for MCMC.
The idea is to have a mixture of two distributions controlled with a probability ratio.
One of my attempts would look like this:
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
root = tfd.JointDistributionCoroutine.Root
def model_fn():
    rv_p = yield root(tfd.Sample(tfd.Uniform(0.0,1.0),1))
    catprobs = tf.stack([rv_p, 1.-rv_p],0)
    rv_cat = tfd.Categorical(probs=catprobs)
    rv_norm1 = tfd.Sample(tfd.Normal(0.0,1.0),1)
    rv_norm2 = tfd.Sample(tfd.Normal(3.0,1.0),1)
    rv_mix = yield tfd.Mixture(cat=rv_cat,
                               components=[
                                   rv_norm1,
                                   rv_norm2,
                               ])

jd = tfd.JointDistributionCoroutine(model_fn)
jd.sample(2)
The code fails with:
ValueError: components[0] batch shape must be compatible with cat shape and other component batch shapes ((2, 2) vs ())
Could you give me an example of how to use Mixture distribution in a way that allows "any" shape of inputs?
I'm using tensorflow 2.4.1 and tensorflow_probability 0.12.1 with python 3.6
I figured it out. For reference, here is a sample of code that works:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
tfd = tfp.distributions
tfb = tfp.bijectors
import numpy as np
from time import time
numdata = 10000
data = np.random.normal(0.0,1.0,numdata).astype(np.float32)
data[int(numdata/2):] = 0.0
_=plt.hist(data,30,density=True)
root = tfd.JointDistributionCoroutine.Root
def dist_fn(rv_p,rv_mu):
    rv_cat = tfd.Categorical(probs=tf.stack([rv_p, 1.-rv_p],-1))
    rv_norm = tfd.Normal(rv_mu,1.0)
    rv_zero = tfd.Deterministic(tf.zeros_like(rv_mu))
    rv_mix = tfd.Independent(
        tfd.Mixture(cat=rv_cat,
                    components=[rv_norm,rv_zero]),
        reinterpreted_batch_ndims=1)
    return rv_mix

def model_fn():
    rv_p = yield root(tfd.Sample(tfd.Uniform(0.0,1.0),1))
    rv_mu = yield root(tfd.Sample(tfd.Uniform(-1.,1. ),1))
    rv_mix = yield dist_fn(rv_p,rv_mu)
jd = tfd.JointDistributionCoroutine(model_fn)
unnormalized_posterior_log_prob = lambda *args: jd.log_prob(args + (data,))
n_chains = 1
p_init = [0.3]
p_init = tf.cast(p_init,dtype=tf.float32)
mu_init = 0.1
mu_init = tf.stack([mu_init]*n_chains,axis=0)
initial_chain_state = [
    p_init,
    mu_init,
]
bijectors = [
    tfb.Sigmoid(), # p
    tfb.Identity(), # mu
]
step_size = 0.01
num_results = 50000
num_burnin_steps = 50000
kernel=tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_posterior_log_prob,
        num_leapfrog_steps=2,
        step_size=step_size,
        state_gradients_are_stopped=True),
    bijector=bijectors)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=kernel, num_adaptation_steps=int(num_burnin_steps * 0.8))
#XLA optim
#tf.function(autograph=False, experimental_compile=True)
def graph_sample_chain(*args, **kwargs):
    return tfp.mcmc.sample_chain(*args, **kwargs)
st = time()
trace,stats = graph_sample_chain(
    num_results=num_results,
    num_burnin_steps=num_burnin_steps,
    current_state=initial_chain_state,
    kernel=kernel)
et = time()
print(et-st)
ptrace, mutrace = trace
plt.subplot(121)
_=plt.hist(ptrace.numpy(),100,density=True)
plt.subplot(122)
_=plt.hist(mutrace.numpy(),100,density=True)
print(np.mean(ptrace),np.mean(mutrace))
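Distilled to the shape fix alone, here is a minimal sketch of the same idea (illustrative names, not code from the original post): the Categorical built from rv_p and every mixture component need compatible batch shapes, which the code above achieves by parameterizing the components with the sampled rv_mu instead of using fixed, unbatched distributions.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
root = tfd.JointDistributionCoroutine.Root

def mix_fn():
    rv_p = yield root(tfd.Uniform(0.0, 1.0, name='p'))
    rv_mu = yield root(tfd.Uniform(-1.0, 1.0, name='mu'))
    # Both components inherit their batch shape from rv_mu, so they stay
    # compatible with the Categorical built from rv_p for any sample shape.
    yield tfd.Mixture(
        cat=tfd.Categorical(probs=tf.stack([rv_p, 1. - rv_p], axis=-1)),
        components=[tfd.Normal(rv_mu, 1.0),
                    tfd.Normal(tf.zeros_like(rv_mu), 1.0)])

jd = tfd.JointDistributionCoroutine(mix_fn)
print(jd.sample(3))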

`loss` passed to Optimizer.compute_gradients should be a function when eager execution is enabled

I am new to TensorFlow and have just started learning and understanding it.
I am working on neural style transfer problem and I am using tensorflow version 1.14.
I am getting an error loss passed to Optimizer.compute_gradients should be a function when eager execution is enabled.
I tried to solve the problem by using TensorFlow graph instead of eager execution, but it's not working. I want to use eager execution because it looks like a more pythonic way.
Here is my code; sorry for posting the whole thing. Please suggest corrections to it.
import scipy
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy import misc
from skimage.transform import resize
from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input
from tensorflow.keras import backend as K
tf.enable_eager_execution()
print('Eager execution {}'.format(tf.executing_eagerly()))
content_path = '800px-Green_Sea_Turtle_grazing_seagrass.jpg'
style_path = '800px-The_Great_Wave_off_Kanagawa.jpg'
content_img = plt.imread(content_path)
plt.imshow(content_img)
style_img = plt.imread(style_path)
plt.imshow(style_img)
MEANS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
content_img = resize(content_img, (552,800,3)) #resized content img because style img has shape (552,800,3)
content_img = np.array(content_img)
content_img = np.reshape(content_img, ((1,)+content_img.shape))
style_img = np.array(style_img)
style_img = np.reshape(style_img, ((1,)+style_img.shape))
noise_img= np.random.uniform(-20,20,(1,552,800,3)).astype('float32')
generated_img = noise_img*0.6 + content_img*0.4
plt.imshow(generated_img[0])
content_img = content_img-MEANS
style_img = style_img-MEANS
model = VGG19(include_top=False, weights='imagenet')
def compute_content_cost(act_content_img, act_generated_img):
    return tf.reduce_mean(tf.square(act_content_img-act_generated_img))

def gram_matrix(A):
    gram = tf.matmul(A, tf.transpose(A))
    return gram

def style_loss_one_layer(act_style_img, act_generated_img):
    m,n_H,n_W,n_C = tf.shape(act_generated_img) #act_generated_img.get_shape().as_list()
    gram_act_style_img = gram_matrix(act_style_img)
    gram_generated_img = gram_matrix(act_generated_img)
    return tf.reduce_mean(tf.square(gram_act_style_img-gram_generated_img))*(1/(4*n_C**2*(n_H*n_W)**2))

content_layer = ['block5_conv2']
style_layers = [('block1_conv1',0.2),
                ('block2_conv1',0.2),
                ('block3_conv1',0.2),
                ('block4_conv1',0.2),
                ('block5_conv1',0.2)]

def compute_style_cost(model, style_layers):
    style_cost = total_style_cost = 0
    for layer, coeff in style_layers:
        act_style_img = model.get_layer(layer).output
        act_generated_img = model.get_layer(layer).output
        style_cost += style_loss_one_layer(act_style_img, act_generated_img)
        total_style_cost += coeff*style_cost
    return total_style_cost

def compute_total_cost(J_content, J_style, alpha=10, beta=40):
    J = (alpha*tf.cast(J_content, tf.float64)) + (beta*J_style)
    return J
act_generated_img = model.get_layer('block5_conv2').output
act_content_img = model.get_layer('block5_conv2').output
J_content = compute_content_cost(act_content_img=act_content_img, act_generated_img=act_generated_img)
print(J_content)
J_style = compute_style_cost(model, style_layers=style_layers)
print(J_style)
J_total_cost = compute_total_cost(J_content, J_style, alpha=10, beta=40)
print(J_total_cost)
optimizer = tf.train.AdamOptimizer(2.0)
train_step = optimizer.minimize(J_total_cost) #**getting error here**
This error mainly occurs when code written for TensorFlow 1.x is run on a system that has TensorFlow 2.x installed.
Import TensorFlow as below to make sure you are using the 1.x API:
import tensorflow.compat.v1 as tf
You can then disable the 2.x behaviour with the following call right after the import:
tf.disable_v2_behavior()
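For completeness, here is a toy sketch of the eager-mode alternative the error message hints at, since the question says it wants to keep eager execution: in eager mode, tf.train.AdamOptimizer.minimize accepts a zero-argument callable that recomputes the loss. The tensors below are stand-ins, not the real VGG features.
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()

# Stand-in images; the real loss would recompute the VGG features of the
# generated image inside the callable so gradients can flow to it.
content = tf.constant(np.random.rand(1, 32, 32, 3), dtype=tf.float32)
generated = tf.Variable(np.random.rand(1, 32, 32, 3), dtype=tf.float32)

def total_cost():
    return tf.reduce_mean(tf.square(generated - content))

optimizer = tf.train.AdamOptimizer(0.02)
for _ in range(100):
    optimizer.minimize(total_cost, var_list=[generated])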

ctypes array is not callable

I want to use the scipy.optimize.minimize function. The function contains commands from a DLL which require a ctypes array. The goal is to vary the inputs in the ctypes array to optimize a specific output which is also a ctypes array (see code below).
import os
import ctypes
import tkinter as tk
from PIL import ImageTk
from tkinter import filedialog
import numpy as np
from scipy.optimize import minimize
dll = ctypes.cdll.LoadLibrary(library)
LoadModelDef = dll.addModelDef(model)
nrExperiments = 1
nrin = dll.getNumInputs(LoadModelDef)
PDBL2ARR = ctypes.c_double * nrin * nrExperiments
inputs = PDBL2ARR()
inputs_init = PDBL2ARR()
def evaluaterel(library,Model,InputArray):
    nrExp = len(InputArray)
    DBL2ARR = ctypes.c_double * nrExp
    outputs = DBL2ARR()
    for i in range(2,13):
        Name= outputName(Model,i)
        library.evalVBA(Model,InputArray,nrExp,i,outputs)
    for i in range(nrExp):
        Value = str(outputs[i])
        # text = label.cget("text") + '\n' + str(Name)+ ' ' + str(Value)
        # label.configure(text=text)
    return outputs
data = np.array([line.split()[-1] for line in open("DATA.txt")], dtype=np.float64)
for i in range(nrExperiments):
    for j in range(nrin):
        inputs_init[i][j]= 0
for i in range(nrExperiments):
    for j in range(0,nrin):
        inputs[i][j]=data[j]
solution=minimize(evaluaterel(dll,LoadModelDef,inputs),inputs_init,method='SLSQP')
print(solution)
File "c:\app\python27\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
return function(*(wrapper_args + args))
TypeError: 'c_double_Array_1' object is not callable
According to [SciPy.Docs]: scipy.optimize.minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None), the 1st argument should be a callable (function, in your case). But, you're calling the function yourself when passing it, and therefore you're passing the function return value.
Modify your code (faulty line) to:
solution = minimize(evaluaterel, inputs_init, args=(dll, LoadModelDef, inputs), method="SLSQP")
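As a side note on the calling convention (a toy sketch, not the DLL code): minimize invokes fun(x, *args) with x as a NumPy array and expects a scalar float back, so the objective's first parameter must be the design vector.
import numpy as np
from scipy.optimize import minimize

# Toy objective standing in for the DLL evaluation: the first argument is the
# design vector supplied by SciPy, extra data arrives through args=(...).
def objective(x, scale):
    return float(scale * np.sum(x**2))

result = minimize(objective, x0=np.ones(3), args=(2.0,), method='SLSQP')
print(result.x)   # converges toward the zero vector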

what is the use of .data in pytorch

I just got the code from https://github.com/heykeetae/Self-Attention-GAN (the file is spectral.py). Partial code is below. I don't really understand what the use of .data is. Is it a method of some class? If it is, which class does it belong to?
import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

class SpectralNorm(nn.Module):
    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
OK, so SpectralNorm.__init__ sets self.module = module and self.name = name (default: 'weight'), both constructor arguments. It is called like SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)), so module is an nn.Conv2d instance, which subclasses nn.Module. Following the trail, w = getattr(self.module, self.name) is the convolution's weight, a torch.nn.Parameter, which subclasses torch.Tensor, and that is where we finally find the answer: .data is the attribute of a Tensor (and of the older Variable) that exposes the underlying tensor detached from the autograd graph, so reads and in-place updates on it are not tracked for gradients.
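A small illustration of that behaviour (my own example, not from the repository):
import torch

w = torch.nn.Parameter(torch.randn(3, 2))  # leaf tensor with requires_grad=True
raw = w.data                               # same storage, detached from autograd
print(raw.requires_grad)                   # False: ops on raw are not tracked
raw.normal_(0, 1)                          # in-place edit bypasses gradient tracking
print(torch.equal(w.detach(), raw))        # True: the Parameter sees the new values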

component object has no attribute 'ln_solver'

I'm building a large new OpenMDAO component. When I run it, OpenMDAO crashes with AttributeError: 'myNewComponent' object has no attribute 'ln_solver' during the setup stage. What does this message mean?
import numpy as np
from openmdao.api import Group, Component, Problem, IndepVarComp, ParallelGroup
from openmdao.api import ScipyOptimizer
from openmdao.core.mpi_wrap import MPI
if MPI:
    from openmdao.core.petsc_impl import PetscImpl as impl
else:
    from openmdao.api import BasicImpl as impl

class WindSEComp(Component):
    def __init__(self, nTurbs, rotor_diameter):
        super(WindSEComp, self).__init__()
        self.add_param('turbineX', val=np.ones(nTurbs), units='m', desc='x positions of turbines in original ref. frame')
        self.add_output('AEP', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        mx_opt = params['turbineX']
        unknowns['AEP'] = np.sum(mx_opt)

    def linearize(self, params, unknowns, resids):
        mx_opt = params['turbineX']
        J = {}
        J['AEP', 'turbineX'] = 3 * mx_opt
        return J
prob = Problem(impl=impl, root=WindSEComp(nTurbs=4, rotor_diameter=126.0))
#prob.driver = ScipyOptimizer()
#prob.driver.add_desvar('turbineX')
#prob.driver.add_objective('AEP')
prob.setup()
prob.run()
You're trying to use a component like a group: these are not the same. You want to do something like this:
top = Problem()
root = top.root = Group()
root.add('g', WindSEComp(nTurbs=4, rotor_diameter=126.0))
top.setup()
top.run()
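If the optimizer lines commented out in the question are wanted as well, a hedged sketch for OpenMDAO 1.x (assuming the design variable is fed from an IndepVarComp output and connected to the component's parameter, as that API expects) might look like this:
top = Problem(impl=impl)
root = top.root = Group()
root.add('xin', IndepVarComp('turbineX', np.ones(4)))          # source of the design variable
root.add('g', WindSEComp(nTurbs=4, rotor_diameter=126.0))
root.connect('xin.turbineX', 'g.turbineX')

top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'
top.driver.add_desvar('xin.turbineX')
top.driver.add_objective('g.AEP')

top.setup()
top.run()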
