opt = SolverFactory("glpk")
opt.options["mipgap"] = 0.05
opt.options["FeasibilityTol"] = 1e-05
solver_manager = SolverManagerFactory("serial")
# results = solver_manager.solve(instance, opt=opt, tee=True,timelimit=None, mipgap=0.1)
results = solver_manager.solve(model, opt=opt, tee=True, timelimit=None)
# sends results to stdout
# results.write()
def pyomo_save_results(options=None, instance=None, results=None):
    OUTPUT = open(r'Results_generic_hub.txt', 'w')
    print(results, file=OUTPUT)
    OUTPUT.close()
It generates the following error. GLPK is installed, and glpsol --help works from any directory. Is this a problem with the GLPK module or with the model itself? Environment: Conda on Mac OS Yosemite.
File "<ipython-input-7-ba156f9322b2>", line 7, in <module>
results = solver_manager.solve(model, opt=opt, tee=True,timelimit=None)
File "/anaconda/lib/python3.6/site-
packages/pyomo/opt/parallel/async_solver.py", line 34, in solve
return self.execute(*args, **kwds)
File "/anaconda/lib/python3.6/site-
packages/pyomo/opt/parallel/manager.py", line 107, in execute
ah = self.queue(*args, **kwds)
File "/anaconda/lib/python3.6/site-
packages/pyomo/opt/parallel/manager.py", line 122, in queue
return self._perform_queue(ah, *args, **kwds)
File "/anaconda/lib/python3.6/site-
packages/pyomo/opt/parallel/local.py", line 59, in _perform_queue
results = opt.solve(*args, **kwds)
File "/anaconda/lib/python3.6/site-packages/pyomo/opt/base/solvers.py", line 582, in solve
self._presolve(*args, **kwds)
File "/anaconda/lib/python3.6/site-packages/pyomo/opt/solver/shellcmd.py", line 196, in _presolve
OptSolver._presolve(self, *args, **kwds)
File "/anaconda/lib/python3.6/site-packages/pyomo/opt/base/solvers.py", line 661, in _presolve
**kwds)
File "/anaconda/lib/python3.6/site-packages/pyomo/opt/base/solvers.py", line 729, in _convert_problem
**kwds)
File "/anaconda/lib/python3.6/site-packages/pyomo/opt/base/convert.py", line 110, in convert_problem
problem_files, symbol_map = converter.apply(*tmp, **tmpkw)
File "/anaconda/lib/python3.6/site-packages/pyomo/solvers/plugins/converter/model.py", line 86, in apply
io_options=io_options)
File "/anaconda/lib/python3.6/site-packages/pyomo/core/base/block.py", line 1646, in write
io_options)
File "/anaconda/lib/python3.6/site-packages/pyomo/repn/plugins/cpxlp.py", line 163, in __call__
include_all_variable_bounds=include_all_variable_bounds)
File "/anaconda/lib/python3.6/site-packages/pyomo/repn/plugins/cpxlp.py", line 575, in _print_model_LP
" cannot write legal LP file" % str(model.name))
ValueError: ERROR: No objectives defined for input model 'unknown'; cannot write legal LP file
The error you are seeing:
"ERROR: No objectives defined for input model 'unknown'; cannot write legal LP file"
indicates that Pyomo cannot find an active Objective component on your model (either you never added one to the model, or the Objective component(s) were all deactivated). Either way, valid LP files (which is how Pyomo interfaces with GLPK) require an objective. Fixing your model by adding an Objective should resolve this error.
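For example, a minimal sketch of adding one (the variable and the expression below are placeholders, not your hub model's actual objective):

from pyomo.environ import ConcreteModel, NonNegativeReals, Objective, Var, minimize

model = ConcreteModel()
model.x = Var(domain=NonNegativeReals)
# Without an active Objective component, Pyomo cannot write an LP file for GLPK.
model.obj = Objective(expr=2 * model.x, sense=minimize)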
Try this code at the end of the script:

instance = model.create()
instance.pprint()
opt = SolverFactory("glpk")
results = opt.solve(instance)
print(results)
Related
I'm trying to convert a pre-trained model from PyTorch to Core ML and have written a script to do so. I'm able to load the model and convert it to TorchScript with both methods (i.e. tracing and scripting).
However, calling the coremltools.convert() method on the traced or scripted model throws an error.
I have included the scripts for both methods along with the errors thrown.
System Information
MacOS = 12.4
Python = 3.9
protobuf = 3.19.0
coremltools = 6.0b1
torch = 1.10.2
torchvision = 0.11.3
Note - I have tried multiple versions of the libraries mentioned above, but that did not help.
Method 1 -> Tracing
Code -
import coremltools as coremltools
import numpy as np
import torch
import torchvision as torchvision
def do_trace(in_model, in_input):
    model_trace = torch.jit.trace(in_model, in_input)
    model_trace.eval()
    return model_trace

def dict_to_tuple(out_dict):
    if "masks" in out_dict.keys():
        return out_dict["boxes"], out_dict["scores"], out_dict["labels"], out_dict["masks"]
    return out_dict["boxes"], out_dict["scores"], out_dict["labels"]

class PredictionModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)

    def forward(self, in_input):
        output = self.model(in_input)
        return dict_to_tuple(output[0])
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, 300, 300)))
model = PredictionModel().eval()
with torch.no_grad():
    output = model(inp)
    trace_model = do_trace(model, inp)
ml_model = coremltools.convert(trace_model, inputs=[coremltools.TensorType(shape=(1, 3, 300, 300))])
print(ml_model)
Error -
Converting PyTorch Frontend ==> MIL Ops: 3%|▎ | 74/2627 [00:00<00:05, 436.01 ops/s]
Traceback (most recent call last):
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 91, in _perform_torch_convert
prog = converter.convert()
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 263, in convert
convert_nodes(self.context, self.graph)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 89, in convert_nodes
add_op(context, node)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 3973, in reciprocal
context.add(mb.inverse(x=inputs[0], name=node.name))
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/mil/ops/registry.py", line 63, in add_op
return cls._add_op(op_cls, **kwargs)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/mil/builder.py", line 191, in _add_op
new_op.type_value_inference()
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/mil/operation.py", line 244, in type_value_inference
output_vals = self._auto_val(output_types)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/mil/operation.py", line 354, in _auto_val
builtin_val.val = v
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/mil/types/type_tensor.py", line 93, in val
raise ValueError(
ValueError: tensor should have value of type ndarray, got <class 'numpy.float32'> instead
Method 2 -> Scripting
Code -
import coremltools as coremltools
import torch
import torchvision as torchvision
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
script_model = torch.jit.script(model)
ml_model = coremltools.convert(script_model, inputs=[coremltools.TensorType(shape=(1, 3, 300, 300))])
print(ml_model)
Error -
WARNING:root:Support for converting Torch Script Models is experimental. If possible you should use a traced model for conversion.
Traceback (most recent call last):
File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/pydevd.py", line 1491, in _exec
pydev_imports.execfile(file, globals, locals) # execute the script
File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/Users/techlead/PycharmProjects/conversion_demo/main.py", line 8, in
ml_model = coremltools.convert(script_model, inputs=[coremltools.TensorType(shape=(1, 3, 300, 300))])
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/_converters_entry.py", line 426, in convert
mlmodel = mil_convert(
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 182, in mil_convert
return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 209, in _mil_convert
proto, mil_program = mil_convert_to_proto(
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 272, in mil_convert_to_proto
prog = frontend_converter(model, **kwargs)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/converter.py", line 104, in call
return load(*args, **kwargs)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 51, in load
converter = TorchConverter(torchscript, inputs, outputs, cut_at_symbols)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 158, in init
raw_graph, params_dict = self._expand_and_optimize_ir(self.torchscript)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 478, in _expand_and_optimize_ir
graph, params_dict = TorchConverter._jit_pass_lower_graph(graph, torchscript)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 423, in _jit_pass_lower_graph
_lower_graph_block(graph)
File "/Users/techlead/PycharmProjects/conversion_demo/venv/lib/python3.9/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 402, in _lower_graph_block
module = getattr(node_to_module_map[_input], attr_name)
KeyError: images.7 defined in (%images.7 : torch.torchvision.models.detection.image_list.ImageList, %targets.31 : Dict(str, Tensor)[]? = prim::TupleUnpack(%405)
)
If you try to run either of the above snippets, you'll see that the model gets successfully converted to TorchScript (trace and script), but the last step, converting the TorchScript model to Core ML, fails. Please have a look at this issue and let me know how I can move forward. Also, if I'm doing something wrong (e.g. passing the inputs incorrectly), let me know that as well. This is my first time doing this, so I'm kind of a noob. Any help is appreciated. Thank you!
I know there is a very similar question and answer on stackoverflow (here), but this seems to be distinctly different. I am using statsmodels v 0.13.2, and I am using an ARIMA model as opposed to a SARIMAX model.
I am trying to fit a list of time series data sets with an ARIMA model. The offending piece of my code is here:
import math
import numpy as np
from statsmodels.tsa.arima.model import ARIMA

items = np.log(og_items)
items['count'] = items['count'].apply(lambda x: 0 if math.isnan(x) or math.isinf(x) else x)
model = ARIMA(items, order=(14, 0, 7))
trained = model.fit()
items is a dataframe containing a date index and a single column, count.
I apply the lambda on the items['count'] line because some counts can be 0, which becomes negative infinity after the log is applied. The final product going into the ARIMA does not contain any NaNs or infinite values. However, when I try this without the log transform, I do not get the error. The error only occurs on certain series, and there seems to be no rhyme or reason to which ones are affected: one series had about half of its values as zero after applying the lambda, while another did not have a single zero. Here is the error:
Traceback (most recent call last):
File "item_pipeline.py", line 267, in <module>
main()
File "item_pipeline.py", line 234, in main
restaurant_predictions = make_predictions(restaurant_data=restaurant_data, models=models,
File "item_pipeline.py", line 138, in make_predictions
predictions = model(*data_tuple[:2], min_date=min_date, max_date=max_date,
File "/Users/rob/Projects/5out-ml/models/item_level/items/predict_arima.py", line 127, in predict_daily_arima
predict_date_arima(prediction_dict, item_dict, prediction_date, x_days_out=x_days_out, log_vals=log_vals,
File "/Users/rob/Projects/5out-ml/models/item_level/items/predict_arima.py", line 51, in predict_date_arima
raise e
File "/Users/rob/Projects/5out-ml/models/item_level/items/predict_arima.py", line 47, in predict_date_arima
fitted = model.fit()
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/arima/model.py", line 390, in fit
res = super().fit(
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 704, in fit
mlefit = super(MLEModel, self).fit(start_params, method=method,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/base/model.py", line 563, in fit
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/base/optimizer.py", line 241, in _fit
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/base/optimizer.py", line 651, in _fit_lbfgs
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_lbfgsb_py.py", line 199, in fmin_l_bfgs_b
res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_lbfgsb_py.py", line 362, in _minimize_lbfgsb
f, g = func_and_grad(x)
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 286, in fun_and_grad
self._update_grad()
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 256, in _update_grad
self._update_grad_impl()
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 173, in update_grad
self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_numdiff.py", line 505, in approx_derivative
return _dense_difference(fun_wrapped, x0, f0, h,
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_numdiff.py", line 576, in _dense_difference
df = fun(x) - f0
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_numdiff.py", line 456, in fun_wrapped
f = np.atleast_1d(fun(x, *args, **kwargs))
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 137, in fun_wrapped
fx = fun(np.copy(x), *args)
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/base/model.py", line 531, in f
return -self.loglike(params, *args) / nobs
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 939, in loglike
loglike = self.ssm.loglike(complex_step=complex_step, **kwargs)
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/statespace/kalman_filter.py", line 983, in loglike
kfilter = self._filter(**kwargs)
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/statespace/kalman_filter.py", line 903, in _filter
self._initialize_state(prefix=prefix, complex_step=complex_step)
File "/Users/rob/Projects/5out-ml/venv/lib/python3.8/site-packages/statsmodels/tsa/statespace/representation.py", line 983, in _initialize_state
self._statespaces[prefix].initialize(self.initialization,
File "statsmodels/tsa/statespace/_representation.pyx", line 1362, in statsmodels.tsa.statespace._representation.dStatespace.initialize
File "statsmodels/tsa/statespace/_initialization.pyx", line 288, in statsmodels.tsa.statespace._initialization.dInitialization.initialize
File "statsmodels/tsa/statespace/_initialization.pyx", line 406, in statsmodels.tsa.statespace._initialization.dInitialization.initialize_stationary_stationary_cov
File "statsmodels/tsa/statespace/_tools.pyx", line 1206, in statsmodels.tsa.statespace._tools._dsolve_discrete_lyapunov
numpy.linalg.LinAlgError: LU decomposition error.
The solution in the other stackoverflow post was to initialize the statespace differently. It looks like the statespace is involved, if you look at the last few lines of the error. However, it does not seem that that workflow is exposed in the newer version of statsmodels. Is it? If not, what else can I try to circumvent this error?
So far, I have tried manually initializing the model to approximate diffuse, and manually setting the initialize property to approximate diffuse. Neither seems to be valid in the new statsmodels code.
Turns out there's a new way to initialize. The second line below is the operative line.
model = ARIMA(items, order=(14, 0, 7))
model.initialize_approximate_diffuse() # this line
trained = model.fit()
I'm trying to distribute the training of a deep reinforcement learning system that I have built using Ray and TensorFlow 1. While I am using Ray because I have a lot of code that parallelizes logic not directly related to the training, I would like to parallelize the training itself (namely the gradient reduction over different workers on different GPUs) using the tf.distribute utilities, mainly because they can use the NCCL communication library, which I expect will boost the training speed compared with other approaches.
The problem is that I don't want to refactor my TensorFlow code (written in old low-level TensorFlow 1 with custom training loops; I am not using any API like Keras), but I can't figure out how to use tf.distribute, namely MirroredStrategy, to distribute the training with TensorFlow 1 code.
I have found this guide about tf.distribute in TensorFlow 1, but even in the custom-loop example they use the Keras API to build the model and the optimizer. I am trying to follow this guide as far as possible in order to build a simple example that uses the libraries/APIs that I use in my main project, but I cannot make it work.
The example code is this:
import numpy as np
import tensorflow.compat.v1 as tf
import ray

tf.disable_v2_behavior()

@ray.remote(num_cpus=1, num_gpus=0)
class Trainer:
    def __init__(self, local_data):
        tf.disable_v2_behavior()
        self.current_w = 1.0
        self.local_data = local_data
        self.strategy = tf.distribute.MirroredStrategy()
        with self.strategy.scope():
            self.w = tf.Variable(((1.0)), dtype=tf.float32)
            self.x = tf.placeholder(shape=(None, 1), dtype=tf.float32)
            self.y = self.w * self.x
            self.grad = tf.gradients(self.y, [self.w])

            def train_step_opt():
                def grad_fn():
                    grad = tf.gradients(self.y, [self.w])
                    return grad
                per_replica_grad = self.strategy.experimental_run_v2(grad_fn)
                red_grad = self.strategy.reduce(
                    tf.distribute.ReduceOp.SUM, per_replica_grad, axis=None)
                minimize = self.w.assign_sub(red_grad[0])
                return minimize

            self.minimize = self.strategy.experimental_run_v2(train_step_opt)

    def train_step(self):
        with self.strategy.scope():
            with tf.Session() as sess:
                sess.run(self.minimize, feed_dict={self.x: self.local_data})
                self.current_w = sess.run(self.w)
        return self.current_w

ray.init()

data = np.arange(4) + 1
data = data.reshape((-1, 1))
data_w = [data[None, i] for i in range(4)]

trainers = [Trainer.remote(d) for d in data_w]
W = ray.get([t.train_step.remote() for t in trainers])[0]
print(W)
It is supposed to simply compute the derivative of a linear function in different processes, reduce all the derivatives into a single value, and apply it to the single parameter "w".
When I run it, I get the following error:
Traceback (most recent call last):
File "dtfray.py", line 49, in <module>
r = ray.get([t.train_step.remote() for t in trainers])
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/ray/_private/client_mode_hook.py", line 47, in wrapper
return func(*args, **kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/ray/worker.py", line 1456, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(TypeError): ray::Trainer.train_step() (pid=25401, ip=10.128.0.46)
File "python/ray/_raylet.pyx", line 439, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 473, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 476, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 480, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 432, in ray._raylet.execute_task.function_executor
File "dtfray.py", line 32, in __init__
self.minimize = self.strategy.experimental_run_v2(train_step_opt)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py", line 957, in experimental_run_v2
return self.run(fn, args=args, kwargs=kwargs, options=options)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py", line 951, in run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py", line 2290, in call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/mirrored_strategy.py", line 770, in _call_for_each_replica
fn, args, kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/mirrored_strategy.py", line 201, in _call_for_each_replica
coord.join(threads)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/training/coordinator.py", line 389, in join
six.reraise(*self._exc_info_to_raise)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/six.py", line 703, in reraise
raise value
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/training/coordinator.py", line 297, in stop_on_exception
yield
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/distribute/mirrored_strategy.py", line 998, in run
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
File "/home/Adrian/miniconda3/envs/sukan_env/lib/python3.7/site-packages/tensorflow/python/autograph/impl/api.py", line 282, in wrapper
return func(*args, **kwargs)
File "dtfray.py", line 22, in train_step_opt
tf.distribute.get_replica_context().merge_call()
TypeError: merge_call() missing 1 required positional argument: 'merge_fn'
As highlighted in the following section of source code:
def train_step_opt():
    def grad_fn():
        grad = tf.gradients(self.y, [self.w])
        return grad
    per_replica_grad = self.strategy.experimental_run_v2(grad_fn)
    red_grad = self.strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_grad, axis=None)
    minimize = self.w.assign_sub(red_grad[0])
    return minimize

self.minimize = self.strategy.experimental_run_v2(train_step_opt)
you have to reduce the results from tf.distribute.MirroredStrategy() one more time for train_step_opt after the line self.minimize = self.strategy.experimental_run_v2(train_step_opt), because you are calling self.strategy.experimental_run_v2() twice: once on train_step_opt and then again on grad_fn inside it.
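One way to avoid the nested call altogether is to run only the per-replica gradient computation through the strategy and do the reduction and the update in cross-replica context. A minimal sketch of a restructured __init__ body along these lines (an assumption about how to rearrange the question's code, not tested against those exact Ray/TF versions):

with self.strategy.scope():
    self.w = tf.Variable(1.0, dtype=tf.float32)
    self.x = tf.placeholder(shape=(None, 1), dtype=tf.float32)
    self.y = self.w * self.x

    def grad_fn():
        # per-replica gradient of y with respect to w
        return tf.gradients(self.y, [self.w])[0]

    # single experimental_run_v2 call: compute the gradient on each replica
    per_replica_grad = self.strategy.experimental_run_v2(grad_fn)
    # reduce across replicas in cross-replica context (no nested run calls)
    red_grad = self.strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_grad, axis=None)
    # apply the summed gradient as a plain gradient-descent style update
    self.minimize = self.w.assign_sub(red_grad)

Here grad_fn is the only thing executed per replica; the reduction and the assign_sub happen once outside the replica function, so no merge_call is triggered from inside it.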
Also, you can review lines 178 to 188 of the mirrored_run.py file in the TF GitHub repo, which describe how get_replica_context().merge_call() is handled in cross-replica context:
When fn starts should_run event is set on
_MirroredReplicaThread (MRT) threads. The execution waits until
MRT.has_paused is set, which indicates that either fn is complete
or a get_replica_context().merge_call() is called. If fn
is complete, then MRT.done is set to True. Otherwise, arguments
of get_replica_context().merge_call from all paused threads are grouped and the merge_fn is performed. Results of the
get_replica_context().merge_call are then set to MRT.merge_result.
Each such get_replica_context().merge_call call returns the MRT.merge_result for that thread when MRT.should_run event is
reset again. Execution of fn resumes.
Thanks in advance!
I am working on portfolio optimisation with PyPortfolioOpt.
I have the prices of my underlying assets from 2015-01-01 up to 2021-05-19.
The dataframe shape is [1666 rows x 20 columns].
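For reference, df in the snippet below is that price dataframe, loaded roughly along these lines (the file name and column names here are hypothetical):

import pandas as pd

# hypothetical CSV of daily prices: a date column plus 20 asset price columns
df = pd.read_csv("prices.csv", parse_dates=["date"], index_col="date")
print(df.shape)  # expected to be (1666, 20) for the data described above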
I ran the following code:
from pypfopt import EfficientFrontier, expected_returns, risk_models

mu = expected_returns.mean_historical_return(df)
cov = risk_models.sample_cov(df)
print('Mean' '\n'+str(mu))
print('Covariance: ' '\n' +str(cov))
ef = EfficientFrontier(mu,cov)
weights = ef.max_sharpe()
cleaned_w = ef.clean_weights()
print(cleaned_w)
ef.portfolio_performance(verbose=True)
But it gives an error stating "Workspace allocation error", pointing to the line weights = ef.max_sharpe():
Traceback (most recent call last):
File "F:\Python projects\KS\Investment\Efficient frontier.py", line 34, in <module>
weights = ef.max_sharpe()
File "F:\Python projects\KS\lib\site-packages\pypfopt\efficient_frontier\efficient_frontier.py", line 278, in max_sharpe
self._solve_cvxpy_opt_problem()
File "F:\Python projects\KS\lib\site-packages\pypfopt\base_optimizer.py", line 239, in _solve_cvxpy_opt_problem
self._opt.solve(verbose=self._verbose, **self._solver_options)
File "F:\Python projects\KS\lib\site-packages\cvxpy\problems\problem.py", line 459, in solve
return solve_func(self, *args, **kwargs)
File "F:\Python projects\KS\lib\site-packages\cvxpy\problems\problem.py", line 947, in _solve
solution = solving_chain.solve_via_data(
File "F:\Python projects\KS\lib\site-packages\cvxpy\reductions\solvers\solving_chain.py", line 343, in solve_via_data
return self.solver.solve_via_data(data, warm_start, verbose,
File "F:\Python projects\KS\lib\site-packages\cvxpy\reductions\solvers\qp_solvers\osqp_qpif.py", line 103, in solve_via_data
solver.setup(P, q, A, lA, uA, verbose=verbose, **solver_opts)
File "F:\Python projects\KS\lib\site-packages\osqp\interface.py", line 37, in setup
self._model.setup(*unpacked_data, **settings)
ValueError: Workspace allocation error!
I tried changing the memory settings in PyCharm, but to no avail. Is memory the same as workspace allocation? Sorry for these fundamental questions...
Cheers mate
How do I resume training from a saved snapshot in Chainer? I was trying to implement a DCGAN in Chainer using the following GitHub link:
https://github.com/chainer/chainer/blob/master/examples/dcgan/train_dcgan.py
When I try to pass the --resume parameter, it shows a shape mismatch error in the network.
In the Python code there is an option to specify the snapshot from which to resume training. These snapshots are automatically saved to the result folder, which is also passed as an argument in the code. So I tried to resume training from the saved snapshot with the command below.
$ python train.py --resume 'snapshot.npz'
where train.py is the code modified to add labels for the DCGAN on the CIFAR-10 dataset.
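For reference, the resume path in the linked example is wired up roughly like this (a sketch; it assumes the modified train.py keeps the same structure as the linked train_dcgan.py, where args comes from argparse and trainer is the chainer.training.Trainer built earlier in main()):

if args.resume:
    # Load the whole trainer state (models, optimizers, iterators, extensions)
    # from the snapshot file before calling trainer.run().
    chainer.serializers.load_npz(args.resume, trainer)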
The error I got when running the above command is:
chainer.utils.type_check.InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 110 != 100
When I run the Python file with the command below, there is no error:
$ python train.py
Complete error trace:
Exception in main training loop:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 110 != 100
Traceback (most recent call last):
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 315, in run
update()
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 165, in update
self.update_core()
File "/home/964769/Lakshmi/DCGAN/updater_with_label.py", line 50, in update_core
x_fake = gen(z,labels)
File "/home/964769/Lakshmi/DCGAN/net_with_label.py", line 61, in __call__
h = F.reshape(F.relu(self.bn0(self.l0(F.concat((z,t),axis=1)))),
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/link.py", line 242, in __call__
out = forward(*args, **kwargs)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/links/connection/linear.py", line 138, in forward
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/functions/connection/linear.py", line 288, in linear
y, = LinearFunction().apply(args)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/function_node.py", line 245, in apply
self.check_data_type_forward(in_data)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/function_node.py", line 330, in check_data_type_forward
self.check_type_forward(in_type)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/functions/connection/linear.py", line 27, in check_type_forward
x_type.shape[1] == w_type.shape[1],
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/utils/type_check.py", line 546, in expect
expr.expect()
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/utils/type_check.py", line 483, in expect
'{0} {1} {2}'.format(left, self.inv, right))
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "train.py", line 140, in <module>
main()
File "train.py", line 135, in main
trainer.run()
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 329, in run
six.reraise(*sys.exc_info())
File "/home/964769/anaconda3/lib/python3.6/site-packages/six.py", line 686, in reraise
raise value
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 315, in run
update()
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 165, in update
self.update_core()
File "/home/964769/Lakshmi/DCGAN/updater_with_label.py", line 50, in update_core
x_fake = gen(z,labels)
File "/home/964769/Lakshmi/DCGAN/net_with_label.py", line 61, in __call__
h = F.reshape(F.relu(self.bn0(self.l0(F.concat((z,t),axis=1)))),
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/link.py", line 242, in __call__
out = forward(*args, **kwargs)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/links/connection/linear.py", line 138, in forward
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/functions/connection/linear.py", line 288, in linear
y, = LinearFunction().apply(args)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/function_node.py", line 245, in apply
self.check_data_type_forward(in_data)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/function_node.py", line 330, in check_data_type_forward
self.check_type_forward(in_type)
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/functions/connection/linear.py", line 27, in check_type_forward
x_type.shape[1] == w_type.shape[1],
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/utils/type_check.py", line 546, in expect
expr.expect()
File "/home/964769/anaconda3/lib/python3.6/site-packages/chainer/utils/type_check.py", line 483, in expect
'{0} {1} {2}'.format(left, self.inv, right))
chainer.utils.type_check.InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 110 != 100