Parameter estimation with parmest and a Pyomo model - Python

I have the enzymatic reactions:
R + L <-> Y
R + I <-> X
described by the following system of coupled differential equations:
dR/dt = k2*Y(t) - k1*R(t)*L(t) + k4*X(t) - k3*R(t)*I(t)
dL/dt = k2*Y(t) - k1*R(t)*L(t)
dI/dt = k4*X(t) - k3*R(t)*I(t)
dY/dt = k1*R(t)*L(t) - k2*Y(t)
dX/dt = k3*R(t)*I(t) - k4*X(t)
The parameters k1 and k2 are known. I want to estimate the values of k3 and k4 using a Pyomo model and a set of Y measurements in time. This is my code:
#Modules
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import scipy.stats as stats
import scipy.optimize as optimize
from pyomo.environ import *
from pyomo.dae import *
from pyomo.dae.simulator import Simulator
import pyomo.contrib.parmest.parmest as parmest
from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model
import sys
import os.path
#Experimental data
texp=[0,0.5,0.75,1,1.25,2,2.77,3.55,4.32,5.1,5.87,6.65,7.42,8.2,8.97,13.92,\
18.92,23.92,28.92,33.92,38.92,43.92,48.92,53.92,58.92,63.92,68.92,83.9,\
98.9,113.9,128.9,143.9,158.9,173.9,188.9,203.9,218.9,233.9,248.9]
yexp=[0,21.00084301,-54.20967226,-12.0118567,-25.27198718,-1.764831016,\
10.29814076,-5.340599221,6.988265971,9.56252586,-3.705303123,1.063813346,\
12.32611118,7.763248428,9.074028389,20.60003402,22.1001936,23.13229101,\
27.31536018,25.00455108,31.70315201,35.10288809,38.0816535,35.30253723,\
36.81655545,36.11171691,41.57221204,42.47852506,46.28315167,42.66070948,\
44.73318881,37.36241544,39.69557981,38.71667563,37.49757832,42.35943236,\
41.68017195,44.91883581,47.80088108]
df=pd.DataFrame()
df['t']=texp
df['1Cc']=yexp
#MODEL
def create_model(data):
    m = ConcreteModel()
    # ···Parameters to estimate
    m.k3 = Var(initialize=1e8, within=PositiveReals)
    m.k3.fixed = True
    m.k4 = Var(initialize=0.01, within=PositiveReals)
    m.k4.fixed = True
    # ···Known parameters
    k1 = 3.58e6
    k2 = 1.25e-1
    # ···Data
    m.t = ContinuousSet(initialize=texp)
    # ···Variables
    m.Y = Var(m.t)
    m.X = Var(m.t)
    m.R = Var(m.t)
    m.L = Var(m.t)
    m.I = Var(m.t)
    m.dYdt = DerivativeVar(m.Y, wrt=m.t)
    m.dXdt = DerivativeVar(m.X, wrt=m.t)
    m.dRdt = DerivativeVar(m.R, wrt=m.t)
    m.dLdt = DerivativeVar(m.L, wrt=m.t)
    m.dIdt = DerivativeVar(m.I, wrt=m.t)
    # ···Initial conditions
    m.Y[0] = 0.0
    m.X[0] = 0.0
    m.R[0] = 0.5e-9
    m.L[0] = 30e-9
    m.I[0] = 1e-9
    # Constraints
    def DiffX(m, t):
        return m.dXdt[t] == -m.k4*m.X[t] + m.k3*m.R[t]*m.I[t]
    m.XC = Constraint(m.t, rule=DiffX)
    def DiffR(m, t):
        return m.dRdt[t] == k2*m.Y[t] - k1*m.R[t]*m.L[t] + m.k4*m.X[t] - m.k3*m.R[t]*m.I[t]
    m.RC = Constraint(m.t, rule=DiffR)
    def DiffL(m, t):
        return m.dLdt[t] == k2*m.Y[t] - k1*m.R[t]*m.L[t]
    m.LC = Constraint(m.t, rule=DiffL)
    def DiffI(m, t):
        return m.dIdt[t] == m.k4*m.X[t] - m.k3*m.R[t]*m.I[t]
    m.IC = Constraint(m.t, rule=DiffI)
    def DiffY(m, t):
        return m.dYdt[t] == -k2*m.Y[t] + k1*m.R[t]*m.L[t]
    m.YC = Constraint(m.t, rule=DiffY)
    return m
def main():
    # Vars to estimate
    theta_names = ['k3', 'k4']
    # Data
    data = df
    def SSE(model, data):
        expr = (data['1Cc'] - model.Y)**2
        return expr
    # Instance of the parmest estimator
    pest = parmest.Estimator(create_model, data, theta_names, SSE)
    # Parameter estimation
    obj, theta = pest.theta_est()
    # Assert
    k3_expected = 1e8
    k4_expected = 0.01
    relative_error = abs(theta['k3'] - k3_expected)/k3_expected
    assert relative_error < 0.05
    relative_error = abs(theta['k4'] - k4_expected)/k4_expected
    assert relative_error < 0.05

if __name__ == "__main__":
    main()
I get the following error:
ERROR: Rule failed for Expression 'SecondStageCost' with index None:
TypeError: 'float' object cannot be interpreted as an integer
ERROR: Constructing component 'SecondStageCost' from data=None failed:
TypeError: 'float' object cannot be interpreted as an integer
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:143, in _experiment_instance_creation_callback(scenario_name, node_names, cb_data)
142 try:
--> 143 instance = callback(experiment_number = exp_num, cb_data = cb_data)
144 except TypeError:
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:391, in Estimator._instance_creation_callback(self, experiment_number, cb_data)
390 raise RuntimeError(f'Unexpected data format for cb_data={cb_data}')
--> 391 model = self._create_parmest_model(exp_data)
393 return model
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:366, in Estimator._create_parmest_model(self, data)
365 model.FirstStageCost = pyo.Expression(rule=FirstStageCost_rule)
--> 366 model.SecondStageCost = pyo.Expression(rule=_SecondStageCostExpr(self.obj_function, data))
368 def TotalCost_rule(model):
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\block.py:544, in _BlockData.__setattr__(self, name, val)
540 if isinstance(val, Component):
541 #
542 # Pyomo components are added with the add_component method.
543 #
--> 544 self.add_component(name, val)
545 else:
546 #
547 # Other Python objects are added with the standard __setattr__
548 # method.
549 #
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\block.py:1089, in _BlockData.add_component(self, name, val)
1088 try:
-> 1089 val.construct(data)
1090 except:
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\expression.py:369, in Expression.construct(self, data)
368 assert data is None
--> 369 self._construct_from_rule_using_setitem()
370 finally:
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\indexed_component.py:708, in IndexedComponent._construct_from_rule_using_setitem(self)
705 elif rule.constant():
706 # Slight optimization: if the initializer is known to be
707 # constant, then only call the rule once.
--> 708 val = rule(block, None)
709 for index in self.index_set():
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\initializer.py:373, in ScalarCallInitializer.__call__(self, parent, idx)
372 def __call__(self, parent, idx):
--> 373 return self._fcn(parent)
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:270, in _SecondStageCostExpr.__call__(self, model)
269 def __call__(self, model):
--> 270 return self._ssc_function(model, self._data)
Input In [35], in main.<locals>.SSE(model, data)
91 def SSE(model,data):
---> 92 expr=(data['1Cc']-model.Y)**2
93 return expr
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\ops\common.py:70, in _unpack_zerodim_and_defer.<locals>.new_method(self, other)
68 other = item_from_zerodim(other)
---> 70 return method(self, other)
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\arraylike.py:108, in OpsMixin.__sub__(self, other)
106 @unpack_zerodim_and_defer("__sub__")
107 def __sub__(self, other):
--> 108 return self._arith_method(other, operator.sub)
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\series.py:5639, in Series._arith_method(self, other, op)
5638 self, other = ops.align_method_SERIES(self, other)
-> 5639 return base.IndexOpsMixin._arith_method(self, other, op)
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\base.py:1295, in IndexOpsMixin._arith_method(self, other, op)
1294 with np.errstate(all="ignore"):
-> 1295 result = ops.arithmetic_op(lvalues, rvalues, op)
1297 return self._construct_result(result, name=res_name)
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\ops\array_ops.py:222, in arithmetic_op(left, right, op)
220 _bool_arith_check(op, left, right)
--> 222 res_values = _na_arithmetic_op(left, right, op)
224 return res_values
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\ops\array_ops.py:163, in _na_arithmetic_op(left, right, op, is_cmp)
162 try:
--> 163 result = func(left, right)
164 except TypeError:
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\computation\expressions.py:239, in evaluate(op, a, b, use_numexpr)
237 if use_numexpr:
238 # error: "None" not callable
--> 239 return _evaluate(op, op_str, a, b) # type: ignore[misc]
240 return _evaluate_standard(op, op_str, a, b)
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\computation\expressions.py:128, in _evaluate_numexpr(op, op_str, a, b)
127 if result is None:
--> 128 result = _evaluate_standard(op, op_str, a, b)
130 return result
File ~\AppData\Roaming\Python\Python39\site-packages\pandas\core\computation\expressions.py:69, in _evaluate_standard(op, op_str, a, b)
68 _store_test_result(False)
---> 69 return op(a, b)
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\indexed_component.py:1113, in IndexedComponent_NDArrayMixin.__array_ufunc__(self, ufunc, method, *inputs, **kwargs)
1112 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-> 1113 return NumericNDArray.__array_ufunc__(
1114 None, ufunc, method, *inputs, **kwargs)
File pyomo\core\expr\numvalue.pyx:997, in pyomo.core.expr.numvalue.NumericNDArray.__array_ufunc__()
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\core\base\indexed_component.py:1107, in IndexedComponent_NDArrayMixin.__array__(self, dtype)
1106 shape = tuple(b+1 for b in bounds[1])
-> 1107 ans = NumericNDArray(shape=shape, dtype=object)
1108 for k, v in self.items():
TypeError: 'float' object cannot be interpreted as an integer
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
Input In [35], in <cell line: 110>()
107 assert relative_error<0.05
110 if __name__=="__main__":
--> 111 main()
Input In [35], in main()
96 pest=parmest.Estimator(create_model,data,theta_names,SSE)
98 #Parameter estimation
---> 99 obj,theta=pest.theta_est()
101 #Assert
102 k3_expected=1e8
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:687, in Estimator.theta_est(self, solver, return_values, calc_cov, cov_n)
684 assert isinstance(cov_n, int), "The number of datapoints that are used in the objective function is required to calculate the covariance matrix"
685 assert cov_n > len(self.theta_names), "The number of datapoints must be greater than the number of parameters to estimate"
--> 687 return self._Q_opt(solver=solver, return_values=return_values,
688 bootlist=None, calc_cov=calc_cov, cov_n=cov_n)
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:432, in Estimator._Q_opt(self, ThetaVals, solver, return_values, bootlist, calc_cov, cov_n)
426 ef = sputils.create_EF(scen_names,
427 _experiment_instance_creation_callback,
428 EF_name = "_Q_opt",
429 suppress_warnings=True,
430 scenario_creator_kwargs=scenario_creator_options)
431 else:
--> 432 ef = local_ef.create_EF(scen_names,
433 _experiment_instance_creation_callback,
434 EF_name = "_Q_opt",
435 suppress_warnings=True,
436 scenario_creator_kwargs=scenario_creator_options)
437 self.ef_instance = ef
439 # Solve the extensive form with ipopt
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\create_ef.py:88, in create_EF(scenario_names, scenario_creator, scenario_creator_kwargs, EF_name, suppress_warnings, nonant_for_fixed_vars)
86 if scenario_creator_kwargs is None:
87 scenario_creator_kwargs = dict()
---> 88 scen_dict = {
89 name: scenario_creator(name, **scenario_creator_kwargs)
90 for name in scenario_names
91 }
93 if (len(scen_dict) == 0):
94 raise RuntimeError("create_EF() received empty scenario list")
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\create_ef.py:89, in <dictcomp>(.0)
86 if scenario_creator_kwargs is None:
87 scenario_creator_kwargs = dict()
88 scen_dict = {
---> 89 name: scenario_creator(name, **scenario_creator_kwargs)
90 for name in scenario_names
91 }
93 if (len(scen_dict) == 0):
94 raise RuntimeError("create_EF() received empty scenario list")
File ~\AppData\Roaming\Python\Python39\site-packages\pyomo\contrib\parmest\parmest.py:145, in _experiment_instance_creation_callback(scenario_name, node_names, cb_data)
143 instance = callback(experiment_number = exp_num, cb_data = cb_data)
144 except TypeError:
--> 145 raise RuntimeError("Only one callback signature is supported: "
146 "callback(experiment_number, cb_data) ")
147 """
148 try:
149 instance = callback(scenario_tree_model, scen_name, node_names)
(...)
158 raise
159 """
160 if hasattr(instance, "_mpisppy_node_list"):
RuntimeError: Only one callback signature is supported: callback(experiment_number, cb_data)
The model runs fine in simulation, so I don't know where the problem is. I will be very grateful if someone can help me. Thank you!
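Judging from the traceback, the failure starts inside the SSE rule: data['1Cc'] - model.Y subtracts an indexed Pyomo Var from a pandas Series, so pandas asks NumPy to turn the Var into an array, and Pyomo cannot build one over a float-indexed ContinuousSet (hence "'float' object cannot be interpreted as an integer"). A minimal sketch of an objective that sidesteps the array coercion, assuming the data frame layout above, builds the sum of squared residuals term by term:
def SSE(model, data):
    # Accumulate a scalar Pyomo expression over the measurement times,
    # so pandas/NumPy never try to broadcast over the indexed Var.
    expr = 0
    for i in data.index:
        t = data['t'][i]
        expr += (data['1Cc'][i] - model.Y[t])**2
    return expr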

Related

NotImplementedError: Failed in nopython mode pipeline. Use of unknown opcode MAP_ADD at line 116 of <ipython-input-287-147d4798a88b>

I am trying to run some code with Numba and I get errors.
What I want to do is compute the cosine similarity with a cosinus_sparse function. I use this class method inside the search class method, and then call search in the get_result method. Although I added the @jit decorator before each method, the implementation error below appears.
Here is my code:
import numpy as np
from numba import jit
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import pandas as pd
import math

class Search:
    def __init__(self, corpus, method='XTERM', stop_words='english', max_df=1.0, min_df=1, max_features=None):
        self.corpus = corpus
        self.method = method
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        self.max_features = max_features
        self.vectorization()
        self.get_shape()
        self.features_names = self.bag_of_word.get_feature_names()

    def vectorization(self):
        if self.method == 'XTERM':
            self.bag_of_word = CountVectorizer(stop_words=self.stop_words,
                                               max_df=self.max_df, min_df=self.min_df,
                                               max_features=self.max_features)
            self.corpus_vectorized = self.bag_of_word.fit_transform(self.corpus)
        elif self.method == 'TFxIDF':
            self.bag_of_word = TfidfVectorizer(stop_words=self.stop_words,
                                               max_df=self.max_df, min_df=self.min_df,
                                               max_features=self.max_features)
            self.corpus_vectorized = self.bag_of_word.fit_transform(self.corpus)
        else:
            raise MethodError('Method provided is not valid')

    def get_shape(self):
        self.n_docs, self.n_terms = self.corpus_vectorized.shape

    def get_query(self, query):
        self.indexes = [self.features_names.index(q) for q in query if q in self.features_names]
        self.query_vec = np.zeros(self.n_terms)
        self.query_vec[self.indexes] = 1

    @staticmethod
    @jit(nopython=True)
    def cosinus_sparse(i, j):
        num = i.dot(j)
        spars = i * i.transpose()
        den = math.sqrt(spars[0, 0]) * math.sqrt(sum(j * j))
        if den > 0:
            return int(num) / den
        else:
            return 0

    @jit(nopython=True)
    def search(self, q) -> list:
        cc = {i: self.cosinus_sparse(self.corpus_vectorized[i, :], q) for i in range(self.n_docs)}
        cc = sorted(cc.items(), key=lambda x: x[1], reverse=True)
        return cc

    @jit
    def get_result(self) -> list:
        self.result = self.search(self.query_vec)

    def result_announcer(self):
        self.search_lenght = len([i for i in self.result if i[1] > 0])
        print('{} documents linked to your query were found'.format(self.search_lenght))

    def verif_query_vec(self, query):
        if int(sum(self.query_vec)) != len(query):
            raise QueryError('Error in query or query_vec')

    def processing(self, query):
        try:
            self.get_query(query)
            self.verif_query_vec(query)
            self.get_result()
        except NameError:
            self.vectorization()
            self.get_shape()
            self.features_names = self.bag_of_word.get_feature_names()
            self.get_query(query)
            self.verif_query_vec(query)
            self.get_result()

import ipywidgets as widgets
from IPython.display import display

text = widgets.Text(
    value='',
    placeholder='Type words',
    description='String:',
    disabled=False
)
method_radio = widgets.RadioButtons(
    options=['XTERM', 'TFxIDF'],
    # value='TF',
    description='Method:',
    disabled=False
)
submit = widgets.Button(description='Search')
display(widgets.VBox([text, method_radio, submit]))

def handle_submit(sender):
    global query
    query = text.value.lower().split(' ')
    method = method_radio.value
    # instantiate the search object
    global search_obj
    search_obj = Search(corpus=corpus, method=method)
    search_obj.processing(query)

submit.on_click(handle_submit)
Here is the error:
NotImplementedError Traceback (most recent call last)
<ipython-input-288-025a488daa60> in handle_submit(sender)
27 global search_obj
28 search_obj = Search(corpus=corpus, method=method, )
---> 29 search_obj.processing(query)
30
31 submit.on_click(handle_submit)
<ipython-input-287-147d4798a88b> in processing(self, query)
167 self.get_query(query)
168 self.verif_query_vec(query)
--> 169 self.get_result()
170
171 except NameError:
~\Anaconda3\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
365 e.patch_message(''.join(e.args) + help_msg)
366 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 367 raise e
368
369 def inspect_llvm(self, signature=None):
~\Anaconda3\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
322 argtypes.append(self.typeof_pyval(a))
323 try:
--> 324 return self.compile(tuple(argtypes))
325 except errors.TypingError as e:
326 # Intercept typing error that may be due to an argument
~\Anaconda3\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~\Anaconda3\lib\site-packages\numba\dispatcher.py in compile(self, sig)
653
654 self._cache_misses[sig] += 1
--> 655 cres = self._compiler.compile(args, return_type)
656 self.add_overload(cres)
657 self._cache.save_overload(sig, cres)
~\Anaconda3\lib\site-packages\numba\dispatcher.py in compile(self, args, return_type)
80 args=args, return_type=return_type,
81 flags=flags, locals=self.locals,
---> 82 pipeline_class=self.pipeline_class)
83 # Check typing error if object mode is used
84 if cres.typing_error is not None and not flags.enable_pyobject:
~\Anaconda3\lib\site-packages\numba\compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
924 pipeline = pipeline_class(typingctx, targetctx, library,
925 args, return_type, flags, locals)
--> 926 return pipeline.compile_extra(func)
927
928
~\Anaconda3\lib\site-packages\numba\compiler.py in compile_extra(self, func)
372 self.lifted = ()
373 self.lifted_from = None
--> 374 return self._compile_bytecode()
375
376 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~\Anaconda3\lib\site-packages\numba\compiler.py in _compile_bytecode(self)
855 """
856 assert self.func_ir is None
--> 857 return self._compile_core()
858
859 def _compile_ir(self):
~\Anaconda3\lib\site-packages\numba\compiler.py in _compile_core(self)
842 self.define_pipelines(pm)
843 pm.finalize()
--> 844 res = pm.run(self.status)
845 if res is not None:
846 # Early pipeline completion
~\Anaconda3\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~\Anaconda3\lib\site-packages\numba\compiler.py in run(self, status)
253 # No more fallback pipelines?
254 if is_final_pipeline:
--> 255 raise patched_exception
256 # Go to next fallback pipeline
257 else:
~\Anaconda3\lib\site-packages\numba\compiler.py in run(self, status)
244 try:
245 event(stage_name)
--> 246 stage()
247 except _EarlyPipelineCompletion as e:
248 return e.result
~\Anaconda3\lib\site-packages\numba\compiler.py in stage_inline_pass(self)
582 self.flags.auto_parallel,
583 self.parfor_diagnostics.replaced_fns)
--> 584 inline_pass.run()
585 # Remove all Dels, and re-run postproc
586 post_proc = postproc.PostProcessor(self.func_ir)
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in run(self)
75
76 if guard(self._inline_closure,
---> 77 work_list, block, i, func_def):
78 modified = True
79 break # because block structure changed
~\Anaconda3\lib\site-packages\numba\ir_utils.py in guard(func, *args, **kwargs)
1358 """
1359 try:
-> 1360 return func(*args, **kwargs)
1361 except GuardException:
1362 return None
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in _inline_closure(self, work_list, block, i, func_def)
212 inline_closure_call(self.func_ir,
213 self.func_ir.func_id.func.__globals__,
--> 214 block, i, func_def, work_list=work_list)
215 return True
216
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in inline_closure_call(func_ir, glbls, block, i, callee, typingctx, arg_typs, typemap, calltypes, work_list)
253 callee_closure = callee.closure if hasattr(callee, 'closure') else callee.__closure__
254 # first, get the IR of the callee
--> 255 callee_ir = get_ir_of_code(glbls, callee_code)
256 callee_blocks = callee_ir.blocks
257
~\Anaconda3\lib\site-packages\numba\ir_utils.py in get_ir_of_code(glbls, fcode)
1572 f.__name__ = fcode.co_name
1573 from numba import compiler
-> 1574 ir = compiler.run_frontend(f)
1575 # we need to run the before inference rewrite pass to normalize the IR
1576 # XXX: check rewrite pass flag?
~\Anaconda3\lib\site-packages\numba\compiler.py in run_frontend(func)
168 interp = interpreter.Interpreter(func_id)
169 bc = bytecode.ByteCode(func_id=func_id)
--> 170 func_ir = interp.interpret(bc)
171 post_proc = postproc.PostProcessor(func_ir)
172 post_proc.run()
~\Anaconda3\lib\site-packages\numba\interpreter.py in interpret(self, bytecode)
101 # Data flow analysis
102 self.dfa = dataflow.DataFlowAnalysis(self.cfa)
--> 103 self.dfa.run()
104
105 # Temp states during interpretation
~\Anaconda3\lib\site-packages\numba\dataflow.py in run(self)
26 def run(self):
27 for blk in self.cfa.iterliveblocks():
---> 28 self.infos[blk.offset] = self.run_on_block(blk)
29
30 def run_on_block(self, blk):
~\Anaconda3\lib\site-packages\numba\dataflow.py in run_on_block(self, blk)
76 for offset in blk:
77 inst = self.bytecode[offset]
---> 78 self.dispatch(info, inst)
79 return info
80
~\Anaconda3\lib\site-packages\numba\dataflow.py in dispatch(self, info, inst)
86 fname = "op_%s" % inst.opname.replace('+', '_')
87 fn = getattr(self, fname, self.handle_unknown_opcode)
---> 88 fn(info, inst)
89
90 def handle_unknown_opcode(self, info, inst):
~\Anaconda3\lib\site-packages\numba\dataflow.py in handle_unknown_opcode(self, info, inst)
91 msg = "Use of unknown opcode {} at line {} of {}"
92 raise NotImplementedError(msg.format(inst.opname, inst.lineno,
---> 93 self.bytecode.func_id.filename))
94
95 def dup_topx(self, info, inst, count):
NotImplementedError: Failed in nopython mode pipeline (step: inline calls to locally defined closures)
Use of unknown opcode MAP_ADD at line 116 of <ipython-input-287-147d4798a88b>
How do I fix this error?
Thanks a lot for your help.
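For what it's worth: MAP_ADD is the bytecode opcode CPython emits for a dict comprehension, and the one numba is choking on here appears to be the comprehension inside search; the Numba release in this traceback does not recognize that opcode in nopython mode. Upgrading Numba may make the opcode known; otherwise, a minimal sketch of a workaround, assuming the rest of the class stays as above, is to replace the comprehension with an explicit loop:
def search(self, q) -> list:
    # Build the score dict with a plain loop instead of a dict
    # comprehension, which is what compiles to MAP_ADD.
    cc = {}
    for i in range(self.n_docs):
        cc[i] = self.cosinus_sparse(self.corpus_vectorized[i, :], q)
    return sorted(cc.items(), key=lambda x: x[1], reverse=True)
Note also that @jit on ordinary instance methods is generally not supported in nopython mode (Numba cannot type self), so leaving search and get_result uncompiled is the safer bet here.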

TypeError: 'DesignMatrix' object is not callable

I am trying to create B-splines with the patsy package in an ipynb notebook on JupyterLab:
from patsy import dmatrix
bs = dmatrix("bs(x, df=50, degree=1) - 1", {"x": x})
axes[0].plot(x, bs)
axes[0].set_title("Basis functions")
plt.show()
This works fine the first time I run it, but when I rerun the cell it fails with the following error:
-----------------------------------------------------
TypeError Traceback (most recent call last)
/opt/conda/lib/python3.8/site-packages/patsy/compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs)
35 try:
---> 36 return f(*args, **kwargs)
37 except Exception as e:
/opt/conda/lib/python3.8/site-packages/patsy/eval.py in eval(self, expr, source_name, inner_namespace)
164 code = compile(expr, source_name, "eval", self.flags, False)
--> 165 return eval(code, {}, VarLookupDict([inner_namespace]
166 + self._namespaces))
<string> in <module>
TypeError: 'DesignMatrix' object is not callable
The above exception was the direct cause of the following exception:
PatsyError Traceback (most recent call last)
<ipython-input-6-6ed4ba95a384> in <module>
2
3 _, axes = plt.subplots(2, figsize=(16, 16))
----> 4 bs = dmatrix("bs(x, df=50, degree=1) - 1", {"x": x})
5 axes[0].plot(x, bs)
6 axes[0].set_title("Basis functions")
/opt/conda/lib/python3.8/site-packages/patsy/highlevel.py in dmatrix(formula_like, data, eval_env, NA_action, return_type)
288 """
289 eval_env = EvalEnvironment.capture(eval_env, reference=1)
--> 290 (lhs, rhs) = _do_highlevel_design(formula_like, data, eval_env,
291 NA_action, return_type)
292 if lhs.shape[1] != 0:
/opt/conda/lib/python3.8/site-packages/patsy/highlevel.py in _do_highlevel_design(formula_like, data, eval_env, NA_action, return_type)
162 def data_iter_maker():
163 return iter([data])
--> 164 design_infos = _try_incr_builders(formula_like, data_iter_maker, eval_env,
165 NA_action)
166 if design_infos is not None:
/opt/conda/lib/python3.8/site-packages/patsy/highlevel.py in _try_incr_builders(formula_like, data_iter_maker, eval_env, NA_action)
64 if isinstance(formula_like, ModelDesc):
65 assert isinstance(eval_env, EvalEnvironment)
---> 66 return design_matrix_builders([formula_like.lhs_termlist,
67 formula_like.rhs_termlist],
68 data_iter_maker,
/opt/conda/lib/python3.8/site-packages/patsy/build.py in design_matrix_builders(termlists, data_iter_maker, eval_env, NA_action)
691 # on some data to find out what type of data they return.
692 (num_column_counts,
--> 693 cat_levels_contrasts) = _examine_factor_types(all_factors,
694 factor_states,
695 data_iter_maker,
/opt/conda/lib/python3.8/site-packages/patsy/build.py in _examine_factor_types(factors, factor_states, data_iter_maker, NA_action)
441 for data in data_iter_maker():
442 for factor in list(examine_needed):
--> 443 value = factor.eval(factor_states[factor], data)
444 if factor in cat_sniffers or guess_categorical(value):
445 if factor not in cat_sniffers:
/opt/conda/lib/python3.8/site-packages/patsy/eval.py in eval(self, memorize_state, data)
562
563 def eval(self, memorize_state, data):
--> 564 return self._eval(memorize_state["eval_code"],
565 memorize_state,
566 data)
/opt/conda/lib/python3.8/site-packages/patsy/eval.py in _eval(self, code, memorize_state, data)
545 def _eval(self, code, memorize_state, data):
546 inner_namespace = VarLookupDict([data, memorize_state["transforms"]])
--> 547 return call_and_wrap_exc("Error evaluating factor",
548 self,
549 memorize_state["eval_env"].eval,
/opt/conda/lib/python3.8/site-packages/patsy/compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs)
41 origin)
42 # Use 'exec' to hide this syntax from the Python 2 parser:
---> 43 exec("raise new_exc from e")
44 else:
45 # In python 2, we just let the original exception escape -- better
/opt/conda/lib/python3.8/site-packages/patsy/compat.py in <module>
PatsyError: Error evaluating factor: TypeError: 'DesignMatrix' object is not callable
bs(x, df=50, degree=1) - 1
^^^^^^^^^^^^^^^^^^^^^^
It turns out it was because I was overriding the variable bs myself, and hence shadowing the bs function inside the patsy formula string.
This is why eval is an antipattern, as usual...
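In other words, a minimal sketch of the fix (assuming x and axes are defined as in the question) is simply to bind the design matrix to any name that does not collide with patsy's bs() builder:
from patsy import dmatrix
# 'basis' is an arbitrary name; anything other than 'bs' avoids the shadowing
basis = dmatrix("bs(x, df=50, degree=1) - 1", {"x": x})
axes[0].plot(x, basis)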

"Error while extracting" from tensorflow datasets

I want to train a TensorFlow image segmentation model on COCO, and thought I would leverage the dataset builder already included. The download seems to complete, but it crashes while extracting the zip files.
Running TF 2.0.0 on a Jupyter Notebook under a conda environment. The computer is 64-bit Windows 10. The Oxford Pet III dataset used in the official image segmentation tutorial works fine.
Below is the error message (my local user name replaced with %user%).
---------------------------------------------------------------------------
OutOfRangeError Traceback (most recent call last)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
88 try:
---> 89 for path, handle in iter_archive(from_path, method):
90 path = tf.compat.as_text(path)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in iter_zip(arch_f)
176 with _open_or_pass(arch_f) as fobj:
--> 177 z = zipfile.ZipFile(fobj)
178 for member in z.infolist():
~\.conda\envs\tf-tutorial\lib\zipfile.py in __init__(self, file, mode, compression, allowZip64)
1130 if mode == 'r':
-> 1131 self._RealGetContents()
1132 elif mode in ('w', 'x'):
~\.conda\envs\tf-tutorial\lib\zipfile.py in _RealGetContents(self)
1193 try:
-> 1194 endrec = _EndRecData(fp)
1195 except OSError:
~\.conda\envs\tf-tutorial\lib\zipfile.py in _EndRecData(fpin)
263 # Determine file size
--> 264 fpin.seek(0, 2)
265 filesize = fpin.tell()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
506 instructions)
--> 507 return func(*args, **kwargs)
508
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in seek(self, offset, whence, position)
166 elif whence == 2:
--> 167 offset += self.size()
168 else:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in size(self)
101 """Returns the size of the file."""
--> 102 return stat(self.__name).length
103
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat(filename)
726 """
--> 727 return stat_v2(filename)
728
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat_v2(path)
743 file_statistics = pywrap_tensorflow.FileStatistics()
--> 744 pywrap_tensorflow.Stat(compat.as_bytes(path), file_statistics)
745 return file_statistics
OutOfRangeError: C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
During handling of the above exception, another exception occurred:
ExtractError Traceback (most recent call last)
<ipython-input-27-887fa0198611> in <module>
1 cocoBuilder = tfds.builder('coco')
2 info = cocoBuilder.info
----> 3 cocoBuilder.download_and_prepare()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
50 _check_no_positional(fn, args, ismethod, allowed=allowed)
51 _check_required(fn, kwargs)
---> 52 return fn(*args, **kwargs)
53
54 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in download_and_prepare(self, download_dir, download_config)
285 self._download_and_prepare(
286 dl_manager=dl_manager,
--> 287 download_config=download_config)
288
289 # NOTE: If modifying the lines below to put additional information in
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, download_config)
946 super(GeneratorBasedBuilder, self)._download_and_prepare(
947 dl_manager=dl_manager,
--> 948 max_examples_per_split=download_config.max_examples_per_split,
949 )
950
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, **prepare_split_kwargs)
802 # Generating data for all splits
803 split_dict = splits_lib.SplitDict()
--> 804 for split_generator in self._split_generators(dl_manager):
805 if splits_lib.Split.ALL == split_generator.split_info.name:
806 raise ValueError(
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\image\coco.py in _split_generators(self, dl_manager)
237 root_url = 'http://images.cocodataset.org/'
238 extracted_paths = dl_manager.download_and_extract({
--> 239 key: root_url + url for key, url in urls.items()
240 })
241
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in download_and_extract(self, url_or_urls)
357 with self._downloader.tqdm():
358 with self._extractor.tqdm():
--> 359 return _map_promise(self._download_extract, url_or_urls)
360
361 @property
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _map_promise(map_fn, all_inputs)
393 """Map the function into each element and resolve the promise."""
394 all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
--> 395 res = utils.map_nested(_wait_on_promise, all_promises)
396 return res
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in <dictcomp>(.0)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
141 return tuple(mapped)
142 # Singleton
--> 143 return function(data_struct)
144
145
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _wait_on_promise(p)
377
378 def _wait_on_promise(p):
--> 379 return p.get()
380
381 else:
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in get(self, timeout)
508 target = self._target()
509 self._wait(timeout or DEFAULT_TIMEOUT)
--> 510 return self._target_settled_value(_raise=True)
511
512 def _target_settled_value(self, _raise=False):
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _target_settled_value(self, _raise)
512 def _target_settled_value(self, _raise=False):
513 # type: (bool) -> Any
--> 514 return self._target()._settled_value(_raise)
515
516 _value = _reason = _target_settled_value
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _settled_value(self, _raise)
222 if _raise:
223 raise_val = self._fulfillment_handler0
--> 224 reraise(type(raise_val), raise_val, self._traceback)
225 return self._fulfillment_handler0
226
~\.conda\envs\tf-tutorial\lib\site-packages\six.py in reraise(tp, value, tb)
694 if value.__traceback__ is not tb:
695 raise value.with_traceback(tb)
--> 696 raise value
697 finally:
698 value = None
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in handle_future_result(future)
840 # type: (Any) -> None
841 try:
--> 842 resolve(future.result())
843 except Exception as e:
844 tb = exc_info()[2]
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\.conda\envs\tf-tutorial\lib\concurrent\futures\thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
92 except BaseException as err:
93 msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
---> 94 raise ExtractError(msg)
95 # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
96 # directories, so delete destination first, if it already exists.
ExtractError: Error while extracting C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip to C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip : C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
The message seems cryptic to me. The folder it is trying to extract to does not exist when the notebook is started - it is created by TensorFlow, and only at that command. I obviously tried deleting it completely and running again, to no effect.
The code that leads to the error is (everything runs fine until the last line):
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
import tensorflow_datasets as tfds
from IPython.display import clear_output
import matplotlib.pyplot as plt
dataset, info = tfds.load('coco', with_info=True)
I also tried breaking the last command down into assigning the tfds.builder object and then running download_and_prepare, and again got the same error.
There is enough space on disk - after the download there are still 50+ GB available, while the dataset is supposed to be 37 GB in its largest (2014) version.
I had a similar problem with Windows 10 & COCO 2017. My solution is simple: extract the ZIP file manually according to the folder path in the error message.
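A minimal sketch of that manual step with the standard library, assuming the source and destination paths from the ExtractError above (the '...' stands for the long hash suffix, which you should copy from your own error message):
import os
import zipfile
# Paths taken from the ExtractError message; adjust to your machine.
src = r"C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train2014....zip"
dst = r"C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train2014....zip"
os.makedirs(dst, exist_ok=True)  # TFDS expects the extracted files here
with zipfile.ZipFile(src) as zf:
    zf.extractall(dst)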

Having problems while using dask map_partitions with a string matching algorithm

I'm having some problems applying a text search algorithm with parallelized dask infrastructure.
I'm trying to find the best match for 40,000 strings in a Series object against a 4,000-string list.
I could have done it using pandas.apply, but it's too time-expensive, so I decided to try parallelization with map_partitions in dask.
I'm using this text search library with python-Levenshtein: https://marcobonzanini.com/2015/02/25/fuzzy-string-matching-in-python
As you can see, it works ok on this example from a pandas dataset:
process.extractOne(df['endereco2'][1],choices=choices,scorer=fuzz.token_set_ratio,
score_cutoff=60)
Output: ('R ALVARO DUARTE DE ALMEIDA PROFESSOR', 85)
but it's not working when using dask:
from dask import dataframe as dd
sd = dd.from_pandas(r13_2["endereco2"],npartitions=3).map_partitions(lambda df : df.apply(process.extractOne,choices=choices,scorer=fuzz.token_set_ratio,score_cutoff=60)).compute(scheduler='processes')
Output:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-69-f39ab0d086b5> in <module>
1 from dask import dataframe as dd
----> 2 sd = dd.from_pandas(r13_2["endereco2"],npartitions=3).map_partitions(lambda df : df.apply(process.extractOne,choices=choices,scorer=fuzz.token_set_ratio,score_cutoff=60)).compute(scheduler='processes')
~\Anaconda3\envs\mono\lib\site-packages\dask\base.py in compute(self, **kwargs)
154 dask.base.compute
155 """
--> 156 (result,) = compute(self, traverse=False, **kwargs)
157 return result
158
~\Anaconda3\envs\mono\lib\site-packages\dask\base.py in compute(*args, **kwargs)
396 keys = [x.__dask_keys__() for x in collections]
397 postcomputes = [x.__dask_postcompute__() for x in collections]
--> 398 results = schedule(dsk, keys, **kwargs)
399 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
400
~\Anaconda3\envs\mono\lib\site-packages\dask\multiprocessing.py in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, pool, **kwargs)
190 get_id=_process_get_id, dumps=dumps, loads=loads,
191 pack_exception=pack_exception,
--> 192 raise_exception=reraise, **kwargs)
193 finally:
194 if cleanup:
~\Anaconda3\envs\mono\lib\site-packages\dask\local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
460 _execute_task(task, data) # Re-execute locally
461 else:
--> 462 raise_exception(exc, tb)
463 res, worker_id = loads(res_info)
464 state['cache'][key] = res
~\Anaconda3\envs\mono\lib\site-packages\dask\compatibility.py in reraise(exc, tb)
109 def reraise(exc, tb=None):
110 if exc.__traceback__ is not tb:
--> 111 raise exc.with_traceback(tb)
112 raise exc
113
~\Anaconda3\envs\mono\lib\site-packages\dask\local.py in execute_task()
228 try:
229 task, data = loads(task_info)
--> 230 result = _execute_task(task, data)
231 id = get_id()
232 result = dumps((result, id))
~\Anaconda3\envs\mono\lib\site-packages\dask\core.py in _execute_task()
117 func, args = arg[0], arg[1:]
118 args2 = [_execute_task(a, cache) for a in args]
--> 119 return func(*args2)
120 elif not ishashable(arg):
121 return arg
~\Anaconda3\envs\mono\lib\site-packages\dask\optimization.py in __call__()
940 % (len(self.inkeys), len(args)))
941 return core.get(self.dsk, self.outkey,
--> 942 dict(zip(self.inkeys, args)))
943
944 def __reduce__(self):
~\Anaconda3\envs\mono\lib\site-packages\dask\core.py in get()
147 for key in toposort(dsk):
148 task = dsk[key]
--> 149 result = _execute_task(task, cache)
150 cache[key] = result
151 result = _execute_task(out, cache)
~\Anaconda3\envs\mono\lib\site-packages\dask\core.py in _execute_task()
117 func, args = arg[0], arg[1:]
118 args2 = [_execute_task(a, cache) for a in args]
--> 119 return func(*args2)
120 elif not ishashable(arg):
121 return arg
~\Anaconda3\envs\mono\lib\site-packages\dask\compatibility.py in apply()
91 def apply(func, args, kwargs=None):
92 if kwargs:
---> 93 return func(*args, **kwargs)
94 else:
95 return func(*args)
~\Anaconda3\envs\mono\lib\site-packages\dask\dataframe\core.py in apply_and_enforce()
3877 func = kwargs.pop('_func')
3878 meta = kwargs.pop('_meta')
-> 3879 df = func(*args, **kwargs)
3880 if is_dataframe_like(df) or is_series_like(df) or is_index_like(df):
3881 if not len(df):
<ipython-input-69-f39ab0d086b5> in <lambda>()
1 from dask import dataframe as dd
----> 2 sd = dd.from_pandas(r13_2["endereco2"],npartitions=3).map_partitions(lambda df : df.apply(process.extractOne,choices=choices,scorer=fuzz.token_set_ratio,score_cutoff=60)).compute(scheduler='processes')
~\Anaconda3\envs\mono\lib\site-packages\pandas\core\series.py in apply()
3589 else:
3590 values = self.astype(object).values
-> 3591 mapped = lib.map_infer(values, f, convert=convert_dtype)
3592
3593 if len(mapped) and isinstance(mapped[0], Series):
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
~\Anaconda3\envs\mono\lib\site-packages\pandas\core\series.py in f()
3576 if kwds or args and not isinstance(func, np.ufunc):
3577 def f(x):
-> 3578 return func(x, *args, **kwds)
3579 else:
3580 f = func
~\Anaconda3\envs\mono\lib\site-packages\fuzzywuzzy\process.py in extractOne()
218 best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
219 try:
--> 220 return max(best_list, key=lambda i: i[1])
221 except ValueError:
222 return None
~\Anaconda3\envs\mono\lib\site-packages\fuzzywuzzy\process.py in extractWithoutOrder()
76
77 # Run the processor on the input query.
---> 78 processed_query = processor(query)
79
80 if len(processed_query) == 0:
~\Anaconda3\envs\mono\lib\site-packages\fuzzywuzzy\utils.py in full_process()
93 s = asciidammit(s)
94 # Keep only Letters and Numbers (see Unicode docs).
---> 95 string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
96 # Force into lowercase.
97 string_out = StringProcessor.to_lower_case(string_out)
~\Anaconda3\envs\mono\lib\site-packages\fuzzywuzzy\string_processing.py in replace_non_letters_non_numbers_with_whitespace()
24 numbers with a single white space.
25 """
---> 26 return cls.regex.sub(" ", a_string)
27
28 strip = staticmethod(string.strip)
TypeError: expected string or bytes-like object
What's happening?
Note: I solved my problem using pool.apply from the multiprocessing lib, but I still want to know what happened with Dask.
While putting together the MCVE I realized it was a naive syntax problem: I can't use map_partitions on a dask dataframe without specifying the column I'm using, even if there is only one column. So I should have used sd[0].map_partitions instead of sd.map_partitions.
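A minimal sketch of the corrected call as described, assuming r13_2 and choices are as in the question (the explicit meta is an extra precaution: it keeps dask from probing the function with placeholder values during meta inference, another plausible source of "expected string or bytes-like object"):
import dask.dataframe as dd
from fuzzywuzzy import fuzz, process
ddf = dd.from_pandas(r13_2[['endereco2']], npartitions=3)
sd = ddf['endereco2'].map_partitions(
    lambda s: s.apply(process.extractOne, choices=choices,
                      scorer=fuzz.token_set_ratio, score_cutoff=60),
    meta=('endereco2', 'object'),
).compute(scheduler='processes')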

PatsyError / NameError: name is not defined when using smf.ols

I am trying to use multiple linear regression to analyze some time series data and their lags. The variables are currency rates and their lag-1 and lag-2 values. The code is below.
I checked each variable and there is nothing abnormal.
import pandas as pd
import statsmodels.formula.api as smf

rate = pd.read_csv('P2training.csv', header=0)
# change date format in csv
rate['Date'] = pd.to_datetime(rate['Date'], format='%Y-%m-%d')
rate.set_index('Date', inplace=True, drop=True)

lags = [1, 2]
lagdata = rate
for i in lags:
    tmp = rate.shift(i).copy()
    lagdata = lagdata.join(tmp, rsuffix='_lag{}'.format(i))

# fit the linear regression models
collist = list(lagdata.columns)
collist.remove('AUD/USD')
collist.remove('GBP/USD')
collist.remove('CAD/USD')
collist.remove('NLG/USD')
collist.remove('FRF/USD')
collist.remove('DEM/USD')
collist.remove('JPY/USD')
collist.remove('CHF/USD')
form = 'JPY/USD' + '~' + '+'.join(collist)
lagdata.dropna(inplace=True)
model = smf.ols(formula=form, data=lagdata).fit()
The error occurs in the last step, when calling smf.ols. A NameError says one of the variables is not defined:
NameError Traceback (most recent call last)
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site- packages\patsy\compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs)
116 try:
--> 117 return f(*args, **kwargs)
118 except Exception as e:
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\eval.py in eval(self, expr, source_name, inner_namespace)
165 return eval(code, {}, VarLookupDict([inner_namespace]
--> 166 + self._namespaces))
167
<string> in <module>()
NameError: name 'USD_lag2' is not defined
The above exception was the direct cause of the following exception:
PatsyError Traceback (most recent call last)
<ipython-input-26-1985b8d39238> in <module>()
51 #print(collist)
52 #print(lagdata)
---> 53 model = smf.ols(formula=form, data = lagdata).fit()
54
55 #print(model.summary())
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site- packages\statsmodels\base\model.py in from_formula(cls, formula, data, subset, drop_cols, *args, **kwargs)
153
154 tmp = handle_formula_data(data, None, formula, depth=eval_env,
--> 155 missing=missing)
156 ((endog, exog), missing_idx, design_info) = tmp
157
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\statsmodels\formula\formulatools.py in handle_formula_data(Y, X, formula, depth, missing)
63 if data_util._is_using_pandas(Y, None):
64 result = dmatrices(formula, Y, depth, return_type='dataframe',
---> 65 NA_action=na_action)
66 else:
67 result = dmatrices(formula, Y, depth, return_type='dataframe',
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\highlevel.py in dmatrices(formula_like, data, eval_env, NA_action, return_type)
308 eval_env = EvalEnvironment.capture(eval_env, reference=1)
309 (lhs, rhs) = _do_highlevel_design(formula_like, data, eval_env,
--> 310 NA_action, return_type)
311 if lhs.shape[1] == 0:
312 raise PatsyError("model is missing required outcome variables")
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\highlevel.py in _do_highlevel_design(formula_like, data, eval_env, NA_action, return_type)
163 return iter([data])
164 design_infos = _try_incr_builders(formula_like, data_iter_maker, eval_env,
--> 165 NA_action)
166 if design_infos is not None:
167 return build_design_matrices(design_infos, data,
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\highlevel.py in _try_incr_builders(formula_like, data_iter_maker, eval_env, NA_action)
68 data_iter_maker,
69 eval_env,
---> 70 NA_action)
71 else:
72 return None
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\build.py in design_matrix_builders(termlists, data_iter_maker, eval_env, NA_action)
694 factor_states,
695 data_iter_maker,
--> 696 NA_action)
697 # Now we need the factor infos, which encapsulate the knowledge of
698 # how to turn any given factor into a chunk of data:
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\build.py in _examine_factor_types(factors, factor_states, data_iter_maker, NA_action)
441 for data in data_iter_maker():
442 for factor in list(examine_needed):
--> 443 value = factor.eval(factor_states[factor], data)
444 if factor in cat_sniffers or guess_categorical(value):
445 if factor not in cat_sniffers:
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\eval.py in eval(self, memorize_state, data)
564 return self._eval(memorize_state["eval_code"],
565 memorize_state,
--> 566 data)
567
568 __getstate__ = no_pickling
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\eval.py in _eval(self, code, memorize_state, data)
549 memorize_state["eval_env"].eval,
550 code,
--> 551 inner_namespace=inner_namespace)
552
553 def memorize_chunk(self, state, which_pass, data):
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs)
122 origin)
123 # Use 'exec' to hide this syntax from the Python 2 parser:
--> 124 exec("raise new_exc from e")
125 else:
126 # In python 2, we just let the original exception escape -- better
C:\Users\yaojia\AppData\Local\Continuum\Anaconda3\lib\site-packages\patsy\compat.py in <module>()
PatsyError: Error evaluating factor: NameError: name 'USD_lag2' is not defined
JPY/USD~AUD/USD_lag1+GBP/USD_lag1+CAD/USD_lag1+NLG/USD_lag1+FRF/USD_lag1+DEM/USD_lag1+JPY/USD_lag1+CHF/USD_lag1+AUD/USD_lag2+GBP/USD_lag2+CAD/USD_lag2+NLG/USD_lag2+FRF/USD_lag2+DEM/USD_lag2+JPY/USD_lag2+CHF/USD_lag2
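Worth noting: the column names contain /, which patsy parses as a formula operator, so JPY/USD_lag2 is split into the names JPY and USD_lag2, and patsy then looks for a variable named USD_lag2 - hence the NameError. A minimal sketch of a workaround is to quote each name with patsy's Q() so the slash stays inside the variable name:
# Build the formula with quoted names so '/' is not parsed as an operator
form = 'Q("JPY/USD") ~ ' + ' + '.join('Q("{}")'.format(c) for c in collist)
model = smf.ols(formula=form, data=lagdata).fit()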
