Related
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [1], in <cell line: 3>()
1 import torch
----> 3 model = torch.hub.load('C:/Users/user/Desktop/***/model/', 'custom', path='runs/train/***/weights/best.pt', force_reload = True, source='local')
4 # Images
5 imgs = ['/kaggle/input/***/images/image-1.png'] # batch of images
File C:\ProgramData\Anaconda3\lib\site-packages\torch\hub.py:404, in load(repo_or_dir, model, source, force_reload, verbose, skip_validation, *args, **kwargs)
401 if source == 'github':
402 repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, verbose, skip_validation)
--> 404 model = _load_local(repo_or_dir, model, *args, **kwargs)
405 return model
File C:\ProgramData\Anaconda3\lib\site-packages\torch\hub.py:432, in _load_local(hubconf_dir, model, *args, **kwargs)
429 hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF)
430 hub_module = _import_module(MODULE_HUBCONF, hubconf_path)
--> 432 entry = _load_entry_from_hubconf(hub_module, model)
433 model = entry(*args, **kwargs)
435 sys.path.remove(hubconf_dir)
File C:\ProgramData\Anaconda3\lib\site-packages\torch\hub.py:240, in _load_entry_from_hubconf(m, model)
237 func = _load_attr_from_module(m, model)
239 if func is None or not callable(func):
--> 240 raise RuntimeError('Cannot find callable {} in hubconf'.format(model))
242 return func
RuntimeError: Cannot find callable custom in hubconf
I use PyTorch in my project. I want to load a saved model (stored on wandb) with torch.hub.load(), but I get an entry-point error and it does not work.
I have read the related documents and discussions, but I couldn't find a solution. Please help.
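For context, the traceback above shows that with source='local', torch.hub.load imports hubconf.py from the given directory and looks up a callable with the requested name, so the error means no function called custom exists in that directory's hubconf.py (if this is a YOLOv5 model, the directory needs to contain the repo with its hubconf.py, not just the weights). A minimal, illustrative sketch of what such an entry point looks like; the default path and the loading logic here are assumptions, not the original project's code:
# hubconf.py -- placed in the directory passed to torch.hub.load(..., source='local')
import torch

def custom(path='path/to/best.pt', **kwargs):
    # Hypothetical loader: torch.hub calls this entry point with the keyword
    # arguments given to torch.hub.load; how the checkpoint becomes a model
    # is repo-specific, so a plain torch.load stands in here.
    return torch.load(path, map_location='cpu')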
I am trying to run XGBoost with a calibrated classifier; below is the snippet of code where I am facing the error:
from sklearn.calibration import CalibratedClassifierCV
from xgboost import XGBClassifier
import numpy as np
x_train =np.array([1,2,2,3,4,5,6,3,4,10,]).reshape(-1,1)
y_train = np.array([1,1,1,1,1,3,3,3,3,3])
x_cfl=XGBClassifier(n_estimators=1)
x_cfl.fit(x_train,y_train)
sig_clf = CalibratedClassifierCV(x_cfl, method="sigmoid")
sig_clf.fit(x_train, y_train)
Error:
TypeError: predict_proba() got an unexpected keyword argument 'X'
Full Trace:
TypeError Traceback (most recent call last)
<ipython-input-48-08dd0b4ae8aa> in <module>
----> 1 sig_clf.fit(x_train, y_train)
~/anaconda3/lib/python3.8/site-packages/sklearn/calibration.py in fit(self, X, y, sample_weight)
309 parallel = Parallel(n_jobs=self.n_jobs)
310
--> 311 self.calibrated_classifiers_ = parallel(
312 delayed(_fit_classifier_calibrator_pair)(
313 clone(base_estimator), X, y, train=train, test=test,
~/anaconda3/lib/python3.8/site-packages/joblib/parallel.py in __call__(self, iterable)
1039 # remaining jobs.
1040 self._iterating = False
-> 1041 if self.dispatch_one_batch(iterator):
1042 self._iterating = self._original_iterator is not None
1043
~/anaconda3/lib/python3.8/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861
~/anaconda3/lib/python3.8/site-packages/joblib/parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to
~/anaconda3/lib/python3.8/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
~/anaconda3/lib/python3.8/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
~/anaconda3/lib/python3.8/site-packages/joblib/parallel.py in __call__(self)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264
~/anaconda3/lib/python3.8/site-packages/joblib/parallel.py in <listcomp>(.0)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264
~/anaconda3/lib/python3.8/site-packages/sklearn/utils/fixes.py in __call__(self, *args, **kwargs)
220 def __call__(self, *args, **kwargs):
221 with config_context(**self.config):
--> 222 return self.function(*args, **kwargs)
~/anaconda3/lib/python3.8/site-packages/sklearn/calibration.py in _fit_classifier_calibrator_pair(estimator, X, y, train, test, supports_sw, method, classes, sample_weight)
443 n_classes = len(classes)
444 pred_method = _get_prediction_method(estimator)
--> 445 predictions = _compute_predictions(pred_method, X[test], n_classes)
446
447 sw = None if sample_weight is None else sample_weight[test]
~/anaconda3/lib/python3.8/site-packages/sklearn/calibration.py in _compute_predictions(pred_method, X, n_classes)
499 (X.shape[0], 1).
500 """
--> 501 predictions = pred_method(X=X)
502 if hasattr(pred_method, '__name__'):
503 method_name = pred_method.__name__
TypeError: predict_proba() got an unexpected keyword argument 'X'
I am quite surprised by this, as it was running for me until yesterday, and the same code runs when I use another classifier:
from sklearn.calibration import CalibratedClassifierCV
from lightgbm import LGBMClassifier
import numpy as np
x_train = np.array([1,2,2,3,4,5,6,3,4,10,]).reshape(-1,1)
y_train = np.array([1,1,1,1,1,3,3,3,3,3])
x_cfl=LGBMClassifier(n_estimators=1)
x_cfl.fit(x_train,y_train)
sig_clf = CalibratedClassifierCV(x_cfl, method="sigmoid")
sig_clf.fit(x_train, y_train)
Output:
CalibratedClassifierCV(base_estimator=LGBMClassifier(n_estimators=1))
Is there a problem with my XGBoost installation? I use conda for installation, and as far as I remember I uninstalled xgboost yesterday and installed it again.
My xgboost version:
1.3.0
I believe that the problem comes from XGBoost.
It's explained here: https://github.com/dmlc/xgboost/pull/6555
XGBoost defined:
predict_proba(self, data, ...
instead of:
predict_proba(self, X, ...
And since sklearn 0.24 calls clf.predict_proba(X=X), an exception is thrown.
Here is an idea to fix the problem without changing the versions of your packages: create a class that inherits from XGBClassifier, override predict_proba with the right argument name, and call super() (a sketch follows below).
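A minimal sketch of that idea, assuming xgboost 1.3.0 (where the first parameter of predict_proba was named data); the wrapper class name is mine, not from the original answer:
from xgboost import XGBClassifier

class PatchedXGBClassifier(XGBClassifier):
    # Expose predict_proba with the keyword name scikit-learn 0.24 passes (X=...).
    def predict_proba(self, X, **kwargs):
        # Forward positionally, since xgboost 1.3.0 called this parameter `data`.
        return super().predict_proba(X, **kwargs)

# Used in place of XGBClassifier in the snippet from the question:
# sig_clf = CalibratedClassifierCV(PatchedXGBClassifier(n_estimators=1), method="sigmoid")
# sig_clf.fit(x_train, y_train)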
It's fixed now; it seems like there is a bug in scikit-learn 0.24.
I downgraded to 0.22.2.post1 and it was fixed!
The best way is to upgrade your xgboost. Run the following from the Jupyter notebook:
pip install xgboost --upgrade
After successfully running a training estimator and experiment in an Azure ML notebook, I get Unauthorized errors when trying to register the model. I also see an "Unauthorized" bar pop up at the top when I look at the estimator or the Models tab in the Azure portal.
This seems like it could be a resource group issue, but I only have one resource group. Has anyone had this issue before?
Successful experiment:
import os
from azureml.core.experiment import Experiment
from azureml.train.dnn import PyTorch

script_params = {
    # '--num_epochs': 3,
    '--output_dir': './outputs'
}

estimator = PyTorch(source_directory=os.path.join(os.getcwd(), 'Estimator'),
                    script_params=script_params,
                    compute_target=compute_target,  # compute target created earlier in the notebook
                    entry_script='train.py',
                    use_gpu=True,
                    pip_packages=['pillow==5.4.1', 'torch', 'numpy'])
experiment_name = 'pytorch-rnn-generator'
experiment = Experiment(ws, name=experiment_name)
run = experiment.submit(estimator)
run.wait_for_completion(show_output=True)
Model registration:
model = run.register_model(model_name='rnn-tv-script-gen', model_path='outputs/')
The stack trace:
ModelErrorResponseException Traceback (most recent call last)
<ipython-input-6-178d7ee9830a> in <module>
1 from azureml.core.model import Model
2
----> 3 model = run.register_model(model_name='rnn-tv-script-gen', model_path='outputs/')
4
5 servive = Model.deploy(ws,
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/core/run.py in register_model(self, model_name, model_path, tags, properties, model_framework, model_framework_version, description, datasets, sample_input_dataset, sample_output_dataset, resource_configuration, **kwargs)
1988 model_name, model_path, tags, properties, model_framework, model_framework_version,
1989 description=description, datasets=datasets, unpack=False, sample_input_dataset=sample_input_dataset,
-> 1990 sample_output_dataset=sample_output_dataset, resource_configuration=resource_configuration, **kwargs)
1991
1992 def _update_dataset_lineage(self, datasets):
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_run_impl/run_history_facade.py in register_model(self, model_name, model_path, tags, properties, model_framework, model_framework_version, asset_id, sample_input_dataset, sample_output_dataset, resource_configuration, **kwargs)
386 artifacts,
387 metadata_dict=metadata_dict,
--> 388 run_id=self._run_id)
389 asset_id = asset.id
390 else:
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/assets_client.py in create_asset(self, model_name, artifact_values, metadata_dict, project_id, run_id, tags, properties)
50 "meta": metadata_dict,
51 "CreatedTime": created_time}
---> 52 return self._execute_with_workspace_arguments(self._client.asset.create, payload)
53
54 def get_assets_by_run_id_and_name(self, run_id, name):
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/workspace_client.py in _execute_with_workspace_arguments(self, func, *args, **kwargs)
69
70 def _execute_with_workspace_arguments(self, func, *args, **kwargs):
---> 71 return self._execute_with_arguments(func, copy.deepcopy(self._workspace_arguments), *args, **kwargs)
72
73 def _execute_with_arguments(self, func, args_list, *args, **kwargs):
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/workspace_client.py in _execute_with_arguments(self, func, args_list, *args, **kwargs)
85 return self._call_paginated_api(func, *args_list, **kwargs)
86 else:
---> 87 return self._call_api(func, *args_list, **kwargs)
88 except ErrorResponseException as e:
89 raise ServiceException(e)
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/clientbase.py in _call_api(self, func, *args, **kwargs)
224 return AsyncTask(future, _ident=ident, _parent_logger=self._logger)
225 else:
--> 226 return self._execute_with_base_arguments(func, *args, **kwargs)
227
228 def _call_paginated_api(self, func, *args, **kwargs):
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/clientbase.py in _execute_with_base_arguments(self, func, *args, **kwargs)
277 total_retry = 0 if self.retries < 0 else self.retries
278 return ClientBase._execute_func_internal(
--> 279 back_off, total_retry, self._logger, func, _noop_reset, *args, **kwargs)
280
281 @classmethod
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/clientbase.py in _execute_func_internal(cls, back_off, total_retry, logger, func, reset_func, *args, **kwargs)
292 return func(*args, **kwargs)
293 except Exception as error:
--> 294 left_retry = cls._handle_retry(back_off, left_retry, total_retry, error, logger, func)
295
296 reset_func(*args, **kwargs) # reset_func is expected to undo any side effects from a failed func call.
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/clientbase.py in _handle_retry(cls, back_off, left_retry, total_retry, error, logger, func)
341 back_off = DEFAULT_503_BACKOFF
342 elif error.response.status_code < 500:
--> 343 raise error
344 elif not isinstance(error, RETRY_EXCEPTIONS):
345 raise error
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/clientbase.py in _execute_func_internal(cls, back_off, total_retry, logger, func, reset_func, *args, **kwargs)
290 while left_retry >= 0:
291 try:
--> 292 return func(*args, **kwargs)
293 except Exception as error:
294 left_retry = cls._handle_retry(back_off, left_retry, total_retry, error, logger, func)
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/azureml/_restclient/operations/asset_operations.py in create(self, subscription_id, resource_group_name, workspace, asset, custom_headers, raw, **operation_config)
88
89 if response.status_code not in [200]:
---> 90 raise models.ModelErrorResponseException(self._deserialize, response)
91
92 deserialized = None
ModelErrorResponseException: Unauthorized
Definitely a strange error. It looks like you're using this guide as a reference. Without seeing what your train.py looks like, I'd be curious to know whether you:
1. get the error when running the ipynb of the guide without making any changes;
2. still have a snippet like the one below in your code:
os.makedirs(args.output_dir, exist_ok=True)
torch.save(model, os.path.join(args.output_dir, 'model.pt'))
3. get a similar error if you try to download the file as shown in the second snippet of this docs section (see the sketch after this list).
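Regarding the last point, a hedged sketch of that check using the run's file APIs; the 'outputs/model.pt' name is an assumption based on the torch.save call above, not something confirmed by the question:
# List the files the completed run produced, then pull the model file down locally.
# `run` is the object returned by experiment.submit() in the question.
print(run.get_file_names())                        # should include 'outputs/model.pt'
run.download_file('outputs/model.pt', 'model.pt')  # assumed file name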
I am trying to run the example for MaskedAutoregressiveFlow at https://www.tensorflow.org/api_docs/python/tf/contrib/distributions/bijectors/MaskedAutoregressiveFlow. It's a plain copy from the docs but I receive the following error. I've tried event_shape=[dims, 1] but that doesn't seem to help (different error). I'm not sure what to make of it.
Has anyone seen this as well?
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from tensorflow.contrib.distributions import bijectors as tfb
dims = 5
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
maf = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.MaskedAutoregressiveFlow(
        shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
            hidden_layers=[512, 512])),
    event_shape=[dims])
x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
maf.log_prob(x) # Almost free; uses Bijector caching.
maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-3b2fcb2af309> in <module>()
11
12
---> 13 x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
14 maf.log_prob(x) # Almost free; uses Bijector caching.
15 maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/distribution.py in sample(self, sample_shape, seed, name)
687 samples: a `Tensor` with prepended dimensions `sample_shape`.
688 """
--> 689 return self._call_sample_n(sample_shape, seed, name)
690
691 def _log_prob(self, value):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/transformed_distribution.py in _call_sample_n(self, sample_shape, seed, name, **kwargs)
411 # work, it is imperative that this is the last modification to the
412 # returned result.
--> 413 y = self.bijector.forward(x, **kwargs)
414 y = self._set_sample_static_shape(y, sample_shape)
415
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/bijector_impl.py in forward(self, x, name)
618 NotImplementedError: if `_forward` is not implemented.
619 """
--> 620 return self._call_forward(x, name)
621
622 def _inverse(self, y):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/bijector_impl.py in _call_forward(self, x, name, **kwargs)
599 if mapping.y is not None:
600 return mapping.y
--> 601 mapping = mapping.merge(y=self._forward(x, **kwargs))
602 self._cache(mapping)
603 return mapping.y
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in _forward(self, x)
245 y0 = array_ops.zeros_like(x, name="y0")
246 # call the template once to ensure creation
--> 247 _ = self._shift_and_log_scale_fn(y0)
248 def _loop_body(index, y0):
249 """While-loop body for autoregression calculation."""
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py in __call__(self, *args, **kwargs)
358 custom_getter=self._custom_getter) as vs:
359 self._variable_scope = vs
--> 360 result = self._call_func(args, kwargs)
361 return result
362
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py in _call_func(self, args, kwargs)
300 trainable_at_start = len(
301 ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
--> 302 result = self._func(*args, **kwargs)
303
304 if self._variables_created:
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in _fn(x)
478 activation=activation,
479 *args,
--> 480 **kwargs)
481 x = masked_dense(
482 inputs=x,
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in masked_dense(inputs, units, num_blocks, exclusive, kernel_initializer, reuse, name, *args, **kwargs)
386 *args,
387 **kwargs)
--> 388 return layer.apply(inputs)
389
390
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in apply(self, inputs, *args, **kwargs)
807 Output tensor(s).
808 """
--> 809 return self.__call__(inputs, *args, **kwargs)
810
811 def _add_inbound_node(self,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in __call__(self, inputs, *args, **kwargs)
671
672 # Check input assumptions set before layer building, e.g. input rank.
--> 673 self._assert_input_compatibility(inputs)
674 if input_list and self._dtype is None:
675 try:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in _assert_input_compatibility(self, inputs)
1195 ', found ndim=' + str(ndim) +
1196 '. Full shape received: ' +
-> 1197 str(x.get_shape().as_list()))
1198 # Check dtype.
1199 if spec.dtype is not None:
ValueError: Input 0 of layer dense_1 is incompatible with the layer: : expected min_ndim=2, found ndim=1. Full shape received: [5]
originally defined at:
File "<ipython-input-2-3b2fcb2af309>", line 9, in <module>
hidden_layers=[512, 512])),
File "/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py", line 499, in masked_autoregressive_default_template
"masked_autoregressive_default_template", _fn)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py", line 152, in make_template
**kwargs)
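One thing that may be worth checking, as an assumption drawn from the shape in the error rather than a verified fix for this TensorFlow version: the masked dense layer requires a rank-2 input, and maf.sample() with no sample shape yields a rank-1 tensor of shape [5], so drawing samples with an explicit batch dimension gives the bijector a 2-D input:
# Hedged sketch: sample with an explicit sample_shape so the bijector receives
# a tensor of shape [batch, dims] instead of the rank-1 [dims] seen in the error.
x = maf.sample([16])     # shape [16, 5]
log_p = maf.log_prob(x)  # shape [16]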
How can I use the predict method
after fitting a model (http://nbviewer.ipython.org/urls/umich.box.com/shared/static/6tfc1e0q6jincsv5pgfa.ipynb)?
With the simple dietox example I get an error.
import pandas as pd
import statsmodels.api as sm

data = pd.read_csv("dietox.csv")
model = sm.MixedLM.from_formula("Weight ~ Time", data, groups=data["Pig"])
result = model.fit()
print(result.summary())

# this and other attempts don't work
result.predict(data.ix[1])
NotImplementedError Traceback (most recent call last)
<ipython-input-7-ba818b886233> in <module>()
----> 1 result.predict(data.ix[1])
C:\Anaconda\lib\site-packages\statsmodels\base\model.pyc in predict(self, exog, transform, *args, **kwargs)
747 exog = np.atleast_2d(exog) # needed in count model shape[1]
748
--> 749 return self.model.predict(self.params, exog, *args, **kwargs)
750
751
C:\Anaconda\lib\site-packages\statsmodels\base\model.pyc in predict(self, params, exog, *args, **kwargs)
175 This is a placeholder intended to be overwritten by individual models.
176 """
--> 177 raise NotImplementedError
178
179
NotImplementedError:
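At the time of this traceback, MixedLMResults did not implement predict. A hedged workaround sketch, not taken from the linked notebook: build the fixed-effects design matrix yourself and multiply it by result.fe_params to get a fixed-effects-only prediction (random effects are ignored here).
import numpy as np
import patsy

# Design matrix for the right-hand side of the formula ("Weight ~ Time"),
# evaluated on the rows to predict; its columns align with result.fe_params.
exog = patsy.dmatrix("Time", data.iloc[[1]], return_type="dataframe")
pred = np.dot(exog, result.fe_params)  # fixed-effects-only prediction
print(pred)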