The 'Box' object has no attribute 'spaces' - python

I'm trying to implement a game class where you have to stay in the 49-51 number range for as long as possible. The state space is the range from 0 to 100, the initial state is the number 47 or the number 53 (chosen randomly), and you can change the state of the environment with three actions: add 0, add 1, or add -1. After each action, a random 1 or -1 is also added to the state.
I need to choose an algorithm from the stable-baselines3 library and train it. I train the PPO algorithm, but I get the following error:
The 'Box' object has no attribute 'spaces'.
Box has no 'spaces' attribute, so what could be the problem?
import numpy as np
from stable_baselines3 import PPO
import random
from gym import Env
from gym.spaces import Discrete, Box

class CustomEnv(Env):
    def __init__(self):
        self.action_space = Discrete(3)
        self.observation_space = Box(low=np.array([0]), high=np.array([100]))
        self.state = 50 + random.randint(-3, 3)
        self.length = 120

    def step(self, action):
        self.state += action - 1
        self.length -= 1
        if self.state >= 49 and self.state <= 51:
            reward = 1
        else:
            reward = -1
        if self.length <= 0:
            done = True
        else:
            done = False
        self.state += random.randint(-1, 1)
        return self.state, reward, done, {}

    def reset(self):
        self.state = 50 + random.randint(-3, 3)
        self.length = 120

env = CustomEnv()
model = PPO("MultiInputPolicy", env)
model.learn(total_timesteps=20000)
AttributeError Traceback (most recent call last)
Input In [148], in <cell line: 41>()
38 self.length = 120
40 env = CustomEnv()
---> 41 model = PPO("MultiInputPolicy", env)
42 model.learn(total_timesteps=20000)
File E:\Anaconda\lib\site-packages\stable_baselines3\ppo\ppo.py:162, in PPO.__init__(self, policy, env, learning_rate, n_steps, batch_size, n_epochs, gamma, gae_lambda, clip_range, clip_range_vf, normalize_advantage, ent_coef, vf_coef, max_grad_norm, use_sde, sde_sample_freq, target_kl, tensorboard_log, create_eval_env, policy_kwargs, verbose, seed, device, _init_setup_model)
159 self.target_kl = target_kl
161 if _init_setup_model:
--> 162 self._setup_model()
File E:\Anaconda\lib\site-packages\stable_baselines3\ppo\ppo.py:165, in PPO._setup_model(self)
164 def _setup_model(self) -> None:
--> 165 super()._setup_model()
167 # Initialize schedules for policy/value clipping
168 self.clip_range = get_schedule_fn(self.clip_range)
File E:\Anaconda\lib\site-packages\stable_baselines3\common\on_policy_algorithm.py:117, in OnPolicyAlgorithm._setup_model(self)
106 buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer
108 self.rollout_buffer = buffer_cls(
109 self.n_steps,
110 self.observation_space,
(...)
115 n_envs=self.n_envs,
116 )
--> 117 self.policy = self.policy_class( # pytype:disable=not-instantiable
118 self.observation_space,
119 self.action_space,
120 self.lr_schedule,
121 use_sde=self.use_sde,
122 **self.policy_kwargs # pytype:disable=not-instantiable
123 )
124 self.policy = self.policy.to(self.device)
File E:\Anaconda\lib\site-packages\stable_baselines3\common\policies.py:802, in MultiInputActorCriticPolicy.__init__(self, observation_space, action_space, lr_schedule, net_arch, activation_fn, ortho_init, use_sde, log_std_init, full_std, sde_net_arch, use_expln, squash_output, features_extractor_class, features_extractor_kwargs, normalize_images, optimizer_class, optimizer_kwargs)
782 def __init__(
783 self,
784 observation_space: gym.spaces.Dict,
(...)
800 optimizer_kwargs: Optional[Dict[str, Any]] = None,
801 ):
--> 802 super().__init__(
803 observation_space,
804 action_space,
805 lr_schedule,
806 net_arch,
807 activation_fn,
808 ortho_init,
809 use_sde,
810 log_std_init,
811 full_std,
812 sde_net_arch,
813 use_expln,
814 squash_output,
815 features_extractor_class,
816 features_extractor_kwargs,
817 normalize_images,
818 optimizer_class,
819 optimizer_kwargs,
820 )
File E:\Anaconda\lib\site-packages\stable_baselines3\common\policies.py:461, in ActorCriticPolicy.__init__(self, observation_space, action_space, lr_schedule, net_arch, activation_fn, ortho_init, use_sde, log_std_init, full_std, sde_net_arch, use_expln, squash_output, features_extractor_class, features_extractor_kwargs, normalize_images, optimizer_class, optimizer_kwargs)
458 self.activation_fn = activation_fn
459 self.ortho_init = ortho_init
--> 461 self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
462 self.features_dim = self.features_extractor.features_dim
464 self.normalize_images = normalize_images
File E:\Anaconda\lib\site-packages\stable_baselines3\common\torch_layers.py:258, in CombinedExtractor.__init__(self, observation_space, cnn_output_dim)
255 extractors = {}
257 total_concat_size = 0
--> 258 for key, subspace in observation_space.spaces.items():
259 if is_image_space(subspace):
260 extractors[key] = NatureCNN(subspace, features_dim=cnn_output_dim)
AttributeError: 'Box' object has no attribute 'spaces'
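Judging from the last frame of the traceback, "MultiInputPolicy" builds a CombinedExtractor, which iterates over observation_space.spaces and therefore expects a gym.spaces.Dict observation space; a plain Box observation space is normally paired with "MlpPolicy". Below is a minimal sketch along those lines; the policy name, the float32 array observations and the reset() return value are suggested changes, not part of the original question.

import random
import numpy as np
from gym import Env
from gym.spaces import Discrete, Box
from stable_baselines3 import PPO

class CustomEnv(Env):
    def __init__(self):
        self.action_space = Discrete(3)
        # single-value Box observation, served as a float32 array
        self.observation_space = Box(low=0, high=100, shape=(1,), dtype=np.float32)
        self.reset()

    def step(self, action):
        self.state += action - 1              # actions 0/1/2 become -1/0/+1
        self.state += random.randint(-1, 1)   # random drift after each action
        self.length -= 1
        reward = 1 if 49 <= self.state <= 51 else -1
        done = self.length <= 0
        return np.array([self.state], dtype=np.float32), reward, done, {}

    def reset(self):
        self.state = 50 + random.randint(-3, 3)
        self.length = 120
        return np.array([self.state], dtype=np.float32)  # SB3 expects reset() to return the observation

env = CustomEnv()
model = PPO("MlpPolicy", env)   # MlpPolicy is the one meant for a Box observation space
model.learn(total_timesteps=20000)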

Related

Unicode encoder error in LatentDirichletAllocation

When I tried to run LatentDirichletAllocation on an array, it kept showing:
UnicodeEncodeError Traceback (most recent call last)
<timed exec> in <module>
D:\Anacondo\lib\site-packages\sklearn\decomposition\_lda.py in fit(self, X, y)
624 last_bound = None
625 n_jobs = effective_n_jobs(self.n_jobs)
--> 626 with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
627 for i in range(max_iter):
628 if learning_method == "online":
D:\Anacondo\lib\site-packages\joblib\parallel.py in __enter__(self)
723 def __enter__(self):
724 self._managed_backend = True
--> 725 self._initialize_backend()
726 return self
727
D:\Anacondo\lib\site-packages\joblib\parallel.py in _initialize_backend(self)
733 """Build a process or thread pool and return the number of workers"""
734 try:
--> 735 n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
736 **self._backend_args)
737 if self.timeout is not None and not self._backend.supports_timeout:
D:\Anacondo\lib\site-packages\joblib\_parallel_backends.py in configure(self, n_jobs, parallel, prefer, require, idle_worker_timeout, **memmappingexecutor_args)
492 SequentialBackend(nesting_level=self.nesting_level))
493
--> 494 self._workers = get_memmapping_executor(
495 n_jobs, timeout=idle_worker_timeout,
496 env=self._prepare_worker_env(n_jobs=n_jobs),
D:\Anacondo\lib\site-packages\joblib\executor.py in get_memmapping_executor(n_jobs, **kwargs)
18
19 def get_memmapping_executor(n_jobs, **kwargs):
---> 20 return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
21
22
D:\Anacondo\lib\site-packages\joblib\executor.py in get_memmapping_executor(cls, n_jobs, timeout, initializer, initargs, env, temp_folder, context_id, **backend_args)
40 _executor_args = executor_args
41
---> 42 manager = TemporaryResourcesManager(temp_folder)
43
44 # reducers access the temporary folder in which to store temporary
D:\Anacondo\lib\site-packages\joblib\_memmapping_reducer.py in __init__(self, temp_folder_root, context_id)
529 # exposes exposes too many low-level details.
530 context_id = uuid4().hex
--> 531 self.set_current_context(context_id)
532
533 def set_current_context(self, context_id):
D:\Anacondo\lib\site-packages\joblib\_memmapping_reducer.py in set_current_context(self, context_id)
533 def set_current_context(self, context_id):
534 self._current_context_id = context_id
--> 535 self.register_new_context(context_id)
536
537 def register_new_context(self, context_id):
D:\Anacondo\lib\site-packages\joblib\_memmapping_reducer.py in register_new_context(self, context_id)
558 new_folder_name, self._temp_folder_root
559 )
--> 560 self.register_folder_finalizer(new_folder_path, context_id)
561 self._cached_temp_folders[context_id] = new_folder_path
562
D:\Anacondo\lib\site-packages\joblib\_memmapping_reducer.py in register_folder_finalizer(self, pool_subfolder, context_id)
588 # semaphores and pipes
589 pool_module_name = whichmodule(delete_folder, 'delete_folder')
--> 590 resource_tracker.register(pool_subfolder, "folder")
591
592 def _cleanup():
D:\Anacondo\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in register(self, name, rtype)
189 '''Register a named resource, and increment its refcount.'''
190 self.ensure_running()
--> 191 self._send('REGISTER', name, rtype)
192
193 def unregister(self, name, rtype):
D:\Anacondo\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _send(self, cmd, name, rtype)
202
203 def _send(self, cmd, name, rtype):
--> 204 msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
205 if len(name) > 512:
206 # posix guarantees that writes to a pipe of less than PIPE_BUF
UnicodeEncodeError: 'ascii' codec can't encode characters in position 18-19: ordinal not in range(128)
The following code was provided by the instructor; I have not changed it.
import pandas as pd
import numpy as np
df = pd.read_csv("android.csv", sep=",", thousands=",")
df["Number of ratings"] = df["Number of ratings"].astype(int) # fix data type
df = df.drop_duplicates(subset=["App"]).reset_index(drop=True)
df.head(n=3)
permission_columns = list(df.columns[10:])
app_names = list(df["App"])
app_ratings = np.array(df["Number of ratings"])
df_perms = df[permission_columns]
X = df_perms.values
Below is my code to use TF-IDF and LatentDirichletAllocation to fit the data.
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer()
X_tfidf = transformer.fit_transform(X)
# convert sparse matrix to numpy array
X_tfidf = X_tfidf.toarray()
%%time
from sklearn.decomposition import LatentDirichletAllocation
n_topics = 10
lda = LatentDirichletAllocation(n_components=n_topics, max_iter=10,
                                learning_method='online',
                                n_jobs=-1, random_state=3)
lda.fit(X_tfidf)
However, it kept showing the UnicodeEncodeError. I tried to add
df = pd.read_csv("android.csv", sep=",", thousands=",", engine='python', encoding='utf-8-sig')
but it doesn't work. I tried several different encodings, and none of them work either.
Is there any way I can solve the issue? I think there might be something wrong with the X_tfidf array, but I can't tell.
Any help will be appreciated!
I tried with another array that is not taken from this dataset. It still fails with the same error and traceback.
Edit: this works fine in Google Colab. There might be something wrong with my Jupyter Notebook settings.
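For what it's worth, the traceback never touches the data: it fails while joblib's loky resource tracker tries to ASCII-encode the path of its temporary folder, which would also explain why the same code runs in Colab. If the Windows user name (and hence the default temp directory) contains non-ASCII characters, that would trigger it. Two hedged workarounds are sketched below: point joblib at an ASCII-only temp folder via the JOBLIB_TEMP_FOLDER environment variable, or avoid the parallel backend with n_jobs=1. The folder path "C:/temp/joblib" is only an example.

import os

# Assumption: the failure comes from joblib ASCII-encoding its temp-folder path, not from the data.
os.environ["JOBLIB_TEMP_FOLDER"] = "C:/temp/joblib"  # any existing ASCII-only folder

from sklearn.decomposition import LatentDirichletAllocation

lda = LatentDirichletAllocation(n_components=10, max_iter=10,
                                learning_method='online',
                                n_jobs=1,  # a single process also bypasses the resource tracker
                                random_state=3)
lda.fit(X_tfidf)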

TypeError: an integer is required (got type tuple) , when calling next( iter( ) )

I was trying to load some data using PyTorch; the code is the following:
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

test_ds = ImageFolder(root="./test", transform=data_transform)
test_dl = DataLoader(test_ds, batch_size=12)
x, y = next(iter(test_dl))
When next(iter(test_dl)) is called, it throws TypeError: an integer is required (got type tuple). I could not figure out why, since earlier, when I did the same task on a Mac, it worked fine. Does this have something to do with the OS?
The full traceback:
TypeError Traceback (most recent call last)
<ipython-input-89-cecf634332ce> in <module>
----> 1 next(iter(test_dl))
D:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
312 if self.num_workers == 0: # same-process loading
313 indices = next(self.sample_iter) # may raise StopIteration
--> 314 batch = self.collate_fn([self.dataset[i] for i in indices])
315 if self.pin_memory:
316 batch = pin_memory_batch(batch)
D:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py in <listcomp>(.0)
312 if self.num_workers == 0: # same-process loading
313 indices = next(self.sample_iter) # may raise StopIteration
--> 314 batch = self.collate_fn([self.dataset[i] for i in indices])
315 if self.pin_memory:
316 batch = pin_memory_batch(batch)
D:\Anaconda3\lib\site-packages\torchvision\datasets\folder.py in __getitem__(self, index)
101 sample = self.loader(path)
102 if self.transform is not None:
--> 103 sample = self.transform(sample)
104 if self.target_transform is not None:
105 target = self.target_transform(target)
D:\Anaconda3\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, img)
47 def __call__(self, img):
48 for t in self.transforms:
---> 49 img = t(img)
50 return img
51
D:\Anaconda3\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, img)
544 """
545 i, j, h, w = self.get_params(img, self.scale, self.ratio)
--> 546 return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
547
548 def __repr__(self):
D:\Anaconda3\lib\site-packages\torchvision\transforms\functional.py in resized_crop(img, i, j, h, w, size, interpolation)
329 assert _is_pil_image(img), 'img should be PIL Image'
330 img = crop(img, i, j, h, w)
--> 331 img = resize(img, size, interpolation)
332 return img
333
D:\Anaconda3\lib\site-packages\torchvision\transforms\functional.py in resize(img, size, interpolation)
204 return img.resize((ow, oh), interpolation)
205 else:
--> 206 return img.resize(size[::-1], interpolation)
207
208
D:\Anaconda3\lib\site-packages\PIL\Image.py in resize(self, size, resample, box)
1890 self.load()
1891
-> 1892 return self._new(self.im.resize(size, resample, box))
1893
1894 def rotate(
TypeError: an integer is required (got type tuple)
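data_transform isn't shown, so this is only a guess, but the traceback ends inside RandomResizedCrop: in this torchvision version the transform stores its size as (size, size), so passing a tuple such as (224, 224) produces a nested tuple that eventually reaches PIL's Image.resize and raises exactly this TypeError. A hedged sketch of a transform that avoids it (224 is an assumed target size):

from torchvision import transforms

# Hypothetical data_transform: pass a plain int to RandomResizedCrop so the
# internally stored size stays a flat (int, int) tuple by the time it reaches PIL.
data_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),   # int, not (224, 224)
    transforms.ToTensor(),
])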

Custom Multiple Input Primitive Bug returns "TypeError: issubclass() arg 1 must be a class"

I am using the Featuretools library to try to generate custom features involving customer transactions. I tested the function and it returns the expected answer, so I am not sure why I am getting this error.
I tried using the following link:
https://featuretools.alteryx.com/en/stable/getting_started/primitives.html
Thank you!
import featuretools as ft
import numpy as np
import pandas as pd
from featuretools.primitives import make_agg_primitive
from featuretools.variable_types import DatetimeTimeIndex, Numeric, Categorical

def test_fun(categorical, datetimeindex):
    x = pd.DataFrame({'store_name': categorical, 'session_start_time': datetimeindex})
    x_mode = list(x['store_name'].mode())[0]
    x = x[x['store_name'] == x_mode]
    y = x.session_start_time.diff().fillna(pd.Timedelta(seconds=0)) / np.timedelta64(1, 's')
    return y.median()

Test_Fun = make_agg_primitive(function=test_fun,
                              input_types=[Categorical, DatetimeTimeIndex],
                              return_type=[Numeric])

fm, fd = ft.dfs(
    entityset=es,
    target_entity='customers',
    agg_primitives=[Test_Fun],
    cutoff_time=lt,
    cutoff_time_in_index=True,
    include_cutoff_time=False,
    verbose=True,
)
Results in the following error
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-492-358f980bb6b0> in <module>
20 return_type = [Numeric])
21
---> 22 fm, fd = ft.dfs(
23 entityset = es,
24 target_entity = 'customers',
~\Anaconda3\lib\site-packages\featuretools\utils\entry_point.py in function_wrapper(*args, **kwargs)
38 ep.on_error(error=e,
39 runtime=runtime)
---> 40 raise e
41
42 # send return value
~\Anaconda3\lib\site-packages\featuretools\utils\entry_point.py in function_wrapper(*args, **kwargs)
30 # call function
31 start = time.time()
---> 32 return_value = func(*args, **kwargs)
33 runtime = time.time() - start
34 except Exception as e:
~\Anaconda3\lib\site-packages\featuretools\synthesis\dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, primitive_options, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types, progress_callback, include_cutoff_time)
259 seed_features=seed_features)
260
--> 261 features = dfs_object.build_features(
262 verbose=verbose, return_variable_types=return_variable_types)
263
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in build_features(self, return_variable_types, verbose)
287 assert isinstance(return_variable_types, list), msg
288
--> 289 self._run_dfs(self.es[self.target_entity_id], RelationshipPath([]),
290 all_features, max_depth=self.max_depth)
291
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in _run_dfs(self, entity, relationship_path, all_features, max_depth)
412 """
413
--> 414 self._build_transform_features(all_features, entity, max_depth=max_depth)
415
416 """
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in _build_transform_features(self, all_features, entity, max_depth, require_direct_input)
576 input_types = input_types[0]
577
--> 578 matching_inputs = self._get_matching_inputs(all_features,
579 entity,
580 new_max_depth,
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in _get_matching_inputs(self, all_features, entity, max_depth, input_types, primitive, primitive_options, require_direct_input, feature_filter)
793 primitive, primitive_options, require_direct_input=False,
794 feature_filter=None):
--> 795 features = self._features_by_type(all_features=all_features,
796 entity=entity,
797 max_depth=max_depth,
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in _features_by_type(self, all_features, entity, max_depth, variable_type)
768 if (variable_type == variable_types.PandasTypes._all or
769 f.variable_type == variable_type or
--> 770 any(issubclass(f.variable_type, vt) for vt in variable_type)):
771 if max_depth is None or f.get_depth(stop_at=self.seed_features) <= max_depth:
772 selected_features.append(f)
~\Anaconda3\lib\site-packages\featuretools\synthesis\deep_feature_synthesis.py in <genexpr>(.0)
768 if (variable_type == variable_types.PandasTypes._all or
769 f.variable_type == variable_type or
--> 770 any(issubclass(f.variable_type, vt) for vt in variable_type)):
771 if max_depth is None or f.get_depth(stop_at=self.seed_features) <= max_depth:
772 selected_features.append(f)
TypeError: issubclass() arg 1 must be a class
I think I figured it out. If there exists a better way, please let me know!
I'm not sure why the approach in the documentation didn't work (it uses functions instead of classes and made no mention of classes).
I was able to leverage the solution from this question to solve the problem:
How to get an item's group mean but exclude the item itself?
from featuretools.primitives import AggregationPrimitive

class Test_Fun(AggregationPrimitive):
    name = "test_fun"
    input_types = [Categorical, DatetimeTimeIndex]
    return_type = Numeric
    stack_on_self = False

    def get_function(self):
        def mean_excluding_value(categorical, datetimeindex):
            x = pd.DataFrame({'store_name': categorical, 'session_start_time': datetimeindex})
            x_mode = list(x['store_name'].mode())[0]
            x = x[x['store_name'] == x_mode]
            y = x.session_start_time.diff().fillna(pd.Timedelta(seconds=0)) / np.timedelta64(1, 's')
            return y.median()
        return mean_excluding_value

fm, fd = ft.dfs(
    entityset=es,
    target_entity='customers',
    agg_primitives=[Test_Fun],
    cutoff_time=lt,
    cutoff_time_in_index=True,
    include_cutoff_time=False,
    verbose=True,
)
In this section of the code:
Test_Fun = make_agg_primitive(function=test_fun,
                              input_types=[Categorical, DatetimeTimeIndex],
                              return_type=[Numeric])
return_type should be set to Numeric instead of [Numeric].
This code worked for me:
Test_Fun = make_agg_primitive(function=test_fun,
                              input_types=[Categorical, DatetimeTimeIndex],
                              return_type=Numeric)

NotImplementedError: Failed in nopython mode pipeline. Use of unknown opcode MAP_ADD at line 116 of <ipython-input-287-147d4798a88b>

I'm trying to run some code with Numba and I get errors.
What I want to do is compute cosine similarity with a cosinus_sparse function. I use this method inside the search method, and then call search inside the get_result method. Although I added the @jit decorator before each method, I get the implementation error shown below.
Here is my code:
import numpy as np
from numba import jit
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import pandas as pd
import math

class Search:
    def __init__(self, corpus, method='XTERM', stop_words='english', max_df=1.0, min_df=1, max_features=None):
        self.corpus = corpus
        self.method = method
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        self.max_features = max_features
        self.vectorization()
        self.get_shape()
        self.features_names = self.bag_of_word.get_feature_names()

    def vectorization(self):
        if self.method == 'XTERM':
            self.bag_of_word = CountVectorizer(stop_words=self.stop_words,
                                               max_df=self.max_df, min_df=self.min_df,
                                               max_features=self.max_features)
            self.corpus_vectorized = self.bag_of_word.fit_transform(self.corpus)
        elif self.method == 'TFxIDF':
            self.bag_of_word = TfidfVectorizer(stop_words=self.stop_words,
                                               max_df=self.max_df, min_df=self.min_df,
                                               max_features=self.max_features)
            self.corpus_vectorized = self.bag_of_word.fit_transform(self.corpus)
        else:
            raise MethodError('Method provided is not valid')

    def get_shape(self):
        self.n_docs, self.n_terms = self.corpus_vectorized.shape

    def get_query(self, query):
        self.indexes = [self.features_names.index(q) for q in query if q in self.features_names]
        self.query_vec = np.zeros(self.n_terms)
        self.query_vec[self.indexes] = 1

    @staticmethod
    @jit(nopython=True)
    def cosinus_sparse(i, j):
        num = i.dot(j)
        spars = i * i.transpose()
        den = math.sqrt(spars[0, 0]) * math.sqrt(sum(j * j))
        if den > 0:
            return int(num) / den
        else:
            return 0

    @jit(nopython=True)
    def search(self, q) -> dict:
        cc = {i: self.cosinus_sparse(self.corpus_vectorized[i, :], q) for i in range(self.n_docs)}
        cc = sorted(cc.items(), key=lambda x: x[1], reverse=True)
        return cc

    @jit
    def get_result(self) -> list:
        self.result = self.search(self.query_vec)

    def result_announcer(self):
        self.search_lenght = len([i for i in self.result if i[1] > 0])
        print('{} documents linked to your query were found'.format(self.search_lenght))

    def verif_query_vec(self, query):
        if int(sum(self.query_vec)) != len(query):
            raise QueryError('Error in query or query_vec')

    def processing(self, query):
        try:
            self.get_query(query)
            self.verif_query_vec(query)
            self.get_result()
        except NameError:
            self.vectorisation()
            self.get_shape()
            self.get_feature_names()
            self.get_query(query)
            self.verif_query_vec(query)
            self.get_result()

import ipywidgets as widgets
from IPython.display import display

text = widgets.Text(
    value='',
    placeholder='Type words',
    description='String:',
    disabled=False
)
method_radio = widgets.RadioButtons(
    options=['XTERM', 'TFxIDF'],
    # value='TF',
    description='Method:',
    disabled=False
)
submit = widgets.Button(description='Search')
display(widgets.VBox([text, method_radio, submit]))

def handle_submit(sender):
    global query
    query = text.value.lower().split(' ')
    method = method_radio.value
    # instantiate the search object
    global search_obj
    search_obj = Search(corpus=corpus, method=method, )
    search_obj.processing(query)

submit.on_click(handle_submit)
Here is the error
NotImplementedError Traceback (most recent call last)
<ipython-input-288-025a488daa60> in handle_submit(sender)
27 global search_obj
28 search_obj = Search(corpus=corpus, method=method, )
---> 29 search_obj.processing(query)
30
31 submit.on_click(handle_submit)
<ipython-input-287-147d4798a88b> in processing(self, query)
167 self.get_query(query)
168 self.verif_query_vec(query)
--> 169 self.get_result()
170
171 except NameError:
~\Anaconda3\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
365 e.patch_message(''.join(e.args) + help_msg)
366 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 367 raise e
368
369 def inspect_llvm(self, signature=None):
~\Anaconda3\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
322 argtypes.append(self.typeof_pyval(a))
323 try:
--> 324 return self.compile(tuple(argtypes))
325 except errors.TypingError as e:
326 # Intercept typing error that may be due to an argument
~\Anaconda3\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~\Anaconda3\lib\site-packages\numba\dispatcher.py in compile(self, sig)
653
654 self._cache_misses[sig] += 1
--> 655 cres = self._compiler.compile(args, return_type)
656 self.add_overload(cres)
657 self._cache.save_overload(sig, cres)
~\Anaconda3\lib\site-packages\numba\dispatcher.py in compile(self, args, return_type)
80 args=args, return_type=return_type,
81 flags=flags, locals=self.locals,
---> 82 pipeline_class=self.pipeline_class)
83 # Check typing error if object mode is used
84 if cres.typing_error is not None and not flags.enable_pyobject:
~\Anaconda3\lib\site-packages\numba\compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
924 pipeline = pipeline_class(typingctx, targetctx, library,
925 args, return_type, flags, locals)
--> 926 return pipeline.compile_extra(func)
927
928
~\Anaconda3\lib\site-packages\numba\compiler.py in compile_extra(self, func)
372 self.lifted = ()
373 self.lifted_from = None
--> 374 return self._compile_bytecode()
375
376 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~\Anaconda3\lib\site-packages\numba\compiler.py in _compile_bytecode(self)
855 """
856 assert self.func_ir is None
--> 857 return self._compile_core()
858
859 def _compile_ir(self):
~\Anaconda3\lib\site-packages\numba\compiler.py in _compile_core(self)
842 self.define_pipelines(pm)
843 pm.finalize()
--> 844 res = pm.run(self.status)
845 if res is not None:
846 # Early pipeline completion
~\Anaconda3\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~\Anaconda3\lib\site-packages\numba\compiler.py in run(self, status)
253 # No more fallback pipelines?
254 if is_final_pipeline:
--> 255 raise patched_exception
256 # Go to next fallback pipeline
257 else:
~\Anaconda3\lib\site-packages\numba\compiler.py in run(self, status)
244 try:
245 event(stage_name)
--> 246 stage()
247 except _EarlyPipelineCompletion as e:
248 return e.result
~\Anaconda3\lib\site-packages\numba\compiler.py in stage_inline_pass(self)
582 self.flags.auto_parallel,
583 self.parfor_diagnostics.replaced_fns)
--> 584 inline_pass.run()
585 # Remove all Dels, and re-run postproc
586 post_proc = postproc.PostProcessor(self.func_ir)
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in run(self)
75
76 if guard(self._inline_closure,
---> 77 work_list, block, i, func_def):
78 modified = True
79 break # because block structure changed
~\Anaconda3\lib\site-packages\numba\ir_utils.py in guard(func, *args, **kwargs)
1358 """
1359 try:
-> 1360 return func(*args, **kwargs)
1361 except GuardException:
1362 return None
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in _inline_closure(self, work_list, block, i, func_def)
212 inline_closure_call(self.func_ir,
213 self.func_ir.func_id.func.__globals__,
--> 214 block, i, func_def, work_list=work_list)
215 return True
216
~\Anaconda3\lib\site-packages\numba\inline_closurecall.py in inline_closure_call(func_ir, glbls, block, i, callee, typingctx, arg_typs, typemap, calltypes, work_list)
253 callee_closure = callee.closure if hasattr(callee, 'closure') else callee.__closure__
254 # first, get the IR of the callee
--> 255 callee_ir = get_ir_of_code(glbls, callee_code)
256 callee_blocks = callee_ir.blocks
257
~\Anaconda3\lib\site-packages\numba\ir_utils.py in get_ir_of_code(glbls, fcode)
1572 f.__name__ = fcode.co_name
1573 from numba import compiler
-> 1574 ir = compiler.run_frontend(f)
1575 # we need to run the before inference rewrite pass to normalize the IR
1576 # XXX: check rewrite pass flag?
~\Anaconda3\lib\site-packages\numba\compiler.py in run_frontend(func)
168 interp = interpreter.Interpreter(func_id)
169 bc = bytecode.ByteCode(func_id=func_id)
--> 170 func_ir = interp.interpret(bc)
171 post_proc = postproc.PostProcessor(func_ir)
172 post_proc.run()
~\Anaconda3\lib\site-packages\numba\interpreter.py in interpret(self, bytecode)
101 # Data flow analysis
102 self.dfa = dataflow.DataFlowAnalysis(self.cfa)
--> 103 self.dfa.run()
104
105 # Temp states during interpretation
~\Anaconda3\lib\site-packages\numba\dataflow.py in run(self)
26 def run(self):
27 for blk in self.cfa.iterliveblocks():
---> 28 self.infos[blk.offset] = self.run_on_block(blk)
29
30 def run_on_block(self, blk):
~\Anaconda3\lib\site-packages\numba\dataflow.py in run_on_block(self, blk)
76 for offset in blk:
77 inst = self.bytecode[offset]
---> 78 self.dispatch(info, inst)
79 return info
80
~\Anaconda3\lib\site-packages\numba\dataflow.py in dispatch(self, info, inst)
86 fname = "op_%s" % inst.opname.replace('+', '_')
87 fn = getattr(self, fname, self.handle_unknown_opcode)
---> 88 fn(info, inst)
89
90 def handle_unknown_opcode(self, info, inst):
~\Anaconda3\lib\site-packages\numba\dataflow.py in handle_unknown_opcode(self, info, inst)
91 msg = "Use of unknown opcode {} at line {} of {}"
92 raise NotImplementedError(msg.format(inst.opname, inst.lineno,
---> 93 self.bytecode.func_id.filename))
94
95 def dup_topx(self, info, inst, count):
NotImplementedError: Failed in nopython mode pipeline (step: inline calls to locally defined closures)
Use of unknown opcode MAP_ADD at line 116 of <ipython-input-287-147d4798a88b>
How do I fix this error?
Thanks a lot for your help.
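MAP_ADD is the opcode Python emits for dict comprehensions, and the Numba release shown in the traceback does not recognise it, so upgrading Numba (or matching it to the installed Python version) may already clear the "unknown opcode" error. Independently of that, @jit(nopython=True) on instance methods that juggle scipy sparse matrices and Python dicts is unlikely to compile. A minimal sketch of one possible rearrangement, not the original class: keep Numba only for a numeric kernel on plain NumPy arrays, and build the ranking dict in ordinary Python. It assumes a dense float copy of the vectorized corpus, e.g. corpus_vectorized.toarray().astype(np.float64), is acceptable.

import numpy as np
from numba import jit

@jit(nopython=True)
def cosine_dense(doc_vec, query_vec):
    # plain NumPy arithmetic, which nopython mode supports
    num = np.dot(doc_vec, query_vec)
    den = np.sqrt(np.dot(doc_vec, doc_vec)) * np.sqrt(np.dot(query_vec, query_vec))
    return num / den if den > 0 else 0.0

def search(corpus_matrix, query_vec):
    # the dict comprehension lives in interpreted Python, outside the jitted code
    scores = {i: cosine_dense(corpus_matrix[i], query_vec)
              for i in range(corpus_matrix.shape[0])}
    return sorted(scores.items(), key=lambda x: x[1], reverse=True)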

UnsupportedOperation: not writable, in Python while counting coins in chainscan

What I'm trying to do is print out the total number of coins in the blockchain.
I'm working with the examples from the Chainscan manual.
Here is my code so far:
from chainscan import iter_blocks

total_btc = 0
for block in iter_blocks(show_progressbar=True):
    coinbase_tx = next(iter(block.txs))  # the first tx is coinbase
    total_btc += coinbase_tx.get_total_output_value()
print('Total %d satoshis (up to block height %d)' % (total_btc, block.height))
The problem is that I get an UnsupportedOperation error. Here is the traceback:
UnsupportedOperation Traceback (most recent call last)
<ipython-input-3-2b54aca19755> in <module>()
1 total_btc = 0
----> 2 for block in iter_blocks(show_progressbar = True):
3 coinbase_tx = next(iter(block.txs)) # the first tx is coinbase
4 total_btc += coinbase_tx.get_total_output_value()
5 print('Total %d satoshis (up to block height %d)' % (total_btc, block.height))
/usr/local/lib/python3.5/dist-packages/chainscan/utils.py in iter_blocks(block_iter, **kwargs)
23 """
24 if block_iter is None:
---> 25 block_iter = LongestChainBlockIterator(**kwargs)
26 return block_iter
27
/usr/local/lib/python3.5/dist-packages/chainscan/scan.py in __init__(self, block_iter, height_safety_margin, block_filter, **kwargs)
322 """
323 if block_iter is None:
--> 324 block_iter = TopologicalBlockIterator(**kwargs)
325 self.block_iter = block_iter
326 if height_safety_margin is None:
/usr/local/lib/python3.5/dist-packages/chainscan/scan.py in __init__(self, rawfile_block_iter, **kwargs)
230 """
231 if rawfile_block_iter is None:
--> 232 rawfile_block_iter = RawFileBlockIterator(**kwargs)
233 self.rawfile_block_iter = rawfile_block_iter
234
/usr/local/lib/python3.5/dist-packages/chainscan/scan.py in __init__(self, raw_data_iter, **kwargs)
155 """
156 if raw_data_iter is None:
--> 157 raw_data_iter = RawDataIterator(**kwargs)
158 self.raw_data_iter = raw_data_iter
159
/usr/local/lib/python3.5/dist-packages/chainscan/rawfiles.py in __init__(self, raw_files_iter, use_mmap, **kwargs)
115 """
116 if raw_files_iter is None:
--> 117 raw_files_iter = RawFilesIterator(**kwargs)
118 self.raw_files_iter = raw_files_iter
119 self.use_mmap = use_mmap
...
/usr/local/lib/python3.5/dist-packages/click/utils.py in echo(message, file, nl, err, color)
257
258 if message:
--> 259 file.write(message)
260 file.flush()
261
UnsupportedOperation: not writable
Any ideas?
Thanks in advance.
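The traceback bottoms out in click's echo() trying to write the progress bar to a stream that is not writable in this environment (a notebook kernel with a redirected stdout would do it), so it may be worth checking whether the same loop runs with the progress bar turned off; the parameter is already in the call above. A quick sketch of that check:

from chainscan import iter_blocks

# Same loop, just without asking click to draw a progress bar.
total_btc = 0
for block in iter_blocks(show_progressbar=False):
    coinbase_tx = next(iter(block.txs))  # the first tx is coinbase
    total_btc += coinbase_tx.get_total_output_value()
print('Total %d satoshis (up to block height %d)' % (total_btc, block.height))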
