ForwardReference NameError when loading a recursive dict in dataclass - python

I'm using marshmallow-dataclass to load a json which represents a sequence of rules where each rule is represented by a LogicalGroup and applies a logical operator on its child expressions, knowing that an expression can itself be a LogicalGroup.
The input dict follows this structure:
import marshmallow_dataclass
from dataclasses import field
from api_handler import BaseSchema
from typing import Sequence, Union, Literal, Type, List, ForwardRef, TypeVar, Generic
# Example payload: a Filter with one top-level LogicalGroup ("and") whose
# expressions mix plain Expression dicts with a nested LogicalGroup ("or") --
# i.e. the recursive case that triggers the forward-reference NameError.
filter_input = { "rules" :
[{
"groupOperator" : "and",
"expressions" : [
{ "field": "xxxxx", "operator": "eq", "value": 'level1' },
{ "field": "xxxxx", "operator": "eq", "value": 'm'},
{ "field": "xxxxx", "operator": "eq", "value": "test"},
{
"groupOperator" : "or",
"expressions" : [
{ "field": "xxxx", "operator": "eq", "value": 'level2' },
{ "field": "xxxx", "operator": "eq", "value": 'm' },
{ "field": "xxxx", "operator": "eq", "value": "test" }
]
}
]
}]
}
The dataclasses i'm using for this purpose are the following :
# NOTE(review): in the original paste the decorator lines begin with `#`,
# which Python reads as comments -- almost certainly `@` decorators mangled
# by the site's formatting -- and the class bodies lost their indentation.
# Both are restored here; no semantic change is intended.
@marshmallow_dataclass.dataclass(base_schema=BaseSchema)
class Expression:
    """A single comparison, e.g. {"field": "x", "operator": "eq", "value": "y"}."""
    field: str
    operator: str
    value: str


@marshmallow_dataclass.dataclass(base_schema=BaseSchema)
class LogicalGroup:
    """A logical operator applied over child expressions.

    A child may itself be a LogicalGroup, making the schema recursive.
    """
    group_operator: str
    # 'LogicalGroup' is a string forward reference; resolving this name at
    # deserialization time (inside the Union field's typeguard check) is
    # exactly what raises the reported NameError.
    expressions: List[Union['LogicalGroup', Expression]] = field(default_factory=list)


@marshmallow_dataclass.dataclass(base_schema=BaseSchema)
class Filter:
    """Top-level container: the list of rule groups to apply."""
    rules: List[LogicalGroup] = field(default_factory=list)
The problem is when i try to load the dict using the Filter dataclass i get the following error
filt = Filter.Schema().load(filter_input)
Traceback (most recent call last):
File "/home/adam/billing/billing/filter/filter.py", line 96, in <module>
filt = Filter.Schema().load(filter_input)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow_dataclass/__init__.py", line 628, in load
all_loaded = super().load(data, many=many, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 725, in load
return self._do_load(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 859, in _do_load
result = self._deserialize(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 667, in _deserialize
value = self._call_and_store(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 496, in _call_and_store
value = getter_func(data)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 664, in <lambda>
getter = lambda val: field_obj.deserialize(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 354, in deserialize
output = self._deserialize(value, attr, data, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 726, in _deserialize
result.append(self.inner.deserialize(each, **kwargs))
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 354, in deserialize
output = self._deserialize(value, attr, data, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 609, in _deserialize
return self._load(value, data, partial=partial)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 592, in _load
valid_data = self.schema.load(value, unknown=self.unknown, partial=partial)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow_dataclass/__init__.py", line 628, in load
all_loaded = super().load(data, many=many, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 725, in load
return self._do_load(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 859, in _do_load
result = self._deserialize(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 667, in _deserialize
value = self._call_and_store(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 496, in _call_and_store
value = getter_func(data)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/schema.py", line 664, in <lambda>
getter = lambda val: field_obj.deserialize(
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 354, in deserialize
output = self._deserialize(value, attr, data, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 726, in _deserialize
result.append(self.inner.deserialize(each, **kwargs))
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow/fields.py", line 354, in deserialize
output = self._deserialize(value, attr, data, **kwargs)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/marshmallow_dataclass/union_field.py", line 56, in _deserialize
typeguard.check_type(attr or "anonymous", result, typ)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/typeguard/__init__.py", line 655, in check_type
expected_type = resolve_forwardref(expected_type, memo)
File "/home/adam/thanos-envv/lib/python3.9/site-packages/typeguard/__init__.py", line 198, in resolve_forwardref
return evaluate_forwardref(maybe_ref, memo.globals, memo.locals, frozenset())
File "/usr/lib/python3.9/typing.py", line 533, in _evaluate
eval(self.__forward_code__, globalns, localns),
File "<string>", line 1, in <module>
NameError: name 'LogicalGroup' is not defined
I'm guessing the problem comes from declaring LogicalGroup as a ForwardRef inside type hint Union, because when i use only
Union['LogicalGroup'] and modify my dict to be a nested dict of LogicalGroups without the Expressions it works fine.
Does someone have any idea on the source of the bug? Or maybe a proposition to address this problem in another way?
Thanks in advance !

Related

[instance segmentation]pixellib fails to remove _background_ label in labelme on custom dataset

I learn pixellib to do instance segmentation at pixellib.
I used labelme to mark 4 categories of objects. According to the labelme documentation, I used the label _background_ to mark the parts that do not belong to any object.
The 4 categories of objects are plastic bag,drink bottle,banana,apple.
When I use this code to train my own dataset:
from pixellib.custom_train import instance_custom_training
train_maskrcnn = instance_custom_training()
# num_classes=4 counts only the real object classes; class_names additionally
# lists "_background_" first (4 classes + background = 5 names). The shape
# mismatch (17 vs 18) in the error suggests the dataset JSON registered
# _background_ as a 5th real category -- TODO confirm against train.json.
train_maskrcnn.modelConfig(network_backbone = "resnet101", num_classes=4, batch_size=4, class_names= ["_background_","apple","banana","drinkBottle","plasticBag"])
# Fine-tune from COCO weights on the custom labelme dataset in ./myData.
train_maskrcnn.load_pretrained_model("mask_rcnn_coco.h5")
train_maskrcnn.load_dataset(r'myData')
train_maskrcnn.train_model(num_epochs = 300, augmentation=True, path_trained_models = r"D:/pythonProjects/model")
I find the categories in test.json and train.json is:
"categories": [
{
"supercategory": "apple",
"id": 1,
"name": "apple"
},
{
"supercategory": "_background_",
"id": 2,
"name": "_background_"
},
{
"supercategory": "banana",
"id": 3,
"name": "banana"
},
{
"supercategory": "plasticBag",
"id": 4,
"name": "plasticBag"
},
{
"supercategory": "drinkBottle",
"id": 5,
"name": "drinkBottle"
}
]
And the error message is:
Traceback (most recent call last):
File "D:/pythonProjects/test_mask.py", line 6, in <module>
train_maskrcnn.train_model(num_epochs = 300, augmentation=True, path_trained_models = r"D:/pythonProjects/model")
File "D:\Anaconda38\lib\site-packages\pixellib\custom_train\__init__.py", line 124, in train_model
self.model.train(self.dataset_train, self.dataset_test,models = path_trained_models, augmentation = augmentation,
File "D:\Anaconda38\lib\site-packages\pixellib\instance\mask_rcnn.py", line 2307, in train
self.keras_model.fit(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_v1.py", line 776, in fit
return func.fit(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_generator_v1.py", line 570, in fit
return fit_generator(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_generator_v1.py", line 252, in model_iteration
batch_outs = batch_function(*batch_data)
File "D:\Anaconda38\lib\site-packages\keras\engine\training_v1.py", line 1048, in train_on_batch
x, y, sample_weights = self._standardize_user_data(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_v1.py", line 2323, in _standardize_user_data
return self._standardize_tensors(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_v1.py", line 2351, in _standardize_tensors
x = training_utils_v1.standardize_input_data(
File "D:\Anaconda38\lib\site-packages\keras\engine\training_utils_v1.py", line 642, in standardize_input_data
raise ValueError('Error when checking ' + exception_prefix +
ValueError: Error when checking input: expected input_image_meta to have shape (17,) but got array with shape (18,)
I know that _background_ is a special category and should not be trained as a category, and _background_ should not appear under the node categories in test.json and train.json, so how should I modify my code?

pytest does not find tests when structlog is configured

I have a python project where I use pytest for my unit testing.
Normally if I run the following command from my test folder:
pytest --collect-only
I will get all my tests:
...
40 tests collected
...
Now let's say I define a new class structlogconf.py (Based on this example)
import logging
import logging.config
import structlog
def configure() -> None:
    """Configure stdlib logging and structlog with a shared processor chain.

    NOTE(review): the original paste lost all indentation; restored here
    with no other code changes. Installs a "plain" formatter for the file
    handler and a "colored" one for the console, routes the root logger to
    both at DEBUG, and wires structlog through ProcessorFormatter so
    stdlib and structlog records render consistently.
    """
    timestamper = structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S")
    pre_chain = [
        # Add the log level and a timestamp to the event_dict if the log entry
        # is not from structlog.
        structlog.stdlib.add_log_level,
        timestamper,
    ]
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "plain": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.dev.ConsoleRenderer(colors=False),
                    "foreign_pre_chain": pre_chain,
                },
                "colored": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.dev.ConsoleRenderer(colors=True),
                    "foreign_pre_chain": pre_chain,
                },
            },
            "handlers": {
                "default": {
                    "level": "DEBUG",
                    "class": "logging.StreamHandler",
                    "formatter": "colored",
                },
                "file": {
                    "level": "DEBUG",
                    "class": "logging.handlers.WatchedFileHandler",
                    "filename": "test.log",
                    "formatter": "plain",
                },
            },
            "loggers": {
                "": {
                    "handlers": ["default", "file"],
                    "level": "DEBUG",
                    "propagate": True,
                },
            },
        }
    )
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            timestamper,
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
If I now run the pytest collect command again, pytest will not recover any test suite which imports, directly or indirectly, the configure() function from structlogconf.py. So I now obtain something like:
...
5 tests collected
...
Does anyone know how to use a structlog configuration in a way that won't affect my pytest test discovery?
FYI: Here is the stacktrace when running the collect in the problematic scenario:
...
my remaining 5 tests which does not indirectly import my configure() function are showing here
...
================================================================== 5 tests collected in 1.03s ===================================================================
Traceback (most recent call last):
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 269, in wrap_session
session.exitstatus = doit(config, session) or 0
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 322, in _main
config.hook.pytest_collection(session=session)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_hooks.py", line 265, in __call__
return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 60, in _multicall
return outcome.get_result()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_result.py", line 60, in get_result
raise ex[1].with_traceback(ex[2])
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 39, in _multicall
res = hook_impl.function(*args)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 333, in pytest_collection
session.perform_collect()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 634, in perform_collect
self.items.extend(self.genitems(node))
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 811, in genitems
yield from self.genitems(subnode)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 808, in genitems
rep = collect_one_node(node)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\runner.py", line 458, in collect_one_node
rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_hooks.py", line 265, in __call__
return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 55, in _multicall
gen.send(outcome)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 796, in pytest_make_collect_report
out, err = self.read_global_capture()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 718, in read_global_capture
return self._global_capturing.readouterr()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 622, in readouterr
err = self.err.snap()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 479, in snap
self.tmpfile.seek(0)
ValueError: I/O operation on closed file.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 289, in wrap_session
config.notify_exception(excinfo, config.option)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\config\__init__.py", line 1037, in notify_exception
res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_hooks.py", line 265, in __call__
return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 60, in _multicall
return outcome.get_result()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_result.py", line 60, in get_result
raise ex[1].with_traceback(ex[2])
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 39, in _multicall
res = hook_impl.function(*args)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 826, in pytest_internalerror
self.stop_global_capturing()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 693, in stop_global_capturing
self._global_capturing.pop_outerr_to_orig()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 573, in pop_outerr_to_orig
out, err = self.readouterr()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 622, in readouterr
err = self.err.snap()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 479, in snap
self.tmpfile.seek(0)
ValueError: I/O operation on closed file.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\Scripts\pytest.exe\__main__.py", line 7, in <module>
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\config\__init__.py", line 185, in console_main
code = main()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\config\__init__.py", line 162, in main
ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_hooks.py", line 265, in __call__
return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_manager.py", line 80, in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 60, in _multicall
return outcome.get_result()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_result.py", line 60, in get_result
raise ex[1].with_traceback(ex[2])
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\pluggy\_callers.py", line 39, in _multicall
res = hook_impl.function(*args)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 316, in pytest_cmdline_main
return wrap_session(config, _main)
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\main.py", line 311, in wrap_session
config._ensure_unconfigure()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\config\__init__.py", line 991, in _ensure_unconfigure
fin()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 693, in stop_global_capturing
self._global_capturing.pop_outerr_to_orig()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 573, in pop_outerr_to_orig
out, err = self.readouterr()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 622, in readouterr
err = self.err.snap()
File "C:\Users\MY_USER\.conda\envs\MY_ENV\lib\site-packages\_pytest\capture.py", line 479, in snap
self.tmpfile.seek(0)
ValueError: I/O operation on closed file.
This ain't the best solution but after trying to comment various sections from the configuration. I found out the issue originated from the "colored" section which is not critical for usability:
import logging
import logging.config
import structlog
def configure() -> None:
    """Configure stdlib logging and structlog, with console colors disabled.

    NOTE(review): indentation restored from the original paste; no other
    code changes. The "colored" formatter (ConsoleRenderer(colors=True))
    is deliberately commented out -- it was the piece that broke pytest's
    output capture during collection -- so both handlers use "plain".
    """
    timestamper = structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S")
    pre_chain = [
        # Add the log level and a timestamp to the event_dict if the log entry
        # is not from structlog.
        structlog.stdlib.add_log_level,
        timestamper,
    ]
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "plain": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.dev.ConsoleRenderer(colors=False),
                    "foreign_pre_chain": pre_chain,
                },
                # "colored": {
                #     "()": structlog.stdlib.ProcessorFormatter,
                #     "processor": structlog.dev.ConsoleRenderer(colors=True),
                #     "foreign_pre_chain": pre_chain,
                # },
            },
            "handlers": {
                "default": {
                    "level": "ERROR",
                    "class": "logging.StreamHandler",
                    "formatter": "plain",  # <---Change to "plain"
                },
                "file": {
                    "level": "ERROR",
                    "class": "logging.handlers.WatchedFileHandler",
                    "filename": "test.log",
                    "formatter": "plain",
                },
            },
            "loggers": {
                "": {
                    "handlers": ["default", "file"],
                    "level": "ERROR",
                    "propagate": True,
                },
            },
        }
    )
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            timestamper,
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
I must admit I would prefer a solution that allows me to keep the "colored" section...

Cannot pickle 'dict_keys' object - dict validator with keysrules

# NOTE(review): was `dict.fromkeys(["subject", "target"]).keys()`, which
# yields a `dict_keys` view. Cerberus deepcopies its validation errors when
# `.errors` is accessed, and `dict_keys` objects cannot be pickled/deepcopied,
# producing "TypeError: cannot pickle 'dict_keys' object" as soon as an
# invalid key is reported. A plain tuple is picklable and serves `allowed`
# identically.
UPDATEABLE_FIELDS_KEYS = ("subject", "target")

# Cerberus schema for the update endpoint: exactly one of `internal_id` or
# `message_code` must be present (mutual `excludes`), plus a `fields` dict
# whose keys are restricted to the updateable field names via `keysrules`.
update_schema = {
    "internal_id": {
        "required": True,
        "type": "string",
        "regex": UUIDV4,  # assumes UUIDV4 is a regex string defined elsewhere -- TODO confirm
        "empty": False,
        "excludes": "message_code",
    },
    "message_code": {
        "required": True,
        "type": "string",
        "empty": False,
        "excludes": "internal_id",
        "coerce": to_uppercase_fn,  # coercion callable defined elsewhere in the project
    },
    "fields": {
        "required": True,
        "type": "dict",
        "keysrules": {
            "required": False,
            "type": "string",
            "allowed": UPDATEABLE_FIELDS_KEYS,
        },
    },
}
If I validate something like this:
# Sample payload: "this_one_not_valid" is not an allowed key, so the
# keysrules check should report it as a key error.
data = {
"message_code": "ABC123",
"fields": {
"this_one_not_valid": "some words",
"subject": "a thing",
"target": "something else"
}
}
# Validator is cerberus.Validator; allow_unknown=False rejects extra
# top-level keys. Accessing .errors triggers the deepcopy of stored
# errors that fails on the dict_keys value (see traceback below).
validator = Validator(update_schema, allow_unknown=False)
validator(data)
validator.errors
I get this error:
TypeError: cannot pickle 'dict_keys' object
Am I doing something wrong here? It works acceptably in the "correct" cases, but not when I provide an invalid key name.
Stacktrace, some words altered but the code provided above is otherwise the code I am using.
File "/Users/c/Development/ub/app/blueprints/p/p.py", line 201, in update_existing
return jsonify(validator.errors), 422
File "/Users/c/.pyenv/versions/3.8.1/envs/ub/lib/python3.8/site-packages/cerberus/validator.py", line 464, in errors
return self.error_handler(self._errors)
File "/Users/c/.pyenv/versions/3.8.1/envs/ub/lib/python3.8/site-packages/cerberus/errors.py", line 493, in __call__
self.extend(errors)
File "/Users/c/.pyenv/versions/3.8.1/envs/ub/lib/python3.8/site-packages/cerberus/errors.py", line 397, in extend
self.add(error)
File "/Users/c/.pyenv/versions/3.8.1/envs/ub/lib/python3.8/site-packages/cerberus/errors.py", line 510, in add
error = deepcopy(error)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 269, in _reconstruct
state = deepcopy(state, memo)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 229, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 229, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/c/.pyenv/versions/3.8.1/lib/python3.8/copy.py", line 161, in deepcopy
rv = reductor(4)
TypeError: cannot pickle 'dict_keys' object
The only way I could see this being actually broken is my guess above about UPDATEABLE_FIELDS_KEYS
your code is using some_dict.keys() to set that value which returns a keys view (dict_keys type in python)
adjusting the code in your question if I change:
-UPDATEABLE_FIELDS_KEYS = ["subject", "target"]
+UPDATEABLE_FIELDS_KEYS = dict.fromkeys(["subject", "target"]).keys()
I can then reproduce the stacktrace.
The easy fix is to not call .keys() (the only valid case I've seen for calling .keys() in python is to use it as a setlike, every other case I've seen is better done as either (1) iterate over the dictionary directly or (2) use in for containment (3) convert to the type you want via iterator)
In this case, you probably want a list or a tuple or a set, for example:
SOME_DICT = {"subject": 1, "target": 1}
UPDATEABLE_FIELDS_KEYS = frozenset(SOME_DICT)

Strange behaviour solidity contract returning huge uint array

I use web3.py to read out a large amount of data and I cannot see what goes wrong here. getValue() and getValue2(), both can be called from remix.ethereum.org without an error, but when I use the python code I posted, then I can only read out getValue2(), the function getValue() throws an error and it looks like it runs in a gas limit. But since the function throws no errors called from remix.ethereum, I really don't see why there should be such a gas error:
Solidity Contract:
// Test contract holding two large fixed-size storage arrays. getValue()
// returns 1,000,000 words (~32 MB ABI-encoded); getValue2() returns
// 200,000 words. NOTE(review): an eth_call response this large may be
// refused or truncated by a remote RPC node even though Remix's
// in-browser VM serves it fine -- presumably the cause of the empty
// (`0x`) return data seen from web3.py; verify against the node's limits.
pragma solidity ^0.4.19;
contract TestContract {
uint [1000000] val;
uint [200000] val2;
// Old-style (pre-0.4.22) constructor: a function named after the contract.
function TestContract(){
}
// view functions: free for the caller via eth_call, but the node still
// meters execution and must buffer the full encoded return value.
function getValue() external view returns(uint [1000000]){
return val;
}
function getValue2() external view returns(uint [200000]){
return val2;
}
}
Python code:
import json
import web3
from web3 import Web3, HTTPProvider
from web3.contract import ConciseContract

# NOTE(review): the `with` body lost its indentation in the original paste;
# restored here. No other code changes.

# web3: connect to the public Ropsten endpoint.
w3 = Web3(HTTPProvider('https://ropsten.infura.io'))

# Address of the already-deployed TestContract; ABI loaded from disk.
contractAddress = '0x37c587c2174bd9248f203947d7272bf1b8f91fa9'
with open('testfactory.json', 'r') as abi_definition:
    abi = json.load(abi_definition)
contract_instance = w3.eth.contract(contractAddress, abi=abi,ContractFactoryClass=ConciseContract)

# The 200,000-word return decodes fine...
arr2=contract_instance.getValue2() #works fine
print(len(arr2))
# ...but the 1,000,000-word call comes back as empty data (`0x`), so
# eth_abi raises InsufficientDataBytes (traceback below) -- presumably the
# remote node rejects/limits the oversized eth_call result; verify against
# the provider's response-size limits.
arr=contract_instance.getValue() #throws an error, posted below
print(len(arr))
testfactory.json:
[
{
"constant": true,
"inputs": [],
"name": "getValue2",
"outputs": [
{
"name": "",
"type": "uint256[200000]"
}
],
"payable": false,
"stateMutability": "view",
"type": "function"
},
{
"constant": true,
"inputs": [],
"name": "getValue",
"outputs": [
{
"name": "",
"type": "uint256[1000000]"
}
],
"payable": false,
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"payable": false,
"stateMutability": "nonpayable",
"type": "constructor"
}
]
Error in Python:
Traceback (most recent call last):
File "C:\Python36\lib\site-packages\web3\contract.py", line 844, in call_contract_function
output_data = decode_abi(output_types, return_data)
File "C:\Python36\lib\site-packages\eth_abi\abi.py", line 109, in decode_abi
return decoder(stream)
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 102, in __call__
return self.decode(stream)
File "C:\Python36\lib\site-packages\eth_utils\functional.py", line 22, in inner
return callback(fn(*args, **kwargs))
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 140, in decode
yield decoder(stream)
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 102, in __call__
return self.decode(stream)
File "C:\Python36\lib\site-packages\eth_utils\functional.py", line 22, in inner
return callback(fn(*args, **kwargs))
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 198, in decode
yield cls.item_decoder(stream)
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 102, in __call__
return self.decode(stream)
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 165, in decode
raw_data = cls.read_data_from_stream(stream)
File "C:\Python36\lib\site-packages\eth_abi\decoding.py", line 247, in read_data_from_stream
len(data),
eth_abi.exceptions.InsufficientDataBytes: Tried to read 32 bytes. Only got 0 bytes
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/Sebi/PycharmProjects/web3/test.py", line 18, in <module>
arr=contract_instance.getValue()
File "C:\Python36\lib\site-packages\web3\contract.py", line 805, in __call__
return self.__prepared_function(**kwargs)(*args)
File "C:\Python36\lib\site-packages\web3\contract.py", line 866, in call_contract_function
raise_from(BadFunctionCallOutput(msg), e)
File "C:\Python36\lib\site-packages\web3\utils\exception_py3.py", line 2, in raise_from
raise my_exception from other_exception
web3.exceptions.BadFunctionCallOutput: Could not decode contract function call getValue return data 0x for output_types ['uint256[1000000]']
Any suggestions what I could do? Is there a bug in web3.py v4.0?

Python: SyntaxError: unexpected EOF while parsing

I am trying to read a dictionary object from an external file and then read it from another file. (The object inside of the file apparently is not a json file even though the file name has a json extension.)
import json
import ast

# NOTE(review): the `with` body lost its indentation in the original paste;
# restored here. No other code changes.
with open('remeeting_media-get-response.json', 'r') as data:
    s = data.read()

# ast.literal_eval raises "SyntaxError: unexpected EOF while parsing" when
# given an empty or truncated string -- the "<unknown>, line 0" frame in the
# traceback suggests `s` came back empty; verify the file actually has content.
a = ast.literal_eval(s)
type(a)  # bare expression: only echoes in an interactive session; use print(type(a)) in a script
However, I am getting the following unknown error:
Traceback (most recent call last):
File "/Users/me/Desktop/data/finished/Dashlane/diarization.py", line 8, in <module>
a = ast.literal_eval(s)
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ast.py", line 49, in literal_eval
node_or_string = parse(node_or_string, mode='eval')
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ast.py", line 37, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "<unknown>", line 0
^
SyntaxError: unexpected EOF while parsing
[Finished in 0.1s with exit code 1]
This is the sample content from the file:
{
"lines": [
{
"duration": 1.8899999999999999,
"line": "these cop ooh",
"interval": [
0.0,
1.8899999999999999
],
"speaker": "Speaker_2"
},
{
"duration": 5.9500000000000002,
"line": "[noise] hello [noise]",
"interval": [
2.3199999999999998,
8.2699999999999996
],
"speaker": "Speaker_1"
},
{
"duration": 1.5600000000000001,
"line": "ooh",
"interval": [
2081.6900000000001,
2083.25
],
"speaker": "Speaker_2"
}
]
}
I also tried to load this as a json object and it doesn't recognize it as json.
import json
import ast
with open('remeeting_media-get-response.json', 'r') as data:
raw = json.load(data)
print raw
See the output:
Traceback (most recent call last):
File "/Users/me/Desktop/data/finished/Dashlane/diarization.py", line 7, in <module>
raw = json.load(data)
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/__init__.py", line 291, in load
**kw)
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/__init__.py", line 339, in loads
return _default_decoder.decode(s)
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py", line 364, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/decoder.py", line 382, in raw_decode
raise ValueError("No JSON object could be decoded")
ValueError: No JSON object could be decoded

Categories

Resources