Cannot put object on Queue - python

I want to put an instance of scapy.layers.dhcp.BOOTP on a multiprocessing.Queue. Every time I call put(), the following exception occurs:
Traceback (most recent call last):
File "/usr/lib/python2.6/multiprocessing/queues.py", line 242, in _feed
send(obj)
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
Of course, trying to pickle the instance directly using pickle.dumps() also fails. But why is this class not picklable?
For all those who don't have scapy installed:
class BOOTP(Packet):
    name = "BOOTP"
    fields_desc = [ ByteEnumField("op", 1, {1: "BOOTREQUEST", 2: "BOOTREPLY"}),
                    ByteField("htype", 1),
                    ByteField("hlen", 6),
                    ByteField("hops", 0),
                    IntField("xid", 0),
                    ShortField("secs", 0),
                    FlagsField("flags", 0, 16, "???????????????B"),
                    IPField("ciaddr", "0.0.0.0"),
                    IPField("yiaddr", "0.0.0.0"),
                    IPField("siaddr", "0.0.0.0"),
                    IPField("giaddr", "0.0.0.0"),
                    Field("chaddr", "", "16s"),
                    Field("sname", "", "64s"),
                    Field("file", "", "128s"),
                    StrField("options", "") ]

    def guess_payload_class(self, payload):
        if self.options[:len(dhcpmagic)] == dhcpmagic:
            return DHCP
        else:
            return Packet.guess_payload_class(self, payload)

    def extract_padding(self, s):
        if self.options[:len(dhcpmagic)] == dhcpmagic:
            # set BOOTP options to DHCP magic cookie and make rest a payload of DHCP options
            payload = self.options[len(dhcpmagic):]
            self.options = self.options[:len(dhcpmagic)]
            return payload, None
        else:
            return "", None

    def hashret(self):
        return struct.pack("L", self.xid)

    def answers(self, other):
        if not isinstance(other, BOOTP):
            return 0
        return self.xid == other.xid
Are there any other ways to "transport" this instance to another subprocess?

Well, the problem is that you can't pickle the function type. It's what you get when you do type(some_user_function). See this:
>>> import types
>>> import pickle
>>> pickle.dumps(types.FunctionType)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python26\lib\pickle.py", line 1366, in dumps
Pickler(file, protocol).dump(obj)
File "C:\Python26\lib\pickle.py", line 224, in dump
self.save(obj)
File "C:\Python26\lib\pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "C:\Python26\lib\pickle.py", line 748, in save_global
(obj, module, name))
pickle.PicklingError: Can't pickle <type 'function'>: it's not found as __builtin__.function
So such a function type is stored somewhere on the object you try to send. It's not in the code you pasted, so I guess it comes from the superclass.
Maybe you can simply send all the arguments required to create an instance of scapy.layers.dhcp.BOOTP instead of the instance itself, as sketched below?
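A minimal sketch of that idea, assuming a packet pkt and a multiprocessing queue q already exist (both names are made up here): put only plain, picklable field values on the queue and rebuild the packet on the consumer side.
# Producer: send plain field values instead of the packet object.
q.put({'op': pkt.op, 'xid': pkt.xid, 'chaddr': pkt.chaddr})

# Consumer: scapy packets accept field names as keyword arguments.
fields = q.get()
rebuilt = BOOTP(**fields)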

Another thing that may help when diagnosing problems like these is to use the pure-Python pickle module instead of cPickle (which queues.py must be using implicitly).
I had a similar situation, getting a completely unhelpful message,
Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
I wandered into the debugger, found the object being pickled, and tried passing it to
pickle.dump(myobj, open('outfile', 'wb'), -1)
and got a much more helpful:
PicklingError: Can't pickle <function findAllRefs at 0x105809f50>:
it's not found as buildsys.repoclient.findAllRefs
Which pointed much more directly at the problematic code.

A solution I use is to str the packet and then put it on the queue, as sketched below.
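A minimal sketch of that approach, again assuming an existing packet pkt and queue q: on Python 2, str(pkt) yields the raw wire bytes of the packet, and feeding those bytes back to the BOOTP constructor re-dissects them.
q.put(str(pkt))         # the raw bytes are a plain str, which pickles fine

raw = q.get()
pkt_copy = BOOTP(raw)   # re-dissect the bytes into a BOOTP packet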

AttributeError: 'str' object has no attribute 'errno'

I placed a ClientConnectionError exception, generated by asyncio, in a multiprocessing.Queue, in order to pass an exception raised in asyncio land back to a client in another thread/process.
My assumption is that the error occurs during deserialization, when the exception is read back out of the queue; it looks pretty much impossible to reach otherwise.
Traceback (most recent call last):
File "model_neural_simplified.py", line 318, in <module>
main(**arg_parser())
File "model_neural_simplified.py", line 314, in main
globals()[command](**kwargs)
File "model_neural_simplified.py", line 304, in predict
next_neural_data, next_sample = reader.get_next_result()
File "/project_neural_mouse/src/asyncs3/s3reader.py", line 174, in get_next_result
result = future.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "model_neural_simplified.py", line 245, in read_sample
f_bytes = s3f.read(read_size)
File "/project_neural_mouse/src/asyncs3/s3reader.py", line 374, in read
size, b = self._issue_request(S3Reader.READ, (self.url, size, self.position))
File "/project_neural_mouse/src/asyncs3/s3reader.py", line 389, in _issue_request
response = self.communication_channels[uuid].get()
File "/usr/lib/python3.6/multiprocessing/queues.py", line 113, in get
return _ForkingPickler.loads(res)
File "/usr/local/lib/python3.6/dist-packages/aiohttp/client_exceptions.py", line 133, in __init__
super().__init__(os_error.errno, os_error.strerror)
AttributeError: 'str' object has no attribute 'errno'
I figure it's a long shot to ask, but does anyone know anything about this issue?
Python 3.6.8, aiohttp.__version__ == 3.6.0
Update:
I managed to reproduce the issue (credit to Samuel in the comments for improving the minimal reproducible test case, and later xtreak at bugs.python.org for further distilling it to a pickle-only test case):
import pickle

ose = OSError(1, 'unittest')

class SubOSError(OSError):
    def __init__(self, foo, os_error):
        super().__init__(os_error.errno, os_error.strerror)

cce = SubOSError(1, ose)
cce_pickled = pickle.dumps(cce)
pickle.loads(cce_pickled)
./python.exe ../backups/bpo38254.py
Traceback (most recent call last):
File "/Users/karthikeyansingaravelan/stuff/python/cpython/../backups/bpo38254.py", line 12, in <module>
pickle.loads(cce_pickled)
File "/Users/karthikeyansingaravelan/stuff/python/cpython/../backups/bpo38254.py", line 8, in __init__
super().__init__(os_error.errno, os_error.strerror)
AttributeError: 'str' object has no attribute 'errno'
References:
https://github.com/aio-libs/aiohttp/issues/4077
https://bugs.python.org/issue38254
OSError has a custom __reduce__ implementation; unfortunately, it isn't friendly to subclasses whose constructor arguments don't match OSError's. You can see the intermediate state of the pickling by calling __reduce__ manually:
>>> SubOSError.__reduce__(cce)
(modulename.SubOSError, (1, 'unittest'))
The first element of the tuple is the callable to call, the second is the tuple of arguments to pass. So when it tries to recreate your class, it does:
modulename.SubOSError(1, 'unittest')
having lost the information about the OSError the instance was originally created with.
If you must accept arguments that don't match what OSError.__reduce__/OSError.__init__ expects, you're going to need to write your own __reduce__ override to ensure the correct information is pickled. A simple version might be:
class SubOSError(OSError):
    def __init__(self, foo, os_error):
        self.foo = foo  # Must preserve information for pickling later
        super().__init__(os_error.errno, os_error.strerror)

    def __reduce__(self):
        # Pickle as type plus tuple of args expected by type
        return type(self), (self.foo, OSError(*self.args))
With that design, SubOSError.__reduce__(cce) would now return:
(modulename.SubOSError, (1, PermissionError(1, 'unittest')))
where the second element of the tuple is the correct arguments needed to recreate the instance (the change from OSError to PermissionError is expected; OSError actually returns its own subclasses based on the errno).
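A quick round-trip check of that sketch, reusing the SubOSError defined above:
import pickle

cce = SubOSError(1, OSError(1, 'unittest'))
restored = pickle.loads(pickle.dumps(cce))
print(restored.foo, restored.args)   # prints: 1 (1, 'unittest')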
This issue was fixed and merged to aiohttp's master on 25 Sep 2019. I'll update this answer if I notice which release the fix lands in (feel free to edit this answer to note that version).
GitHub issue with the fix:
https://github.com/aio-libs/aiohttp/issues/4077

__name__ attribute in Python for Win32_ComputerSystem?

I'm trying to get the name of a WMI Win32 class, but the __name__ attribute is not defined for it.
>>> import wmi
>>> machine = wmi.WMI()
>>> machine.Win32_ComputerSystem.__name__
I get the following error:
Traceback (most recent call last):
File "<pyshell#21>", line 1, in <module>
machine.Win32_ComputerSystem.__name__
File "C:\Python27\lib\site-packages\wmi.py", line 796, in __getattr__
return _wmi_object.__getattr__ (self, attribute)
File "C:\Python27\lib\site-packages\wmi.py", line 561, in __getattr__
return getattr (self.ole_object, attribute)
File "C:\Python27\lib\site-packages\win32com\client\dynamic.py", line 457, in __getattr__
raise AttributeError(attr)
AttributeError: __name__
I thought that the __name__ attribute is defined for all Python functions, so I don't know what the problem is here. How is it possible that this function doesn't have that attribute?
OK, the reason I thought it was a method is that machine.Win32_ComputerSystem() is defined, but I guess that isn't enough for something to be a method. I realise that it isn't a method.
However, this doesn't work:
>>> machine.Win32_ComputerSystem.__class__.__name__
'_wmi_class'
I want it to return 'Win32_ComputerSystem'. How can I do this?
From what I can tell looking at the documentation (specifically, based on this snippet), wmi.Win32_ComputerSystem is a class, not a method. If you want to get its name you could try:
machine.Win32_ComputerSystem.__class__.__name__
I've found a way to get the output that I want; however, it doesn't satisfy me.
repr(machine.Win32_ComputerSystem).split(':')[-1][:-1]
returns: 'Win32_ComputerSystem'
There must be a more Pythonic way to do this.
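One possibly cleaner option, though it rests on an assumption: the traceback shows the wrapper forwarding unknown attributes to the underlying COM object, and every WMI object exposes a standard Path_ property whose Class member holds the class name. If that forwarding applies here too, this sketch should work:
import wmi

machine = wmi.WMI()
# Path_.Class is standard WMI; the attribute lookup should be forwarded
# to the underlying COM object by the wrapper's __getattr__.
print machine.Win32_ComputerSystem.Path_.Class   # expected: 'Win32_ComputerSystem'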

How can I load a password-protected private key from a .pem file with M2Crypto?

I have a password-protected private key in a .pem file; I want to use it to sign requests to a remote server. I'm able to load the key and enter the passphrase after being prompted for it:
python
>>> import M2Crypto
>>> pk = M2Crypto.RSA.load_key('private.pem')
Enter passphrase:
>>>
However, I need this for a server process which is restarted every morning, and thus the passphrase must be passed automatically somehow. The load_key method supports a callback argument for this purpose, so I tried several variants of:
>>> def gimmepw():
... return 'mysecret'
...
>>> pk = M2Crypto.RSA.load_key('private.pem', gimmepw)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "/usr/local/Plone/Python-2.4/.../M2Crypto/RSA.py", line 351, in load_key
return load_key_bio(bio, callback)
File "/usr/local/Plone/Python-2.4/.../M2Crypto/RSA.py", line 372, in load_key_bio
rsa_error()
File "/usr/local/Plone/Python-2.4/.../M2Crypto/RSA.py", line 302, in rsa_error
raise RSAError, m2.err_reason_error_string(m2.err_get_error())
M2Crypto.RSA.RSAError: bad password read
>>>
(replace "..." by "lib/python2.4/site-packages")
What am I doing wrong?
This is due to the lack of parameter support in your callback function. Since it will be called with at least one parameter, a TypeError exception will occur (which is caught by M2Crypto).
>>> def gimmepw(*args):
... print 'args:', repr(args)
... return 'test'
...
>>> M2Crypto.RSA.load_key('key.pem', gimmepw)
args: (0,)
<M2Crypto.RSA.RSA instance at 0xb6e8050c>
You should try:
def gimmepw(*args):
    return 'mysecret'
One caveat: as of Python 2.7, your callback must return a str type. A unicode value, for example, will error out in the same way.
>>> def gimmepw(*args):
... return u'test'
...
>>> M2Crypto.RSA.load_key('key.pem', gimmepw)
Traceback (most recent call last):
File "test_intuit_data.py", line 76, in <module>
intuit_rsa_key = RSA.load_key(file='key.pem', callback=gimmepw)
File "lib/python2.7/site-packages/M2Crypto/RSA.py", line 351, in load_key
return load_key_bio(bio, callback)
File "lib/python2.7/site-packages/M2Crypto/RSA.py", line 372, in load_key_bio
rsa_error()
File "lib/python2.7/site-packages/M2Crypto/RSA.py", line 302, in rsa_error
raise RSAError, m2.err_reason_error_string(m2.err_get_error())
M2Crypto.RSA.RSAError: bad password read
If you are using any input other than a str type, be sure to cast to a str appropriately:
>>> def gimmepw(*args):
... return str(u'test')
...
>>> M2Crypto.RSA.load_key('key.pem', gimmepw)
<M2Crypto.RSA.RSA instance at 0xb6e8050c>
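Putting the pieces together, a minimal sketch for the original unattended-server case (file name and passphrase are placeholders):
import M2Crypto

def gimmepw(*args):
    # M2Crypto passes at least one positional argument, and on Python 2.7
    # the callback must return a plain str (not unicode).
    return str('mysecret')

pk = M2Crypto.RSA.load_key('private.pem', gimmepw)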

Throwing a custom error in a module meant to be imported by arbitrary modules (library)

I'm building a Python library that implements a task queue. If for whatever reason the thread that processes the tasks dies, and the option to restart the thread isn't set, I need to throw an exception.
Is this something that I should just throw a RuntimeError for? I don't want to throw a custom exception, since callers would have to import it, but I'm not sure how throwing exceptions to arbitrary calling code is best implemented.
Throw a custom exception. If the user of the library needs to catch that exception, they can import it.
For instance, take pickle.PicklingError:
>>> import pickle
>>> pickle.dumps(type(None))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\pickle.py", line 1374, in dumps
Pickler(file, protocol).dump(obj)
File "C:\Python27\lib\pickle.py", line 224, in dump
self.save(obj)
File "C:\Python27\lib\pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "C:\Python27\lib\pickle.py", line 748, in save_global
(obj, module, name))
pickle.PicklingError: Can't pickle <type 'NoneType'>: it's not found as __builtin__.NoneType
>>> try:
... pickle.dumps(type(None))
... except pickle.PicklingError:
... print 'Oops.'
...
Oops.
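A minimal sketch of that advice applied to the task-queue library (the module, class, and function names are invented for illustration): define the exception inside the library, and callers import it only when they want to catch it. Deriving from RuntimeError also means callers who catch only RuntimeError will still see it.
# taskqueue/errors.py (hypothetical library module)
class WorkerDiedError(RuntimeError):
    """Raised when the task-processing thread dies and restart is disabled."""

# calling code
from taskqueue.errors import WorkerDiedError

try:
    queue.join()
except WorkerDiedError:
    restart_or_report()   # hypothetical handler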

Django: "TypeError: [] is not JSON serializable" Why?

How can it be that this error was raised? I entered this:
def json(self):
    return json.dumps(
        {
            'items': self.items
        }
    )
and got that error (because self.items was an empty Django queryset),
but then,
def json(self):
    return json.dumps(
        {
            'items': []  # Pass in an empty list to prove that the error was idiotic.
        }
    )
worked fine (which at least proves that the error message is worthless)
Is this because the queryset defines __repr__() and returns '[]' as a string when it's empty, or something ridiculous like that?
Querysets are not serializable out-of-the-box. If you try list(self.items) instead of just self.items, that should work as long as the items themselves are JSON-serializable.
Update: it will raise an exception even if the queryset isn't empty. I don't think it'll be accepted as a Django bug, though of course you can try; the simplest answer is to force evaluation using list(qs), as I've already said.
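A sketch of that fix applied to the original method; list() only forces evaluation of the queryset, so this works when the individual items are themselves JSON-serializable:
def json(self):
    return json.dumps(
        {
            'items': list(self.items)   # force evaluation of the queryset
        }
    )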
This is very frustrating. Django's serializers complain about everything that isn't a queryset, and json.dumps complains about objects from Django's ORM.
>>> from cluster.models import Account
>>> import json
>>> json.dumps(Account.objects.all()[0])
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/encoder.py", line 201, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/encoder.py", line 264, in iterencode
return _iterencode(o, 0)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/json/encoder.py", line 178, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: <Account: 9de5-2653-000d-81a3 => foo#bar.net> is not JSON serializable
Versus
>>> serializers.serialize("json", [clusters])
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/django/core/serializers/__init__.py", line 91, in serialize
s.serialize(queryset, **options)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/django/core/serializers/base.py", line 41, in serialize
for field in obj._meta.local_fields:
AttributeError: 'QuerySet' object has no attribute '_meta'
As Vinay pointed out, even if you cast to a list, serialization often still fails. For me, serialization fails on DateTimeField elements (datetime.datetime objects), even if I ask for a ValuesQuerySet (list-like) with .values(). The solution for me was a simple comprehension.
json.dumps([str(obj) for obj in Model.objects.values()])
In your case, that would be
return json.dumps({k: str(v) for k, v in self.__dict__.items()})
The magic of str saves the day. The repr built-in may also be helpful if you need object type information in your serialization.
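If stringifying everything is too lossy, another option (a different technique from the str comprehension above) is Django's own DjangoJSONEncoder, which knows how to encode datetime and Decimal values; a sketch:
from django.core.serializers.json import DjangoJSONEncoder
import json

json.dumps(list(Model.objects.values()), cls=DjangoJSONEncoder)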
