How to stub an object's __call__ method? - python

I'm using Nose and Fudge for unit testing. Consider the following class:
class Foo():
    """Wraps a command-line callable; process() invokes it and captures its output."""

    def __init__(self, some_commandline):
        # BUG FIX: the original assigned the undefined name `commandline` to
        # `self._some_commandline`, while process() read `self._commandline`.
        # The attribute and parameter are made consistent here.
        self._commandline = some_commandline

    def process(self):
        # The command-line object is expected to return a (stdout, stderr) pair.
        stdout, stderr = self._commandline()
        return stdout, stderr
And a test:
def test_process_commandline(self):
    """Stub SomeCommandline's __call__ with fudge and exercise Foo.process().

    Uses .is_callable() rather than .provides('__call__'): fudge intercepts
    calls on the Fake object itself, so provides('__call__') never makes the
    Fake callable with the stubbed return value — calling it yields the Fake,
    and unpacking that raises "TypeError: 'Fake' object is not iterable".
    """
    import StringIO  # Python 2 StringIO module, as in the original snippet
    # Setup: the fake (stdout, stderr) pair the stubbed command line returns.
    fake_stdout = StringIO.StringIO()
    fake_stderr = StringIO.StringIO()
    fake_stdio = fake_stdout, fake_stderr
    fake_cline = (fudge
                  .Fake('SomeCommandline')
                  .is_callable()
                  .returns(fake_stdio))
    sut = Foo(fake_cline)
    # Exercise
    sut.process()
    # Verify
    ...
The error I get is:
...
stdout, stderr = self._commandline()
TypeError: 'Fake' object is not iterable
The code I'm stubbing has a return line that looks like this (the real version of "SomeCommandline")
return stdout_str, stderr_str
Why am I getting the TypeError saying Fake is not iterable, and how do I stub this method with fudge?

The stub should be setup using .is_callable() instead of .provides('__call__'):
# Fixed setup: .is_callable() makes the Fake itself callable, so invoking it
# returns fake_stdio directly (no .is_a_stub()/provides('__call__') needed).
fake_cline = (fudge
              .Fake('SomeCommandline')
              .is_callable()
              .returns(fake_stdio))
Also, .is_a_stub() is not needed here because we are stubbing out the method, __call__, directly, which is accessed via the class name SomeCommandLine.

Related

'bool' object is not callable when accessing multiprocessing.reduction?

I am writing a code to parallelize a task using Python 3.x multiprocessing library.
I have two classes, (Content of which is too large to be pasted), and two functions like so:
def abc_1(objekt, raw):
    """Append the literal suffix "changed" to *raw* and return the result.

    ``objekt`` is accepted (it is bound via functools.partial by the caller)
    but deliberately unused.
    """
    suffix = "changed"
    return raw + suffix
def file_task(file_name):
    # NOTE(review): Some_Config, Some_other_class, Pool and partial are defined
    # elsewhere; obj_ect is created here but never used in this snippet.
    con_fig = Some_Config()
    obj_ect = Some_other_class(con_fig)
    pool = Pool(4)
    with open(file_name, 'r', encoding='utf-8') as fh:
        # Binds con_fig as abc_1's first argument; each line of fh becomes `raw`.
        # Sending con_fig to workers requires pickling it — presumably the
        # source of the reported "TypeError: 'bool' object is not callable"
        # during ForkingPickler.dumps; confirm against Some_Config's contents.
        res = pool.map(partial(abc_1, con_fig), fh, 4)
    return res
When I run above code I get this error:
cls = <class 'multiprocessing.reduction.ForkingPickler'>
obj = (0, 0, <function mapstar at 0x10abec9d8>, ((functools.partial(<function abc_1 at 0x10ad98620>, <project.some_file.... 'Indeed, Republican lawyers identified only 300 cases of electoral fraud in the United States in a decade.\n')),), {})
protocol = None
@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
TypeError: 'bool' object is not callable
However, If I change my two functions like so, where I don't need to send object of a class to my function abc_1 which I want parallelized, it works fine:
def abc_1(raw):
    # One-argument variant: nothing besides the plain function reference needs
    # to be pickled, so pool.map works.
    return raw + "changed"

def file_task(file_name):
    con_fig = Some_Config()
    obj_ect = Some_other_class(con_fig)  # created but unused in this snippet
    pool = Pool(4)
    with open(file_name, 'r', encoding='utf-8') as fh:
        # Plain top-level function pickles fine; trailing 4 is the chunksize.
        res = pool.map(abc_1, fh, 4)
    return res
I would think "partial" is the culprit but even this works:
def file_task(file_name):
    con_fig = Some_Config()
    obj_ect = Some_other_class(con_fig)  # created but unused in this snippet
    pool = Pool(4)
    with open(file_name, 'r', encoding='utf-8') as fh:
        # partial() with no bound arguments still pickles fine, which shows the
        # culprit is the con_fig argument itself, not functools.partial.
        res = pool.map(partial(abc_1), fh, 4)
    return res
Since I haven't provided the content of my classes, I understand this makes my question harder to answer.
I have gone through several such posts: TypeError: 'bool' object is not callable while creating custom thread pool
Nothing of this sort is happening in my code, not even in my classes.
Could my initializations in my class be leading to the error I am getting? What else could be causing this? What are some things to keep in mind when passing a class instance to a function during multiprocessing?
It will really help if someone can point me in right direction.

How do I mock an open(...).write() without getting a 'No such file or directory' error?

I've based my solution on:
How do I mock an open used in a with statement (using the Mock framework in Python)?,
AttributeError: <module '__main__' from [..] does not have the attribute 'open',
http://www.voidspace.org.uk/python/mock/helpers.html#mock.mock_open
I have a class, which I can instantiate, which writes to a file. I'm trying to test it, but I'm having problems mocking open(). I'm using the following as the smallest piece of code, which can
import os
import unittest
from unittest.mock import mock_open, patch
__author__ = 'drews'
class MockPathExists(object):
    """Callable stand-in for os.path.exists that records its call arguments.

    Always answers with the canned ``return_value`` and keeps the positional
    arguments of the most recent call in ``received_args``.
    """

    def __init__(self, return_value):
        # No call has been seen yet.
        self.received_args = None
        self.return_value = return_value

    def __call__(self, *args, **kwargs):
        # Remember what we were asked about, then return the canned answer.
        self.received_args = args
        return self.return_value
class WriteData:
    """Writes 'Hello, <name>!' into <dir>/output.text, if <dir> exists."""

    def __init__(self, dir, name='World'):
        self.name = name
        self.dir = dir

    def dump(self):
        # Guard clause: silently do nothing when the target directory is absent.
        if not os.path.exists(self.dir):
            return
        target = '{0}/output.text'.format(self.dir)
        with open(target, 'w+') as fp:
            fp.write('Hello, {0}!'.format(self.name))
class TestListWindowsPasswords(unittest.TestCase):
    # Question's original (failing) test: patch.object(WriteData, 'open', ...)
    # only attaches an attribute named 'open' to the class; dump() still calls
    # the builtin open(), hence the FileNotFoundError being asked about.

    def setUp(self):
        # Swap os.path.exists for a recording stub; restored in tearDown.
        self._orig_pathexists = os.path.exists
        os.path.exists = MockPathExists(True)

    def test_dump(self):
        m = mock_open()
        # NOTE(review): this patch is ineffective — see class comment above.
        with patch.object(WriteData, 'open', m, create=True):
            data_writer = WriteData(
                dir='/my/path/not/exists',
                name='Foo'
            )
            data_writer.dump()
        # NOTE(review): dump() calls os.path.exists(self.dir), so received_args[0]
        # would be '/my/path/not/exists' (without '/output.text').
        self.assertEqual(os.path.exists.received_args[0], '/my/path/not/exists/output.text')
        m.assert_called_once_with('/my/path/not/exists/output.text', 'w+')
        handle = m()
        handle.write.assert_called_once_with('Hello, Foo!')

    def tearDown(self):
        # Undo the setUp monkey-patch so other tests see the real function.
        os.path.exists = self._orig_pathexists
When I run this, I get the following error:
Error
Traceback (most recent call last):
File "/Users/drews/Development/tool/tests/test_mockopen.py", line 41, in test_dump
data_writer.dump()
File "/Users/drews/Development/tool/tests/test_mockopen.py", line 25, in dump
with open('{0}/output.text'.format(self.dir), 'w+') as fp:
FileNotFoundError: [Errno 2] No such file or directory: '/my/path/not/exists/output.text'
How can I mock open(), so that it just returns a file_pointer, and doesn't try to interact with the file system at all?
Mock builtins.open (or module.open, module = the module name that contains WriteData) with the mock_open:
import builtins
class TestListWindowsPasswords(unittest.TestCase):
    # Working version: patches the *builtin* open, which is what
    # WriteData.dump() actually calls.

    def setUp(self):
        # Swap os.path.exists for a recording stub.
        self._orig_pathexists = os.path.exists
        os.path.exists = MockPathExists(True)

    def test_dump(self):
        with patch('builtins.open', unittest.mock.mock_open()) as m:
            data_writer = WriteData(
                dir='/my/path/not/exists',
                name='Foo'
            )
            data_writer.dump()
        # exists() is called with the directory only (corrected vs. the question).
        self.assertEqual(os.path.exists.received_args[0], '/my/path/not/exists')  # fixed
        m.assert_called_once_with('/my/path/not/exists/output.text', 'w+')
        handle = m()
        handle.write.assert_called_once_with('Hello, Foo!')
You can use the __enter__ magic method to simulate the file handle returned by the with statement:
from unittest.mock import patch, MagicMock, call, mock_open
# The '#patch(...)' lines in the dump are mangled decorators: restored to '@'.
# Decorators apply bottom-up, so mock_open_file corresponds to builtins.open
# and mock_os to the 'os' patch.
# NOTE(review): new_callable=mock_open() (called) is unusual — the idiomatic
# form is new_callable=mock_open (uncalled); confirm against the mock docs.
@patch('os')
@patch('builtins.open', new_callable=mock_open())
def test_dump(self, mock_open_file, mock_os):
    data_writer = WriteData(dir='/my/path/not/exists', name='Foo')
    # BUG FIX: the original snippet never invoked dump(), so none of the
    # assertions below could ever succeed.
    data_writer.dump()
    mock_os.path.exists.assert_called_once_with('/my/path/not/exists')
    mock_open_file.assert_called_once_with('/my/path/not/exists/output.text', 'w+')
    # __enter__() yields the file handle the `with` block writes to.
    mock_open_file.return_value.__enter__().write.assert_called_once_with('Hello, Foo!')
Hope this helps!

Python Multiprocessing: AttributeError: 'Test' object has no attribute 'get_type'

short short version:
I am having trouble parallelizing code which uses instance methods.
Longer version:
This python code produces the error:
Error
Traceback (most recent call last):
File "/Users/gilzellner/dev/git/3.2.1-build/cloudify-system-tests/cosmo_tester/test_suites/stress_test_openstack/test_file.py", line 24, in test
self.pool.map(self.f, [self, url])
File "/Users/gilzellner/.virtualenvs/3.2.1-build/lib/python2.7/site-packages/pathos/multiprocessing.py", line 131, in map
return _pool.map(star(f), zip(*args)) # chunksize
File "/Users/gilzellner/.virtualenvs/3.2.1-build/lib/python2.7/site-packages/multiprocess/pool.py", line 251, in map
return self.map_async(func, iterable, chunksize).get()
File "/Users/gilzellner/.virtualenvs/3.2.1-build/lib/python2.7/site-packages/multiprocess/pool.py", line 567, in get
raise self._value
AttributeError: 'Test' object has no attribute 'get_type'
This is a simplified version of a real problem I have.
import urllib2
from time import sleep
from os import getpid
import unittest
from pathos.multiprocessing import ProcessingPool as Pool
class Test(unittest.TestCase):
    # Question code (Python 2): instance methods handed to a pathos process pool.

    def f(self, x):
        # Fetch a URL and report which worker pid handled it.
        print urllib2.urlopen(x).read()
        print getpid()
        return

    def g(self, y, z):
        print y
        print z
        return

    def test(self):
        url = "http://nba.com"
        self.pool = Pool(processes=1)
        for x in range(0, 3):
            # NOTE(review): map() takes one iterable of single arguments, so
            # [self, url] means self.f(self) and self.f(url) — passing the
            # test instance as x is what surfaces the AttributeError.
            self.pool.map(self.f, [self, url])
            self.pool.map(self.g, [self, url, 1])
        sleep(10)
I am using pathos.multiprocessing due to the recommendation here:
Multiprocessing: Pool and pickle Error -- Pickling Error: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
Before using pathos.multiprocessing, the error was:
"PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed"
You're using multiprocessing map method incorrectly.
According to python docs:
A parallel equivalent of the map() built-in function (it supports only
one iterable argument though).
Where standard map:
Apply function to every item of iterable and return a list of the
results.
Example usage:
from multiprocessing import Pool
def f(x):
    # Square one item; Pool.map feeds items from the iterable one at a time.
    return x*x

if __name__ == '__main__':
    p = Pool(5)
    print(p.map(f, [1, 2, 3]))
What you're looking for is apply_async method:
def test(self):
    url = "http://nba.com"
    self.pool = Pool(processes=1)
    for x in range(0, 3):
        # apply_async passes an explicit argument tuple instead of mapping
        # an iterable, which matches the intended f(self, x) signature.
        # NOTE(review): self.f is already bound, so including self in args
        # again looks redundant — presumably mirrors the pathos star-call
        # convention from the question; verify before reuse.
        self.pool.apply_async(self.f, args=(self, url))
        self.pool.apply_async(self.g, args=(self, url, 1))
    sleep(10)
The error indicates you are trying to read an attribute which is not defined for the object Test.
AttributeError: 'Test' object has no attribute 'get_type'"
In your class test, you haven't defined get_type method or any other attribute hence the error.

Python unittesting initiate values

Sorry if this question is stupid. I created an unittest class which needs to take given inputs and outputs from outside. Thus, I guess these values should be initiated. However, I met some errors in the following code:
CODE:
import unittest
from StringIO import StringIO
##########Inputs and outputs from outside#######
a=[1,2]
b=[2,3]
out=[3,4]
####################################
def func1(a, b):
    """Combine *a* and *b* with the + operator (sum, or concatenation)."""
    combined = a + b
    return combined
class MyTestCase(unittest.TestCase):
    # Question code: overriding __init__ with extra required parameters breaks
    # unittest's loader (and TestCase.__init__ is never called) — this is the
    # root of the TypeError the question asks about.
    def __init__(self,a,b,out):
        self.a=a
        self.b=b
        self.out=out

    def testMsed(self):
        # NOTE(review): self.tot_iter is never defined anywhere in the snippet;
        # presumably meant to be len(self.a).
        for i in range(self.tot_iter):
            print i
            fun = func1(self.a[i],self.b[i])
            value = self.out[i]
            testFailureMessage = "Test of function name: %s iteration: %i expected: %i != calculated: %i" % ("func1",i,value,fun)
            self.assertEqual(round(fun,3),round(value,3),testFailureMessage)
if __name__ == '__main__':
    # NOTE(review): makeSuite expects the TestCase *class*; passing an instance
    # (MyTestCase(a, b, out)) is what raises
    # "TypeError: issubclass() arg 1 must be a class".
    f = MyTestCase(a,b,out)
    from pprint import pprint
    stream = StringIO()
    runner = unittest.TextTestRunner(stream=stream, verbosity=2)
    result = runner.run(unittest.makeSuite(MyTestCase(a,b,out)))
    print 'Tests run', result.testsRun
However, I got the following error
Traceback (most recent call last):
File "C:testing.py", line 33, in <module>
result = runner.run(unittest.makeSuite(MyTestCase(a,b,out)))
File "C:\Python27\lib\unittest\loader.py", line 310, in makeSuite
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
File "C:\Python27\lib\unittest\loader.py", line 50, in loadTestsFromTestCase
if issubclass(testCaseClass, suite.TestSuite):
TypeError: issubclass() arg 1 must be a class
Can anyone give me some suggestions? Thanks!
The root of the problem is this line,
result = runner.run(unittest.makeSuite(MyTestCase(a,b,out)))
unittest.makeSuite expects a class, not an instance of a class. So just MyTestCase, not MyTestCase(a, b, out). This means that you can't pass parameters to your test case in the manner you are attempting to. You should probably move the code from init to a setUp function. Either access a, b, and out as globals inside setUp or take a look at this link for information regarding passing parameters to a unit test.
By the way, here is the source file within python where the problem originated. Might be informative to read.

How to send a function to a remote Pyro object

I am trying to set up some code using Pyro to process python code functions on a remote host and get results back. After starting the name server, i would execute this code on the remote host (actually still on localhost):
import Pyro4
class Server(object):
    """Pyro-exposed service that applies a received callable to its arguments."""

    def evaluate(self, func, args):
        # Unpack the argument sequence and invoke the (unpickled) callable.
        result = func(*args)
        return result
def main():
    # Register a single Server instance with the Pyro name server as "server".
    server = Server()
    Pyro4.Daemon.serveSimple(
        {
            server: "server"
        },
        ns=True)

if __name__ == '__main__':
    main()
On the client side i have this code, which is an example of the behaviour i am trying to set up.
import Pyro4
remoteServer = Pyro4.Proxy('PYRONAME:server')

def square(x):
    # Defined in __main__, so pickle sends only the reference
    # '__main__.square', which the server process cannot resolve —
    # the cause of the AttributeError shown below.
    return x**2

print remoteServer.evaluate(square, 4)
However, this code results in the following exception:
/usr/lib/python2.7/site-packages/Pyro4/core.py:155: UserWarning: HMAC_KEY not set,
protocol data may not be secure
warnings.warn("HMAC_KEY not set, protocol data may not be secure")
Traceback (most recent call last):
File "/home/davide/Projects/rempy/example-api-pyro.py", line 7, in <module>
print remoteServer.evaluate(square, 4)
File "/usr/lib/python2.7/site-packages/Pyro4/core.py", line 149, in __call__
return self.__send(self.__name, args, kwargs)
File "/usr/lib/python2.7/site-packages/Pyro4/core.py", line 289, in _pyroInvoke
raise data
AttributeError: 'module' object has no attribute 'square'
It seems to me that the function object is pickled correctly and is sent to the Server instance on the remote host, but there is some problem in the namespace.
How can i solve this problem?
Thanks
I think i know your problem:
the module the function is defined in is called
'__main__'
it exists in all running versions of python.
pickle does not transfer the source code but a reference
__main__.square
so you have two possibilities:
move square out into its own module and make the main module as short as possible, such as:
# main.py
def square(x):
    """Return *x* raised to the second power."""
    return x ** 2
import Pyro4

def main():
    # square now lives in the importable module 'main', so the server can
    # resolve the pickled reference by importing the same module.
    remoteServer = Pyro4.Proxy('PYRONAME:server')
    print remoteServer.evaluate(square, 4)
and:
# __main__.py
# Thin launcher: keeps __main__ trivial so nothing pickled refers to __main__.
import main
main.main()
Then the server can import exactly the same module from the file.
or create a module with my code:
class ThisShallNeverBeCalledError(Exception):
    """Raised if a placeholder _R object is ever invoked directly."""
class _R(object):
    """Pickle helper: an object that unpickles as f(*args).

    __reduce__ returns (f, args), so pickle.loads(pickle.dumps(_R(f, *a)))
    evaluates f(*a) on the receiving side instead of rebuilding an _R.
    """

    def __init__(self, f, *args):
        self.ret = (f, args)

    def __reduce__(self):
        return self.ret

    def __call__(self, *args):
        # _R instances exist only to be pickled, never to be called.
        raise ThisShallNeverBeCalledError()

    # BUG FIX: '#classmethod' in the dump is a mangled '@classmethod'
    # decorator; without it, fromReduce cannot be called on the class.
    @classmethod
    def fromReduce(cls, value):
        # Alternate constructor from a ready-made (callable, args) pair.
        ret = cls(None)
        ret.ret = value
        return ret
def dump_and_load(obj):
    """Round-trip *obj* through pickle once and return the reconstruction."""
    payload = pickle.dumps(obj)
    return pickle.loads(payload)
# this string creates an object of an anonymous type that can
# be called to create an R object or that can be reduced by pickle
# and creates another anonymous type when unpickled
# you may not inherit from this MetaR object because it is not a class
# (all spaces inside the source string are stripped by the trailing
# .replace(' ', ''), so the inline padding is purely for readability)
PICKLABLE_R_STRING= "type('MetaR', (object,), " \
    " {'__call__' : lambda self, f, *args: "\
    " type('PICKLABLE_R', "\
    " (object,), "\
    " {'__reduce__' : lambda self: (f, args), "\
    " '__module__' : 'pickleHelp_', "\
    " '__name__' : 'PICKLABLE_R', "\
    " '__call__' : lambda self: None})(), "\
    " '__reduce__' : lambda self: "\
    " self(eval, meta_string, "\
    " {'meta_string' : meta_string}).__reduce__(), "\
    " '__module__' : 'pickleHelp_', "\
    " '__name__' : 'R'})()".replace(' ', '')
# Bootstrap: an _R that eval's the string above, then round-trip it once so R
# becomes the self-pickling anonymous object rather than an _R instance.
PICKLABLE_R = _R(eval, PICKLABLE_R_STRING, \
    {'meta_string' : PICKLABLE_R_STRING})
R = dump_and_load(PICKLABLE_R)
del PICKLABLE_R, PICKLABLE_R_STRING
# Picklable references to __builtin__'s namespace and to the function type,
# both resolved lazily on the unpickling side (Python 2 naming).
PICKLABLE___builtins__ = R(vars, R(__import__, '__builtin__'))
PICKLABLE_FunctionType = R(type, R(eval, 'lambda:None'))
##R.__module__ = __name__
##R.__name__ = 'PICKLABLE_R'
def packCode(code, globals = {}, add_builtins = True, use_same_globals = False, \
             check_syntax = True, return_value_variable_name = 'obj',
             __name__ = __name__ + '.packCode()'):
    '''Return an object that executes *code* in *globals* when unpickled.

    use_same_globals
        if True, all codes sent through one pickle connection share the
        same globals; by default they don't (globals is copied per call)
    return_value_variable_name
        if a variable with this name exists in globals after the code
        execution it is returned as the result of the unpickling
        operation; otherwise None is returned
    __name__
        pseudo-filename used when compiling the shipped code
    '''
    if check_syntax:
        # Fail fast locally instead of shipping code that cannot compile.
        compile(code, '', 'exec')
    # Copying globals is important: it is transferred through pickle once for
    # all identical code, and copying prevents different code from being
    # executed in the same globals.
    if not use_same_globals:
        globals = globals.copy()
    if add_builtins:
        globals['__builtins__'] = PICKLABLE___builtins__
    globals.setdefault('obj', None)
    # Ship the compilation itself — do not marshal/unmarshal code objects,
    # because the platforms may vary.
    code = R(compile, code, __name__, 'exec')
    # Final object: build a function from the compiled code, run it, then
    # fetch the named result variable from globals (tuple[-1] selects it).
    # BUG FIX: the original referenced `returnValueVariableName`, which is a
    # NameError; the parameter is `return_value_variable_name`.
    obj = R(R(getattr, tuple, '__getitem__'), (
        R(R(PICKLABLE_FunctionType, code, globals)),
        R(R(getattr, type(globals), 'get'), globals, \
          return_value_variable_name, None)
    ), -1)
    return obj
and then send this to the other side:
packCode('''
def square(...):
...
''', return_value_variable_name = 'square')
and the function will come out on the other side; no module code is needed to transfer this Python function to the other server side.
If something does not work out please tell me.

Categories

Resources