I am trying to test the run method of my class, which has an __init__ method that takes an object from another class as a parameter:
class ServePlate(FreeSurferStep):

    process_name = "FreeSurfer"
    step_name = "ServePlate"
    step_cli = "serve"
    cpu = 1
    mem = 1024

    def __init__(self, project, code, args):
        super(Stage, self).__init__(project, code, args)
        self.next_step = Autorecon1

    @classmethod
    def get_queue(cls, project_name):
        plog = ProcessingLog()
        available = plog.get_project_images(project_name, "T1")
        attempted = plog.get_step_attempted(project_name, cls.process_name, cls.step_name)
        attempted_codes = [row.Code for row in attempted]
        todo = [{'ProjectName': project_name, 'Code': row.Code}
                for row in available if row.Code not in attempted_codes]
        return todo

    def run(self):  # <<<<-- This method is to be tested
        source = None
        image = ProcessingLog().get_project_image(self.project, self.code)
        if image.ImageStore == "Dicom":
            dcmtmp = tempfile.mkdtemp()
            DicomRepository().fetch_dicoms(self.code, dcmtmp)
            first_t1 = os.path.join(dcmtmp, os.listdir(dcmtmp)[0])
            niitmp = os.path.join(tempfile.mkdtemp(), 'raw.nii')
            cmd = 'dcm2niix -b n -z n -g i -o {} -f raw {}'.format(os.path.dirname(niitmp), first_t1)
            self._run_fs_cmd(cmd)
            source = niitmp
        elif image.ImageStore == "Pre-Processed":
            source = [PreProcessedImageRepository().get_image(self.code), ]
        if source is None:
            raise ProcessingError("Could not find staging data.")
        first_t1 = self._copy_files(source)
        cmd = 'recon-all -s %(code)s -i %(image)s' % {
            "code": self.code,
            "image": first_t1
        }
        self._run_fs_cmd(cmd)  # <<<-- I am trying to check the value of the cmd variable
Here is my test. I am patching the __init__ method first, and then _run_fs_cmd, from another class.
class Testfs(unittest.TestCase):

    @patch.object(fs.FreeSurferStep, '_run_fs_cmd', spec=True)
    # @patch.object(fs.FreeSurferStep, '__init__')
    def test_serve(mock_serve):
        """
        Serve step test
        """
        mock_serve.project = 'TEST_FS'
        mock_serve.code = 'Test9001-1a5'
        mock_serve.args = ''
        mock_stage.return_value = None
        FsObj = FreeSurferStep('serve')
        stage_obj = Stage(FsObj)
        FsObj.run()
        #
        # stage_obj.run(self)
        #
        # self.assertEqual(self.cmd, '')
        # fs.FreeSurferStep._run_fs_cmd = Mock()
This gives me an error. Even though I am passing no arguments to the run method, it keeps complaining that too many arguments are being passed. Also, patching a class object to be passed to the ServePlate method, and patching the _run_fs_cmd method that cmd is passed to, don't seem to work. Do I have to mock every other method that gets called?
TypeError: test_serve() takes 1 positional argument but 2 were given
TypeError: run() takes 1 positional argument but 3 were given
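For context on the two TypeErrors above: each active @patch.object decorator injects one mock into the test method, bottom decorator first, after self, so the parameter list has to account for self plus one argument per decorator. A minimal sketch, assuming the step class is importable as fs.FreeSurferStep:

import unittest
from unittest.mock import patch

import fs  # assumed to expose FreeSurferStep


class TestSignature(unittest.TestCase):

    # The bottom decorator's mock arrives first after self, the top one last.
    @patch.object(fs.FreeSurferStep, '_run_fs_cmd')
    @patch.object(fs.FreeSurferStep, '__init__', return_value=None)
    def test_serve(self, mock_init, mock_run_fs_cmd):
        step = fs.FreeSurferStep('serve')  # __init__ is mocked, so one argument is fine
        self.assertTrue(mock_init.called)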
I got the test working by initializing it correctly:
class Testfs(unittest.TestCase):

    project = 'TEST'
    code = '9001-1a5'
    args = 'nogrid'

    @patch.object(fs.FreeSurferStep, '_run_fs_cmd', 'put_object', spec=True)
    @patch.object(fs.FreeSurferStep, '__init__')
    def test_serve(self, mock_test_serve):
        """
        Stage step test
        """
        mock_test_stage.return_value = None
        project = 'TEST'
        code = '9001-1a5'
        args = 'nogrid'
        self.logger = logging.getLogger(__name__)
        FsObj = fs.FreeSurferStep('Stage')
        stage_obj = fs.Stage(FsObj, code, args)
        stage_obj.project = 'Test'
        stage_obj.code = '9001-1a5'
        stage_obj.run()
However, I haven't found a way to check the value passed to the `_run_fs_cmd` method.
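One way to get at the command, assuming `_run_fs_cmd` is replaced with a MagicMock via patch.object (and that the other collaborators run() touches, such as ProcessingLog and the repositories, are patched as well so execution actually reaches the recon-all call), is to read the mock's recorded call arguments. A sketch:

import unittest
from unittest.mock import patch

import fs  # assumed module containing FreeSurferStep


class TestServeCmd(unittest.TestCase):

    @patch.object(fs.FreeSurferStep, '_run_fs_cmd')
    @patch.object(fs.FreeSurferStep, '__init__', return_value=None)
    def test_recon_all_cmd(self, mock_init, mock_run_fs_cmd):
        step = fs.FreeSurferStep('serve')
        step.project = 'TEST'
        step.code = '9001-1a5'
        step.args = ''
        step.run()
        # call_args[0] is the tuple of positional arguments of the last call
        cmd = mock_run_fs_cmd.call_args[0][0]
        self.assertTrue(cmd.startswith('recon-all -s 9001-1a5'))
        # or, for an exact match (the image path here is a placeholder):
        # mock_run_fs_cmd.assert_called_with('recon-all -s 9001-1a5 -i <path>')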
I am relatively new to Python and exploring mocking in unit tests. I have the following method to test, calc_scores, and I want to mock the response from get_score_json:
def calc_scores(
    score_id: str,
):
    score_json = get_score_json(score_id)
    # Do some calculations...
    return {
        "total_score": total_score
    }


def get_score_json(score_id):
    json_obj = call_to_external_service()
    return json_obj
And this is my test, where I want to supply a mock and check it was called in the right way:
def mock_get_score_json(*args, **kwargs):
    return {
        "scores": [
            {"score_1": 123, "score_2": 234},
        ]
    }


class Test_Scores:
    def test_calc_scores(self):
        with mock.patch(
            "path.to.calc_scores",
            # return_value=mock_get_score_json(),
            # new=mock_get_score_json,
            # side_effect=mock_get_score_json,
            new_callable=lambda: mock_get_score_json
        ) as x:
            result = calc_scores(
                score_id=123
            )
            print(x)
            x.assert_called_once()
The test runs and passes, except for the last line where I try assert_called_once(). This fails with the following error:
AttributeError: 'function' object has no attribute 'assert_called_once'
Where am I going wrong? The commented out lines in the args to mock.patch are the options I've tried, but none of them work.
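For what it's worth, the commented-out options seem to fail for two separate reasons: new / new_callable replace the target with the plain function mock_get_score_json, which has no assert methods, and the patch target is calc_scores itself rather than the collaborator it calls. A sketch of what I believe would work, assuming calc_scores lives in a module importable as path.to: patch get_score_json where calc_scores looks it up, and use side_effect so the replacement is still a MagicMock that records calls.

from unittest import mock

from path.to import calc_scores  # assumed import path for the module under test


def mock_get_score_json(*args, **kwargs):
    return {
        "scores": [
            {"score_1": 123, "score_2": 234},
        ]
    }


class Test_Scores:
    def test_calc_scores(self):
        with mock.patch(
            "path.to.get_score_json",         # patch the collaborator, not calc_scores
            side_effect=mock_get_score_json,  # keeps a MagicMock wrapper around the fake
        ) as mocked_get:
            calc_scores(score_id=123)
        mocked_get.assert_called_once_with(123)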
I'm trying to mock the object, but it's not working. Can anyone help me?
payment_gateway/
    payment.py
class Razz:
    def __init__(self) -> None:
        self.base_url = "https://api.example.com/v1"
        self.api_key = os.environ["API_KEY"]
        self.api_secret = os.environ["SECRET"]
        self.kh_account_number = os.environ["ACCOUNT_NUMBER"]

    def create_contact(self):
        res = requests.post(url=f"{self.base_url}/contact", data=payload,
                            auth=(self.api_key, self.api_secret))
        return "id"
And I am importing this class in another file, i.e. event_bank_details.py:
from payment_gateway.payment import Razz, PaymentGatewayErrors

PAYMENT_GATEWAY = Razz()


def update_bank_detials(request: Request) -> Response:
    contact_id = PAYMENT_GATEWAY.create_contact(body, event_id)  # Creating contact
    fund_id = PAYMENT_GATEWAY.create_account(contact_id, body)   # Creating fund account on razorpayX
    return resposes
TestCases file
@patch("event_bank_details.event_bank_details.Razz")
@pytest.mark.parametrize("input, expected_op", UPDATE_EVENT_BANK_DETAILS_INPUT)
def test_update_event_bank_details(mock_email, mock_object, input, expected_op):
    from event_bank_details.event_bank_details import update_bank_detials
    # mock_object.return_value.create_contact.return_value = None
    # mock_object.return_value.create_account.create_account = None
    response = update_bank_detials(input)
    response.prepare()
    response_dict = response.get()
    assert response_dict["statusCode"] == expected_op
And then I am writing test cases for the update_bank_detials function. It's throwing an error: invalid `API_KEY` and `API_SECRET`. How can I mock the `Razz` class `__init__()` call in the update_bank_detials function?
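One possible shape for the test, assuming the module path event_bank_details.event_bank_details used above: provide fake environment variables so the module-level Razz() can be imported at all, then patch the PAYMENT_GATEWAY instance that update_bank_detials actually uses (the request payload below is just a placeholder):

import os
from unittest import mock


@mock.patch.dict(os.environ, {
    "API_KEY": "test-key",
    "SECRET": "test-secret",
    "ACCOUNT_NUMBER": "000000",
})
def test_update_event_bank_details():
    # Patching the module-level instance means Razz.__init__ (and its real
    # API credentials) is never exercised by the function under test.
    with mock.patch(
        "event_bank_details.event_bank_details.PAYMENT_GATEWAY"
    ) as mock_gateway:
        mock_gateway.create_contact.return_value = "contact_id_1"
        mock_gateway.create_account.return_value = "fund_id_1"
        from event_bank_details.event_bank_details import update_bank_detials
        update_bank_detials({"placeholder": "request"})
        mock_gateway.create_contact.assert_called_once()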
import requests


class RadioInfo:
    def __init__(self, url, station_num=0):
        self.url = url
        self.station_num = station_num

    def update(self):
        radio_info = self.get_json()
        self.update_info(radio_info)

    def get_json(self):
        r = requests.get(self.url)
        return r.json()[self.station_num]

    def update_info(self, radio_info):
        self.radio_station_name = radio_info["station"]["name"]
        self.radio_station_description = radio_info["station"]["description"]
        self.current_listeners = radio_info["listeners"]["current"]
        self.unique_listeners = radio_info["listeners"]["unique"]
        self.total_listeners = radio_info["listeners"]["total"]
        self.now_playing_playlist = radio_info["now_playing"]["playlist"]
        self.now_playing_song_title = radio_info["now_playing"]["song"]["title"]
        self.now_playing_song_artist = radio_info["now_playing"]["song"]["artist"]
        self.now_playing_song_album = radio_info["now_playing"]["song"]["album"]
        ###
        # What if radio["playing_next"] is None ??
        ###
        self.next_playing_playlist = radio_info["playing_next"]["playlist"]
        self.next_playing_song_title = radio_info["playing_next"]["song"]["title"]
        self.next_playing_song_artist = radio_info["playing_next"]["song"]["artist"]
        self.next_playing_song_album = radio_info["playing_next"]["song"]["album"]
Given the above, I'm trying to figure out how to refactor this so that I can check whether some of the nodes are None and handle that accordingly.
I'm also just starting out with unit testing, and besides error handling I was wondering about best practices for unit testing this object.
Should I create a method for each property and unit test it with pytest.mark.parametrize, for example, or is that considered redundant and unnecessary?
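As a starting point for both questions, a hedged sketch (the class is trimmed to the relevant method, and the test payloads are made up): guard the optional playing_next node with a fallback, and test update_info directly with hand-built payloads via pytest.mark.parametrize rather than writing one test per attribute.

import pytest


class RadioInfo:
    """Trimmed copy for illustration; only the part that changes is shown."""

    def update_info(self, radio_info):
        self.radio_station_name = radio_info["station"]["name"]
        # "playing_next" may be None or missing: fall back to empty dicts so
        # the attributes become None instead of raising TypeError/KeyError.
        playing_next = radio_info.get("playing_next") or {}
        song = playing_next.get("song") or {}
        self.next_playing_playlist = playing_next.get("playlist")
        self.next_playing_song_title = song.get("title")
        self.next_playing_song_artist = song.get("artist")
        self.next_playing_song_album = song.get("album")


@pytest.mark.parametrize("playing_next, expected_title", [
    ({"playlist": "rock", "song": {"title": "t", "artist": "a", "album": "b"}}, "t"),
    (None, None),
])
def test_update_info_handles_missing_playing_next(playing_next, expected_title):
    info = RadioInfo()
    info.update_info({"station": {"name": "s"}, "playing_next": playing_next})
    assert info.next_playing_song_title == expected_title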
I use a JDBC.py script to run performance testing. The Grinder log info:
2015-10-14 18:42:40,132 ERROR com-0 thread-24: aborting thread - {}The result of 'TestRunner()' is not callable
net.grinder.scriptengine.jython.JythonScriptExecutionException: The result of 'TestRunner()' is not callable
at net.grinder.scriptengine.jython.JythonScriptEngine.createWorkerRunnable(JythonScriptEngine.java:183) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderProcess$ThreadStarterImplementation$2.create(GrinderProcess.java:784) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderThread.run(GrinderThread.java:90) ~[grinder-core-3.11.jar:na]
at java.lang.Thread.run(Thread.java:744) [na:1.7.0_45]
2015-10-14 18:42:40,132 ERROR com-0 thread-3: aborting thread - {}The result of 'TestRunner()' is not callable
net.grinder.scriptengine.jython.JythonScriptExecutionException: The result of 'TestRunner()' is not callable
at net.grinder.scriptengine.jython.JythonScriptEngine.createWorkerRunnable(JythonScriptEngine.java:183) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderProcess$ThreadStarterImplementation$2.create(GrinderProcess.java:784) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderThread.run(GrinderThread.java:90) ~[grinder-core-3.11.jar:na]
at java.lang.Thread.run(Thread.java:744) [na:1.7.0_45]
I modified the script, but I still get the error. Please help me check it.
My test script:
# The sorting test supports a configurable array length.
# It runs the JavaTest.sort method of the JavaTest class.
from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from datetime import datetime
from datetime import timedelta
from java.sql import DriverManager
from oracle.jdbc import OracleDriver
########################################
#
# main body of test script starts here
#
########################################
# Get the properties to access test configuration information
properties = grinder.getProperties()
# The description is a property (instead of a hardcoded string in this script)
#test = Test(1, properties.get("javatest.description"))
test = Test(2, properties.get("javatest.description"))
# select the method for which to collect information
# test.record(WriteMulitpleLittleFile.write)
# initialize data for compressing
# fileName = properties.get("javatest.fileToCompress")
# grinder.logger.info("data file to compress is " + fileName)
# JavaTest.initializeCompression(fileName)
# If the run mode is runOnce, the TestRunner class will
# run once. Otherwise, if the run mode is continuous,
# the TestRunner class will run the test for at least
# the specified duration (but possibly longer)
runMode = properties.get("javatest.runMode")
#WriteMulitpleLittleFile.setParameters(dir, fileSize...)
if runMode == "continuous":
    # figure out how long to run the test
    m = int(properties.getProperty("javatest.durationMinutes", "0"))
    h = int(properties.getProperty("javatest.durationHours", "0"))
    d = int(properties.getProperty("javatest.durationDays", "0"))
    duration = timedelta(minutes=m, hours=h, days=d)
    grinder.logger.info("run mode is continuous, duration is " + str(duration))
elif runMode == "runOnce":
    grinder.logger.info("run mode is run once")
    duration = timedelta(minutes=0)
else:
    grinder.logger.info("run mode not set or not recognized, default to run once")
    duration = timedelta(minutes=0)
########################################
#
# The TestRunner class is used by The Grinder to perform the test
#
########################################
#test1 = Test(1, "Database insert")
test2 = Test(2, "Database query")
# Load the Oracle JDBC driver.
DriverManager.registerDriver(OracleDriver())
def getConnection():
    return DriverManager.getConnection(
        "jdbc:oracle:thin:@den00bvr.us.oracle.com:1521:orcl", "PBPUBLIC", "PBPUBLIC")


def ensureClosed(object):
    try: object.close()
    except: pass
# One time initialisation that cleans out old data.
connection = getConnection()
statement = connection.createStatement()
#try: statement.execute("drop table grinder_test1126")
#except: pass
#statement.execute("create table grinder_test1126(thread number, run number)")
ensureClosed(statement)
ensureClosed(connection)
class TestRunner:
    def __init__(self):
        # tid = grinder.threadNumber
        # if (grinder.threadNumber % 2 == 0):
        #     Even threadNumber
        #     Do insertStatement
        # else:
        #     Odd threadNumber
        #     Do queryStatement
        # def __call__(self):
        #     self.testRunner()
        endTime = datetime.now() + duration
        notDone = True
        while notDone:
            connection = None
            insertStatement = None
            queryStatement = None
            notDone = datetime.now() < endTime
            try:
                connection = getConnection()
                # insertStatement = connection.createStatement()
                queryStatement = connection.createStatement()
                # test1.record(insertStatement)
                # insertStatement.execute("insert into grinder_test1126 values(%d, %d)" %
                #     (grinder.threadNumber, grinder.runNumber))
                test2.record(queryStatement)
                queryStatement.execute("select * from employee")
            finally:
                # ensureClosed(insertStatement)
                ensureClosed(queryStatement)
                ensureClosed(connection)
According to the documentation:

The TestRunner instance must be callable. A Python object is callable if it defines a __call__ method. Each worker thread performs a number of runs of the test script, as configured by the property grinder.runs. For each run, the worker thread calls its TestRunner; thus the __call__ method can be thought of as the definition of a run.

Your script requires a __call__ method in order for the TestRunner to be callable.
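For completeness, a minimal sketch of the shape The Grinder expects, reusing the names from the script above (getConnection, ensureClosed, test2, duration): one-time setup stays in __init__, and the per-run work moves into __call__.

class TestRunner:
    def __init__(self):
        # per-thread, one-time setup only
        self.endTime = datetime.now() + duration

    def __call__(self):
        # executed once per run; this is what The Grinder invokes
        notDone = True
        while notDone:
            notDone = datetime.now() < self.endTime
            connection = None
            queryStatement = None
            try:
                connection = getConnection()
                queryStatement = connection.createStatement()
                test2.record(queryStatement)
                queryStatement.execute("select * from employee")
            finally:
                ensureClosed(queryStatement)
                ensureClosed(connection)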
I'm using IPython's parallel features:
client = Client()
client.direct_view().use_dill()
def hostname():
import socket
return socket.gethostname()
# For joblib, we only want one client per host.
joblib_clients = dict(zip(map(lambda x: client[x].apply_sync(hostname), client.ids), client.ids))
lview_joblib = client.load_balanced_view(targets=joblib_clients.values())
dview = client.direct_view()
I also read in data from disk, both locally and on each engine:
%%px --local
store = pd.HDFStore(data_file, 'r')
rows = store.select('results', ['cv_score_mean > 0'])
rows = rows.sort('cv_score_mean', ascending=False)
rows['results_index'] = rows.index
data_model = store.select('data_model')
p = re.compile('_coef$')
feature_set = rows.filter(regex='_coef$').dropna(axis=1, how='all').rename(columns=lambda x: p.sub('',x)).columns
Then I define and instantiate a class on all the engines (but not locally).
%%px
class DataRegression(object):
    def __init__(self, **kwargs): ...
    ...
    def regress_z_scores(self, **kwargs):
        ...

regressions = DataRegression(data_model, feature_set)
There are a few ways I can think of to call this function through my load-balanced view:
1) Through a lambda function:
# What exactly is this lambda function passing to the lview?
ar = lview_joblib.map(lambda run: regressions.regress_z_scores(run), runs)
2) By trying to invoke the function directly:
# This fails with a NameError, because regressions.regress_z_scores is not defined
ar = lview_joblib.map(regressions.regress_z_scores, runs)
3) By creating the regressions object locally as well:
%%px --local
class DataRegression(object):
    def __init__(self, **kwargs): ...
    ...
    def regress_z_scores(self, **kwargs):
        ...

regressions = DataRegression(data_model, feature_set)

# And invoking it through the name.
ar = lview_joblib.map(regressions.regress_z_scores, runs)
# Does this mean that the local object gets pickled and passed to the client each time?
In each of these cases, how is the load-balanced view's map function actually executing this function call? Is there a best practice?
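To make the trade-off concrete, one pattern I believe avoids shipping the local object at all (a sketch only, not verified against this exact setup): map a small named function whose body looks up regressions, relying on the engines already holding a regressions instance from the %%px block and on the function's globals resolving in the engine's namespace, which is how ipyparallel usually treats interactively defined functions.

# Sketch only: assumes each engine already has `regressions` defined by the
# %%px block above, so only `run_regression` and each `run` are serialized.
def run_regression(run):
    return regressions.regress_z_scores(run)

ar = lview_joblib.map(run_regression, runs)
results = ar.get()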