I'm trying to add unit tests to some code I'm building, but I'm seeing odd behavior: even though I set the return value of a mocked function call to False, the related branch does not execute, so the assertion instance.fail_json.assert_called_with(msg='Not enough parameters specified.') fails.
Is there something else I need to be setting?
project.py:
from ansible.module_utils.basic import AnsibleModule


def main():
    # define the available arguments/parameters that a user can pass
    # to the module
    module_args = dict(
        name=dict(type='str', required=True),
        ticktype=dict(type='str'),
        path=dict(type='str'),
        dbrp=dict(type='str'),
        state=dict(type='str', required=True, choices=["present", "absent"]),
        enable=dict(type='str', default="no", choices=["yes", "no", "da", "net"])
    )
    required_if = [
        ["state", "present", ["name", "type", "path", "dbrp", "enabled"]],
        ["state", "absent", ["name"]]
    ]
    # seed the result dict in the object
    # we primarily care about changed and state
    # changed is whether this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
        original_message='',
        message=''
    )
    # the AnsibleModule object is our abstraction for working with Ansible;
    # this includes instantiation, plus common attributes such as the
    # args/params passed to the execution and whether the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )
    # if the user is working with this module in check mode only, we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    if module.check_mode:
        return result
    return_val = run_module(module)
    return_val = True
    if return_val is True:
        module.exit_json(changed=True, msg="Project updated.")
    else:
        module.fail_json(changed=True, msg="Not enough parameters found.")
test_project.py:
from unittest.mock import patch  # or: from mock import patch (Python 2)
from nose.tools import assert_equals

from library import project  # module under test, path taken from the patch targets


@patch('library.project.run_module')
@patch('library.project.AnsibleModule')
def test_main_exit_functionality_failure(mock_module, mock_run_module):
    """
    project - test_main_exit_functionality - failure
    """
    instance = mock_module.return_value
    # What happens when run_module returns False,
    # i.e. when run_module fails
    mock_run_module.return_value = False
    project.main()
    # AnsibleModule.exit_json should not be called
    assert_equals(instance.exit_json.call_count, 0)
    # AnsibleModule.fail_json should be called
    instance.fail_json.assert_called_with(msg='Not enough parameters specified.')
Re-read your production code. It sets return_val to True on the line before checking if it's True:
...
return_val = run_module(module)
return_val = True
if return_val is True:
...
return_val is always True no matter what run_module returns, so regardless of what you do in your test, the production code will always take the 'true' branch of the if/else check.
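A minimal sketch of the fix: drop the line that overwrites return_val, so whatever run_module (or your mock) returns actually drives the branch. Note that the fail_json message text also has to match the string your test asserts ('specified' vs 'found' in the posted code):

return_val = run_module(module)

if return_val is True:
    module.exit_json(changed=True, msg="Project updated.")
else:
    module.fail_json(changed=True, msg="Not enough parameters specified.")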
The following function is used within a module to query network devices and is called from multiple scripts I use. The arguments it takes are a nested dictionary (the device IP and creds etc) and a string (the command to run on the device):
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException


def query_devices(hosts, cmd):
    results = {
        'success': [],
        'failed': [],
    }
    for host in hosts:
        device = hosts[host]
        try:
            swp = ConnectHandler(device_type=device['dev_type'],
                                 ip=device['ip'],
                                 username=device['user'],
                                 password=device['pwd'],
                                 secret=device['secret'])
            swp.enable()
            results[host] = swp.send_command(cmd)
            results['success'].append(host)
            swp.disconnect()
        except (NetMikoTimeoutException, NetMikoAuthenticationException) as e:
            results['failed'].append(host)
            results[host] = e
    return results
I want to reuse all of the code to update a device and the only changes would be:
The function would take the same dictionary but the cmd argument would now be a list of commands.
The following line:
results[host] = swp.send_command(cmd)
would be replaced by:
results[host] = swp.send_config_set(cmd)
I could obviously just duplicate the function and make those two changes, and since it lives in a module I reuse I would only have to do it once, but I would still be repeating a lot of the same code.
Is there a better way to do this? I seem to run into the same issue quite often in my code.
You could just add a check on the changed line:
...
if isinstance(cmd, str):
    results[host] = swp.send_command(cmd)
else:
    results[host] = swp.send_config_set(cmd)
...
The rest of the function can stay the same and now you can simply call it with either a string or a list of strings...
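For example (illustrative calls, assuming a hosts dict like the one in the question):

out = query_devices(hosts, 'show version')                           # a string goes to send_command
out = query_devices(hosts, ['interface Gi0/1', 'description test'])  # a list goes to send_config_set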
You could use the unpacking operator to always make cmds an iterable (a tuple, actually) even if it is a single value. That way you could always call send_config_set. Here is a super simplified example to illustrate the concept.
def query_devices(hosts, *cmds):
    for one_cmd in cmds:
        print(one_cmd)


print('query_devices("hosts", "cmd_1")')
query_devices("hosts", "cmd_1")

print('\nquery_devices("hosts", "cmd_1", "cmd_2", "cmd_3")')
query_devices("hosts", "cmd_1", "cmd_2", "cmd_3")

print('\nquery_devices("hosts", *["cmd_1", "cmd_2", "cmd_3"])')
query_devices("hosts", *["cmd_1", "cmd_2", "cmd_3"])
Output:
query_devices("hosts", "cmd_1")
cmd_1
query_devices("hosts", "cmd_1", "cmd_2", "cmd_3")
cmd_1
cmd_2
cmd_3
query_devices("hosts", *["cmd_1", "cmd_2", "cmd_3"])
cmd_1
cmd_2
cmd_3
I'm trying to call RegisterWaitForSingleObject on a file handle in Python to check asynchronously whether there's data. My understanding is that I can create a callback function in Python, pass it to RegisterWaitForSingleObject, and as soon as there's data to read, the callback function will be called.
Here's my current implementation:
import ctypes
import ctypes.wintypes
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID

kernel32 = ctypes.windll.kernel32


def _decl(name, ret=None, args=(), module=kernel32):
    fn = getattr(module, name)
    fn.restype = ret
    fn.argtypes = args
    return fn


WAITORTIMERCALLBACK = ctypes.WINFUNCTYPE(
    None,                    # return value: VOID
    ctypes.wintypes.LPVOID,  # PVOID lpParameter
    ctypes.wintypes.BOOL     # BOOLEAN TimerOrWaitFired
)

new_wait_object = HANDLE()
RegisterWaitForSingleObject = _decl(
    "RegisterWaitForSingleObject",
    BOOL,
    (ctypes.POINTER(HANDLE), HANDLE, WAITORTIMERCALLBACK, LPVOID, DWORD, DWORD)
)


def waitortimercallback(lp_parameter, timer_fired):
    print('Data available')


c_waitortimercallback = WAITORTIMERCALLBACK(waitortimercallback)

# overlapped.hEvent and timeout_mil come from code not shown here
result = RegisterWaitForSingleObject(
    ctypes.byref(new_wait_object),  # phNewWaitObject
    overlapped.hEvent,              # hObject
    c_waitortimercallback,          # Callback
    None,                           # Context
    timeout_mil,                    # dwMilliseconds
    0,                              # dwFlags
)
I'm not sure I've done everything (or anything) correctly, but I have had success previously with WaitForSingleObject. Unfortunately, I couldn't find any examples of using RegisterWaitForSingleObject from Python, and I probably defined some types incorrectly.
When I run my code, RegisterWaitForSingleObject returns 1 (success), but as soon as data is written, I get a "Segmentation fault" error.
I would appreciate any input.
General question: how can you avoid rebuilding a model for every inference request?
I'm trying to develop a web service that hosts multiple trained models which can be used to request a prediction. Producing a result is currently very time consuming because the model has to be rebuilt for each request.
The inference itself only takes 30 ms, but importing the model takes more than a second.
I'm having difficulty splitting the import and the inference into two separate methods because of the session that is needed.
The solution I came up with is to use an InteractiveSession stored in an instance variable. On creation of the object, the model is loaded inside this session, which remains open. When a request is submitted, the preloaded model is then used to generate the result.
Problem with this solution:
When creating several of these objects for different models, multiple InteractiveSessions are open at the same time, and TensorFlow generates the following warning:
Nesting violated for default stack of <class 'tensorflow.python.framework.ops.Graph'> objects
Any ideas on how to manage multiple sessions and preload the models?
import importlib

import numpy as np
import tensorflow as tf

# project-specific helpers (model_helper, nmt_utils, utils, tokenize_input_string,
# untokenize_output_string) are assumed to be importable from the g2p_tensor NMT package


class model_inference:
    def __init__(self, language_name, base_module="models"):
        """
        Load a network that can be used to perform inference.

        Args:
            language_name (str): The name of an importable language class,
                returning an instance of `BaseLanguageModel`. This class
                should be importable from `base_module`.
            base_module (str): The module from which to import the
                `language_name` class.

        Attributes:
            ckpt (str): The model checkpoint value.
            infer_model (g2p_tensor.nmt.model_helper.InferModel):
                The language infer_model instance.
        """
        language_instance = getattr(
            importlib.import_module(base_module), language_name
        )()
        self.ckpt = language_instance.checkpoint
        self.infer_model = language_instance.infer_model
        self.hparams = language_instance.hparams
        self.rebuild_infer_model()

    def rebuild_infer_model(self):
        """
        Recreate the infer model after changing hparams.
        This is time consuming.
        :return:
        """
        self.session = tf.InteractiveSession(
            graph=self.infer_model.graph, config=utils.get_config_proto()
        )
        self.model = model_helper.load_model(
            self.infer_model.model, self.ckpt, self.session, "infer"
        )

    def infer_once(self, in_string):
        """
        Entry point of the service; should not contain rebuilding of the model.
        """
        in_data = tokenize_input_string(in_string)
        self.session.run(
            self.infer_model.iterator.initializer,
            feed_dict={
                self.infer_model.src_placeholder: [in_data],
                self.infer_model.batch_size_placeholder: self.hparams.infer_batch_size,
            },
        )
        subword_option = self.hparams.subword_option
        beam_width = self.hparams.beam_width
        tgt_eos = self.hparams.eos
        num_translations_per_input = self.hparams.num_translations_per_input
        num_sentences = 0
        num_translations_per_input = max(
            min(num_translations_per_input, beam_width), 1
        )
        nmt_outputs, _ = self.model.decode(self.session)
        if beam_width == 0:
            nmt_outputs = np.expand_dims(nmt_outputs, 0)
        batch_size = nmt_outputs.shape[1]
        num_sentences += batch_size
        for sent_id in range(batch_size):
            for beam_id in range(num_translations_per_input):
                translation = nmt_utils.get_translation(
                    nmt_outputs[beam_id],
                    sent_id,
                    tgt_eos=tgt_eos,
                    subword_option=subword_option,
                )
        return untokenize_output_string(translation.decode("utf-8"))

    def __del__(self):
        self.session.close()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.session.close()
With the help of jdehesa's comments I understood what went wrong.
When you do not specify which graph should be used, TensorFlow creates a new graph instance and adds the operations to it. That's why simply changing the InteractiveSession to a regular Session (to avoid nesting interactive sessions) throws a new error: ValueError: Operation name: "init_all_tables" op: "NoOp" is not an element of this graph.
Using an InteractiveSession worked because it sets the given graph as the default instead of creating a new instance. The problem with an InteractiveSession is that it is bad practice to leave multiple sessions open at the same time; TensorFlow will throw a warning.
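A tiny illustration of that rule (assuming the TF 1.x API used in this question; tf.no_op is just a stand-in for the real model ops):

import tensorflow as tf

g = tf.Graph()
sess = tf.Session(graph=g)

outside = tf.no_op()      # created in the global default graph, not in g
with g.as_default():
    inside = tf.no_op()   # created in g

sess.run(inside)          # fine: the op belongs to the session's graph
# sess.run(outside)       # would fail: the op is not an element of this graph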
The solution was the following:
When changing the InteractiveSession to a normal Session you need to explicitly define in which graph you want to reload the model with model_helper.load_model.
This can be done by defining a context: with self.infer_model.graph.as_default():
The eventual solution was the following:
def rebuild_infer_model(self):
    """
    Recreate the infer model after changing hparams.
    This is time consuming.
    :return:
    """
    self.session = tf.Session(
        graph=self.infer_model.graph, config=utils.get_config_proto()
    )
    # added line:
    with self.infer_model.graph.as_default():  # the model should be loaded within the same graph as when inferring!
        model_helper.load_model(
            self.infer_model.model, self.ckpt, self.session, "infer"
        )

def infer_once(self, in_string):
    """
    Turn an orthographic transcription into a phonetic transcription.
    The transcription is processed all at once.
    Long transcriptions may result in incomplete phonetic output.
    :param in_string: orthographic transcription
    :return: string of the phonetic representation
    """
    # added line:
    with self.infer_model.graph.as_default():
        in_data = tokenize_input_string(in_string)
        self.session.run(
            self.infer_model.iterator.initializer,
            feed_dict={
                self.infer_model.src_placeholder: [in_data],
                self.infer_model.batch_size_placeholder: self.hparams.infer_batch_size,
            },
        )
        subword_option = self.hparams.subword_option
        beam_width = self.hparams.beam_width
        tgt_eos = self.hparams.eos
        num_translations_per_input = self.hparams.num_translations_per_input
        num_sentences = 0
        num_translations_per_input = max(
            min(num_translations_per_input, beam_width), 1
        )
        nmt_outputs, _ = self.infer_model.model.decode(self.session)
        if beam_width == 0:
            nmt_outputs = np.expand_dims(nmt_outputs, 0)
        batch_size = nmt_outputs.shape[1]
        num_sentences += batch_size
        for sent_id in range(batch_size):
            for beam_id in range(num_translations_per_input):
                translation = nmt_utils.get_translation(
                    nmt_outputs[beam_id],
                    sent_id,
                    tgt_eos=tgt_eos,
                    subword_option=subword_option,
                )
        return untokenize_output_string(translation.decode("utf-8"))
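With this in place, several models can stay loaded side by side, one Session per graph. A hypothetical usage sketch (the model names "English" and "Dutch" are placeholders, not from the original code):

english = model_inference("English")  # each instance loads its model once, at startup
dutch = model_inference("Dutch")

# later, per web request, only the cheap infer_once call runs
print(english.infer_once("hello"))
print(dutch.infer_once("hallo"))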
I'd like to add metadata to individual tests in a TestCase that I've written to use Python's unittest framework. The metadata (a string, really) needs to be carried through the testing process and output to an XML file.
Other than staying with the test, the data isn't going to be used by unittest or by my test code. (I've got a program that will run afterwards, open the XML file, and go looking for the metadata/string.)
I've previously used NUnit which allows one to use C# attribute to do this. Specifically, you can put this above a class:
[Property("SmartArrayAOD", -3)]
and then later find that in the XML output.
Is it possible to attach metadata to a test in Python's unittest?
Simple way to just dump XML
If all you want to do is write stuff to an XML file after every unit test, just add a tearDown method to your test class (e.g. if you have a class MyTest, give it a tearDown method like the one below).
class MyTest(unittest.TestCase):
    def tearDown(self):
        dump_xml_however_you_do()

    def test_whatever(self):
        pass
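For instance, a minimal sketch of what dump_xml_however_you_do() might look like using only the standard library (the file name and property values here are made up, not part of the original post); call it from tearDown as dump_xml_however_you_do(self.id()):

import xml.etree.ElementTree as ET

def dump_xml_however_you_do(test_id, value='-3'):
    # write a small XML file containing one <property> element for this test
    root = ET.Element('properties')
    ET.SubElement(root, 'property', name='SmartArrayAOD', value=value, test=test_id)
    ET.ElementTree(root).write('test_metadata_%s.xml' % test_id)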
General method
If you want a general way to collect and track metadata from all your tests and return it at the end, try creating an astropy table in your test class's __init__() and adding rows to it during tearDown(), then extracting a reference to your initialized instances of your test class from unittest, like this:
Step 1: set up a re-usable subclass of unittest.TestCase so we don't have to duplicate the table handling
(put all the example code in the same file or copy the imports)
"""
Demonstration of adding and retrieving meta data from python unittest tests
"""
import sys
import warnings
import unittest
import copy
import time
import astropy
import astropy.table
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class DemoTest(unittest.TestCase):
"""
Demonstrates setup of an astropy table in __init__, adding data to the table in tearDown
"""
def __init__(self, *args, **kwargs):
super(DemoTest, self).__init__(*args, **kwargs)
# Storing results in a list made it convenient to aggregate them later
self.results_tables = [astropy.table.Table(
names=('Name', 'Result', 'Time', 'Notes'),
dtype=('S50', 'S30', 'f8', 'S50'),
)]
self.results_tables[0]['Time'].unit = 'ms'
self.results_tables[0]['Time'].format = '0.3e'
self.test_timing_t0 = 0
self.test_timing_t1 = 0
def setUp(self):
self.test_timing_t0 = time.time()
def tearDown(self):
test_name = '.'.join(self.id().split('.')[-2:])
self.test_timing_t1 = time.time()
dt = self.test_timing_t1 - self.test_timing_t0
# Check for errors/failures in order to get state & description. https://stackoverflow.com/a/39606065/6605826
if hasattr(self, '_outcome'): # Python 3.4+
result = self.defaultTestResult() # these 2 methods have no side effects
self._feedErrorsToResult(result, self._outcome.errors)
problem = result.errors or result.failures
state = not problem
if result.errors:
exc_note = result.errors[0][1].split('\n')[-2]
elif result.failures:
exc_note = result.failures[0][1].split('\n')[-2]
else:
exc_note = ''
else: # Python 3.2 - 3.3 or 3.0 - 3.1 and 2.7
# result = getattr(self, '_outcomeForDoCleanups', self._resultForDoCleanups) # DOESN'T WORK RELIABLY
# This is probably only good for python 2.x, meaning python 3.0, 3.1, 3.2, 3.3 are not supported.
exc_type, exc_value, exc_traceback = sys.exc_info()
state = exc_type is None
exc_note = '' if exc_value is None else '{}: {}'.format(exc_type.__name__, exc_value)
# Add a row to the results table
self.results_tables[0].add_row()
self.results_tables[0][-1]['Time'] = dt*1000 # Convert to ms
self.results_tables[0][-1]['Result'] = 'pass' if state else 'FAIL'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=astropy.table.StringTruncateWarning)
self.results_tables[0][-1]['Name'] = test_name
self.results_tables[0][-1]['Notes'] = exc_note
Step 2: set up a test manager that extracts metadata
def manage_tests(tests):
    """
    Function for running tests and extracting meta data
    :param tests: list of classes sub-classed from DemoTest
    :return: (TextTestResult, Table, string)
        result returned by unittest
        astropy table
        string: formatted version of the table
    """
    table_sorting_columns = ['Result', 'Time']
    # Build test suite
    suite_list = []
    for test in tests:
        suite_list.append(unittest.TestLoader().loadTestsFromTestCase(test))
    combo_suite = unittest.TestSuite(suite_list)
    # Run tests
    results = [unittest.TextTestRunner(verbosity=1, stream=StringIO(), failfast=False).run(combo_suite)]
    # Catch test classes
    suite_tests = []
    for suite in suite_list:
        suite_tests += suite._tests
    # Collect results tables
    results_tables = []
    for suite_test in suite_tests:
        if getattr(suite_test, 'results_tables', [None])[0] is not None:
            results_tables += copy.copy(suite_test.results_tables)
    # Process tables, if any
    if len(results_tables):
        a = []
        while (len(a) == 0) and len(results_tables):
            a = results_tables.pop(0)  # Skip empty tables, if any
        results_table = a
        for rt in results_tables:
            if len(rt):
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=DeprecationWarning)
                    results_table = astropy.table.join(results_table, rt, join_type='outer')
        try:
            results_table = results_table.group_by(table_sorting_columns)
        except Exception:
            print('Error sorting test results table. Columns may not be in the preferred order.')
        column_names = list(results_table.columns.keys())
        alignments = ['<' if cn == 'Notes' else '>' for cn in column_names]
        if len(results_table):
            rtf = '\n'.join(results_table.pformat(align=alignments, max_width=-1))
            exp_res = sum([result.testsRun - len(result.skipped) for result in results])
            if len(results_table) != exp_res:
                print('ERROR forming results table. Expected {} results, but table length is {}.'.format(
                    exp_res, len(results_table),
                ))
        else:
            rtf = None
    else:
        results_table = rtf = None
    return results, results_table, rtf
Step 3: Example usage
class FunTest1(DemoTest):
    @staticmethod
    def test_pass_1():
        pass

    @staticmethod
    def test_fail_1():
        assert False, 'Meant to fail for demo 1'


class FunTest2(DemoTest):
    @staticmethod
    def test_pass_2():
        pass

    @staticmethod
    def test_fail_2():
        assert False, 'Meant to fail for demo 2'


res, tab, form = manage_tests([FunTest1, FunTest2])
print(form)
print('')
for r in res:
    print(r)
    for error in r.errors:
        print(error[0])
        print(error[1])
Sample results:
$ python unittest_metadata.py
                Name Result      Time Notes
                                   ms
-------------------- ------ --------- ----------------------------------------
FunTest2.test_fail_2   FAIL 5.412e-02 AssertionError: Meant to fail for demo 2
FunTest1.test_fail_1   FAIL 1.118e-01 AssertionError: Meant to fail for demo 1
FunTest2.test_pass_2   pass 6.199e-03
FunTest1.test_pass_1   pass 6.914e-03
<unittest.runner.TextTestResult run=4 errors=0 failures=2>
This should work with Python 2.7 or 3.7. You can add whatever columns you want to the table, and you can add parameters and other data to the table in setUp, tearDown, or even during the tests themselves.
Warnings:
This solution accesses a protected attribute _tests of unittest.suite.TestSuite, which can have unexpected results. This specific implementation works as expected for me in Python 2.7 and Python 3.7, but slight variations in how the suite is built and interrogated can easily lead to strange things happening. I couldn't figure out a different way to extract references to the instances of my classes that unittest uses, though.
I'd like to be able to query Rally for an existing defect and then copy that defect, changing only a couple of fields while keeping all attachments. Is there a simple way to do this? I tried calling rally.create and passing the existing defect object, but it failed to serialize all members into JSON. Ultimately, it would be nice if pyral were extended to include this kind of functionality.
Instead, I've written some code to copy each python-native attribute of the existing defect and then use .ref for everything else. It seems to be working quite well. I've leveraged Mark W's code for copying attachments and that's working great also. One remaining frustration is that copying the iteration isn't working. When I call .ref on the Iteration attribute, I get this:
>>> s
<pyral.entity.Defect object at 0x029A74F0>
>>> s.Iteration
<pyral.entity.Iteration object at 0x029A7710>
>>> s.Iteration.ref
No classFor item for |UserIterationCapacity|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\python27\lib\site-packages\pyral\entity.py", line 119, in __getattr__
hydrateAnInstance(self._context, item, existingInstance=self)
File "c:\python27\lib\site-packages\pyral\restapi.py", line 77, in hydrateAnInstance
return hydrator.hydrateInstance(item, existingInstance=existingInstance)
File "c:\python27\lib\site-packages\pyral\hydrate.py", line 62, in hydrateInstance
self._setAppropriateAttrValueForType(instance, attrName, attrValue, 1)
File "c:\python27\lib\site-packages\pyral\hydrate.py", line 128, in _setAppropriateAttrValueForType
elements = [self._unravel(element) for element in attrValue]
File "c:\python27\lib\site-packages\pyral\hydrate.py", line 162, in _unravel
return self._basicInstance(thing)
File "c:\python27\lib\site-packages\pyral\hydrate.py", line 110, in _basicInstance
raise KeyError(itemType)
KeyError: u'UserIterationCapacity'
>>>
Does this look like an issue with Rally or perhaps an issue with a custom field that our project admin might have caused? I was able to work around it by building the ref from the oid:
newArtifact["Iteration"] = { "_ref": "iteration/" + currentArtifact.Iteration.oid }
This feels kludgy to me though.
Check out the Python answer (there are two answers for it; the other one is for Ruby) to this question:
Rally APIs: How to copy Test Folder and member Test Cases
The answer contains a Python script that copies Test Cases along with their attachments. Although the script is for Test Cases, the logic should be readily adaptable to Defects, as the operations are fundamentally the same; only the field attributes differ. The script copies Attachments, Tags, etc., pretty much the whole artifact.
Final solution including Mark W's code for copying attachments
import base64
import sys
import types

import pyral


def getDataCopy(data):
    """ Given a piece of data, figure out how to copy it. If it's a native python type
        like a string or numeric, just return the value. If it's a rally object, return
        the ref to it. If it's a list, iterate and call ourself recursively for the
        list members. """
    if isinstance(data, types.ListType):
        copyData = []
        for entry in data:
            copyData.append(getDataCopy(entry))
    elif hasattr(data, "ref"):
        copyData = {"_ref": data.ref}
    else:
        copyData = data
    return copyData


def getArtifactCopy(artifact):
    """ Build a dictionary based on the values in the specified artifact. This dictionary
        can then be passed to a rallyConn.put() call to actually create the new entry in
        Rally. Attachments and Tasks must be copied separately, since they require creation
        of additional artifacts """
    newArtifact = {}
    for attrName in artifact.attributes():
        # Skip the attributes that we can't or shouldn't handle directly
        if attrName.startswith("_") or attrName == "oid" or attrName == "Iteration" or attrName == "Attachments":
            continue
        attrValue = getattr(artifact, attrName)
        newArtifact[attrName] = getDataCopy(attrValue)
    if getattr(artifact, "Iteration", None) != None:
        newArtifact["Iteration"] = {"_ref": "iteration/" + artifact.Iteration.oid}
    return newArtifact


def copyAttachments(rallyConn, oldArtifact, newArtifact):
    """ For each attachment in the old artifact, create new attachments and attach them to the new artifact """
    # Copy Attachments
    source_attachments = rallyConn.getAttachments(oldArtifact)
    for source_attachment in source_attachments:
        # First copy the content
        source_attachment_content = source_attachment.Content
        target_attachment_content_fields = {"Content": base64.encodestring(source_attachment_content)}
        try:
            target_attachment_content = rallyConn.put('AttachmentContent', target_attachment_content_fields)
            print "\t===> Copied AttachmentContent: %s" % target_attachment_content.ref
        except pyral.RallyRESTAPIError, details:
            sys.stderr.write('ERROR: %s \n' % details)
            sys.exit(2)
        # Next copy the attachment object
        target_attachment_fields = {
            "Name": source_attachment.Name,
            "Description": source_attachment.Description,
            "Content": target_attachment_content.ref,
            "ContentType": source_attachment.ContentType,
            "Size": source_attachment.Size,
            "User": source_attachment.User.ref
        }
        # Attach it to the new artifact
        target_attachment_fields["Artifact"] = newArtifact.ref
        try:
            target_attachment = rallyConn.put(source_attachment._type, target_attachment_fields)
            print "\t===> Copied Attachment: '%s'" % target_attachment.Name
        except pyral.RallyRESTAPIError, details:
            sys.stderr.write('ERROR: %s \n' % details)
            sys.exit(2)


def copyTasks(rallyConn, oldArtifact, newArtifact):
    """ Iterate over the old artifact's tasks and create new ones, attaching them to the new artifact """
    for currentTask in oldArtifact.Tasks:
        newTask = getArtifactCopy(currentTask)
        # Assign the new task to the new artifact
        newTask["WorkProduct"] = newArtifact.ref
        # Push the new task into rally
        newTaskObj = rallyConn.put(currentTask._type, newTask)
        # Copy any attachments the task had
        copyAttachments(rallyConn, currentTask, newTaskObj)


def copyDefect(rallyConn, currentDefect, addlUpdates={}):
    """ Copy a defect including its attachments and tasks. Add the new defect as a
        duplicate to the original """
    newArtifact = getArtifactCopy(currentDefect)
    # Add the current defect as a duplicate for the new one
    newArtifact["Duplicates"].append({"_ref": currentDefect.ref})
    # Copy in any updates that might be needed
    for (attrName, attrValue) in addlUpdates.items():
        newArtifact[attrName] = attrValue
    print "Copying %s: %s..." % (currentDefect.Project.Name, currentDefect.FormattedID),
    newDefect = rallyConn.create(currentDefect._type, newArtifact)
    print "done, new item", newDefect.FormattedID
    print "\tCopying attachments"
    copyAttachments(rallyConn, currentDefect, newDefect)
    print "\tCopying tasks"
    copyTasks(rallyConn, currentDefect, newDefect)
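A hypothetical usage sketch (Python 2, matching the code above): the server, credentials, workspace/project names, and the defect's FormattedID below are placeholders, and the connection/query calls follow what I believe is standard pyral usage.

rally = pyral.Rally('rally1.rallydev.com', 'user@example.com', 'password',
                    workspace='My Workspace', project='My Project')
# fetch a single defect instance by its FormattedID
defect = rally.get('Defect', query='FormattedID = DE1234', instance=True)
# copy it, overriding whatever fields need to change on the new item
copyDefect(rally, defect, addlUpdates={'Name': defect.Name + ' (copy)'})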