I have 2 doit tasks, one having a dependency on the other. For example:
def task_deploy():
    return {
        'actions': ['do some deploy commands'],
        'file_dep': ['dist'],
        'params': [{'name': 'projectName',
                    'short': 'p',
                    'long': 'projectName',
                    'default': 'project',
                    'type': str,
                    'help': 'The project name to deploy.'}]
    }
def task_create_distribution_archive():
    return {
        'actions': ['do something that requires projectName'],
        'doc': 'Creates a zip archive of the application in "dist"',
        'targets': ['dist']
    }
Is there a way to share or pass the arguments of a task to another one? I have read pretty much everything I could on task creation and dependency on pydoit.org, but haven't found anything similar to what I want.
I am aware that I could use yield to create these two tasks at the same time, but I'd like to use a parameter when executing the task, not when I am creating it.
Is there a way to share or pass the arguments of a task to another one?
Yes. Using getargs: http://pydoit.org/dependencies.html#getargs
In your example, you would need to add another action to the task deploy just to save the passed parameter.
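A minimal sketch of that mechanism follows; the helper task name (params) and the action functions are my own hypothetical names, not from the question. A python-action that returns a dict saves those values, and getargs lets another task read them:
def task_params():
    def save_params(projectName):
        # The dict returned by a python-action is saved as the task's "values".
        return {'projectName': projectName}
    return {
        'actions': [save_params],
        'params': [{'name': 'projectName',
                    'short': 'p',
                    'default': 'project',
                    'type': str,
                    'help': 'The project name to deploy.'}],
    }

def task_create_distribution_archive():
    def archive(projectName):
        print('archiving project %s into "dist"' % projectName)
    return {
        # getargs injects the saved value as a keyword argument and also
        # makes this task wait for task "params" to run first.
        'getargs': {'projectName': ('params', 'projectName')},
        'actions': [archive],
        'targets': ['dist'],
    }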
Alternatively, you could just use a global variable like commonCommand. If you have more complex needs, create a class to handle it.
class ComplexCommonParams(object):
    def __init__(self):
        self.command = 'echo'

params = ComplexCommonParams()

commonCommand = 'echo'

def task_x():
    global commonCommand
    return {
        'actions': [commonCommand + ' Hello2 > asdf'],
        'targets': ['asdf']
    }

def task_y():
    global commonCommand
    return {
        'actions': [commonCommand + ' World'],
        'file_dep': ['asdf'],
        'verbosity': 2
    }
I am publishing a dict as a JSON string with the following node:
import json
import rospy
from std_msgs.msg import String

if __name__ == '__main__':
    rospy.init_node('grounding_node_temp')
    rospy.sleep(1.0)
    pub = rospy.Publisher('/task_commands', String, queue_size=10, latch=True)
    rospy.sleep(1.0)
    d = {"storage_left": ['book', 'eraser', 'soap2'],
         "storage_right": ['snacks', 'biscuits', 'glue', 'soap']}
    pub.publish(json.dumps(d))
    rospy.sleep(1.0)
    rospy.spin()
How can I subscribe to this dict information?
Or, better, how can I use this information in other code so that I can obtain the lists of objects?
In the typical talker.py / listener.py example, talker.py calls rospy.loginfo() and listener.py has a callback function with rospy.loginfo(rospy.get_caller_id() ...) that receives the information from talker.py. In the same way, I want to get the information from the dict object above, but I don't know how to do so.
You can do this with a plain import. For example, say my_script.py contains the following code:
my_dict = {
    'id': 1
}
You can then access the dict from another module using an import statement:
import my_script
print(my_script.my_dict)  # {'id': 1}
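If you want to receive the dict through ROS topics instead, a minimal listener sketch could look like the following (the node name is hypothetical), assuming the latched publisher from the question; json.loads turns the String payload back into a dict:
import json
import rospy
from std_msgs.msg import String

def callback(msg):
    # Decode the JSON payload back into a dict.
    d = json.loads(msg.data)
    rospy.loginfo('storage_left contains: %s', d['storage_left'])

if __name__ == '__main__':
    rospy.init_node('grounding_listener')
    rospy.Subscriber('/task_commands', String, callback)
    rospy.spin()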
I'm writing a script that will insert a new elbv2 listener rule on top of the listener rules already in the ALB:
response = elbv2_client.create_rule(
    ListenerArn=listener_arn,
    Priority=1,
    Conditions=[
        {
            'Field': 'http-request-method',
            'HttpRequestMethodConfig': {
                'Values': ['GET']
            }
        }
    ],
    Actions=[
        {
            'Type': 'fixed-response',
            'FixedResponseConfig': {
                'ContentType': 'text/html',
                'MessageBody': html_object,
                'StatusCode': '504'
            }
        }
    ]
)
Just as you would do on the AWS console when you insert a new rule on top and the rules automatically renumber themselves.
The problem is that the rule['Priority'] number is not the same as the one shown in the AWS console (as stated in the note here: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-update-rules.html):
Note:
The console displays a relative sequence number for each rule, not the rule priority.
You can get the priority of a rule by describing it using the AWS CLI or the Elastic Load Balancing API.
And when I use elbv2 create_rule(), it gives me this error if the topmost rule is already set to Priority = 1:
botocore.errorfactory.PriorityInUseException: An error occurred (PriorityInUse) when calling the CreateRule operation: Priority '1' is currently in use
TIA!
My solution to this came from @jordanm's idea above.
Basically, create two functions: one reindexes the rule priorities by adding +1 to each before the new rule is added, so the new rule automatically lands in the top spot. If I'm reverting to the old rules, another function reindexes the priorities by subtracting 1.
def reindex_forward(elbv2_client, listener_rules):
    # Walk the rules from the highest priority down (assuming listener_rules
    # is sorted by ascending priority), so each +1 bump moves into a slot
    # that was just freed and never collides with an existing priority.
    for rule in reversed(listener_rules):
        if rule['Priority'] != 'default':
            new_priority = int(rule['Priority']) + 1
            elbv2_client.set_rule_priorities(
                RulePriorities=[
                    {
                        'RuleArn': rule['RuleArn'],
                        'Priority': new_priority
                    }
                ]
            )
    return

def reindex_back(elbv2_client, listener_rules):
    # Renumber the rules back to 1, 2, 3, ... in their current order,
    # stopping when the default rule is reached.
    for index, rule in enumerate(listener_rules, start=1):
        if rule['Priority'] != 'default':
            elbv2_client.set_rule_priorities(
                RulePriorities=[
                    {
                        'RuleArn': rule['RuleArn'],
                        'Priority': index
                    }
                ]
            )
        else:
            return
    return
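A rough usage sketch (my own, not from the original post; listener_arn is assumed to be defined already): fetch the rules with describe_rules, sort the numbered ones by priority, shift them up, create the new rule at priority 1, and shift back when reverting.
import boto3

elbv2_client = boto3.client('elbv2')

# describe_rules also returns the 'default' rule; keep only numbered rules,
# sorted ascending, which is the order reindex_forward and reindex_back expect.
rules = elbv2_client.describe_rules(ListenerArn=listener_arn)['Rules']
listener_rules = sorted(
    (r for r in rules if r['Priority'] != 'default'),
    key=lambda r: int(r['Priority']))

reindex_forward(elbv2_client, listener_rules)  # frees up priority 1
# ... call create_rule(..., Priority=1, ...) as in the question ...
reindex_back(elbv2_client, listener_rules)     # revert to the old numbering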
I have a simple class with a public build method that I want to test. Currently I assert all the values it returns in every test. Is that good practice, or should I write one test for the static values and, in the other tests, only check the values that change depending on the input?
Implementation
class FiltersAttachment:
    TYPE_OPTIONS = [
        {"text": "All types", "value": "all"},
        {"text": ":link: Webpages", "value": "web_pages"}
    ]
    STATUS_OPTIONS = [
        {"text": "Available / Unavailable", "value": "all"},
        {"text": ":white_circle: Available", "value": "available"},
        {"text": ":red_circle: Unavailable", "value": "unavailable"}
    ]

    @classmethod
    def _filter_options(cls, options, selected):
        return list(filter(lambda t: t['value'] == selected, options))

    @classmethod
    def build(cls, check_type='', status=''):
        return {
            'fallback': 'Filters',
            'callback_id': 'resource_filters',
            'color': '#d2dde1',
            'mrkdwn_in': ['text'],
            'actions': [
                {
                    'name': 'resource_type',
                    'text': 'Type',
                    'type': 'select',
                    'options': cls.TYPE_OPTIONS,
                    'selected_options': cls._filter_options(
                        cls.TYPE_OPTIONS, check_type)
                },
                {
                    'name': 'resource_status',
                    'text': 'Status',
                    'type': 'select',
                    'options': cls.STATUS_OPTIONS,
                    'selected_options': cls._filter_options(
                        cls.STATUS_OPTIONS, status)
                }
            ]
        }
Tests
class TestFiltersAttachment(TestCase):
    def assert_attachment(self, attachment):
        self.assertEqual(attachment['fallback'], 'Filters')
        self.assertEqual(attachment['callback_id'], 'resource_filters')
        self.assertEqual(attachment['color'], '#d2dde1')
        self.assertEqual(attachment['mrkdwn_in'], ['text'])

        type_action = attachment['actions'][0]
        self.assertEqual(type_action['name'], 'resource_type')
        self.assertEqual(type_action['text'], 'Type')
        self.assertEqual(type_action['type'], 'select')
        self.assertEqual(type_action['options'][0]['text'], 'All types')
        self.assertEqual(type_action['options'][0]['value'], 'all')
        self.assertEqual(type_action['options'][1]['text'], ':link: Webpages')
        self.assertEqual(type_action['options'][1]['value'], 'web_pages')

        status_action = attachment['actions'][1]
        self.assertEqual(status_action['name'], 'resource_status')
        self.assertEqual(status_action['text'], 'Status')
        self.assertEqual(status_action['type'], 'select')
        self.assertEqual(status_action['options'][0]['text'], 'Available / Unavailable')
        self.assertEqual(status_action['options'][0]['value'], 'all')
        self.assertEqual(status_action['options'][1]['text'], ':white_circle: Available')
        self.assertEqual(status_action['options'][1]['value'], 'available')
        self.assertEqual(status_action['options'][2]['text'], ':red_circle: Unavailable')
        self.assertEqual(status_action['options'][2]['value'], 'unavailable')

    def test_all_type_selected(self):
        attachment = FiltersAttachment.build(check_type='all')
        self.assert_attachment(attachment)
        selected_type = attachment['actions'][0]['selected_options'][0]
        self.assertEqual(selected_type['text'], 'All types')
        self.assertEqual(selected_type['value'], 'all')

    def test_all_status_selected(self):
        attachment = FiltersAttachment.build(status='all')
        self.assert_attachment(attachment)
        selected_status = attachment['actions'][1]['selected_options'][0]
        self.assertEqual(selected_status['text'], 'Available / Unavailable')
        self.assertEqual(selected_status['value'], 'all')
...
One criterion for the quality of a test suite is how well it supports you in identifying the problem when tests fail. Ideally, you should be able to identify the problem just by looking at which tests failed and which did not. You should not need to use a debugger to find out what actually went wrong.
The way you have written your tests will not give you the best possible support. You have packed many assertions in one test function. Therefore, the test functions will fail for many different reasons, and when you see one of the functions fail, you will have to do a detailed analysis or use debugging to find out for which reason it failed. When making your tests check aspects redundantly (as you have asked in your question), you make them even less specific, which makes the problem worse.
Therefore, each test should check one specific aspect, such that a failure of a test gives the most specific information. This is achieved by the combination of the following two principles:
Each test should verify one specific aspect.
There should not be redundant tests for the same aspect.
Turning each assertion into a test of its own can be done conveniently with the help of so-called parameterized tests (see the sketch below). Some hints for Python can be found at this question: How do you generate dynamic (parameterized) unit tests in python?
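As a minimal sketch using unittest's subTest (the grouping into a case table is my own, not from the question), each static field becomes its own reported check:
from unittest import TestCase

class TestFiltersAttachmentStatics(TestCase):
    def test_static_fields(self):
        attachment = FiltersAttachment.build()
        cases = [
            ('fallback', 'Filters'),
            ('callback_id', 'resource_filters'),
            ('color', '#d2dde1'),
            ('mrkdwn_in', ['text']),
        ]
        for key, expected in cases:
            # Each subTest reports its own pass/fail, so one broken
            # field does not mask the others.
            with self.subTest(field=key):
                self.assertEqual(attachment[key], expected)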
I'm trying to separate various functions in my program to keep things neat. And I'm getting stuck trying to use variables created in one module in another module. I tried using global list_of_names but it wasn't working, and I've read that it's recommended not to do so anyway.
Below is a sample of my code. In my opinion, it doesn't make sense to pass list_of_names as a function argument because there are multiple other variables that I need to do this with, aside from the actual arguments that do get passed.
Unfortunately, even if I were to move read_json into engine.py, I'd still have the same problem in main.py as I need to reference list_of_names there as well.
# main.py:
import json
from engine import create_person

def read_json():
    with open('names.json', 'r') as file:
        data = json.load(file)
    return data

list_of_names = read_json()
person1 = create_person()

# engine.py:
from random import choice

def create_person():
    # NameError here: list_of_names lives in main.py, not in this module
    name = choice(list_of_names)
    new_person = {
        'name': name,
        # other keys/values created in similar fashion
    }
    return new_person
EDIT1:
Here's my new code. To me, this doesn't seem efficient to have to build the parameter list and then deconstruct it inside the function. (I know I'm reusing variable names for this example) Then I have to pass some of those parameters to other functions.
# main.py:
import json
from engine import create_person

def read_json():
    with open('names.json', 'r') as file:
        data = json.load(file)
    return data

player_id_index = 0
list_of_names = read_json()
person_parameters = [
    list_of_names,
    dict_of_locations,
    player_id_index,
    dict_of_occupations,
    .
    .
    .
]
person1, player_id_index = create_person(person_parameters)

# engine.py:
from random import choice

def create_person(person_params):
    list_of_names = person_params[0]
    dict_of_locations = person_params[1]
    player_id_index = person_params[2]
    dict_of_occupations = person_params[3]
    .
    .
    .
    attr = person_params[n]
    name = choice(list_of_names)
    location = get_location(dict_of_locations)  # a function elsewhere in engine.py
    p_id = player_id_index
    occupation = get_occupation(dict_of_occupations)  # a function elsewhere in engine.py
    new_person = {
        'name': name,
        'hometown': location,
        'player id': p_id,
        'occupation': occupation,
        .
        .
        .
    }
    player_id_index += 1
    return new_person, player_id_index
In general you should not rely on shared global state. If you need to share state, encapsulate it in objects or pass it as function arguments.
Regarding your specific problem, it looks like you want to assemble random dictionaries from a set of options. It could be coded like this:
from random import choice

person_options = {
    'name': ['fred', 'mary', 'john', 'sarah', 'abigail', 'steve'],
    'health': [6, 8, 12, 15],
    'weapon': ['sword', 'bow'],
    'armor': ['naked', 'leather', 'iron']
}

def create_person(person_options):
    return {k: choice(opts) for k, opts in person_options.items()}

for _ in range(4):
    print(create_person(person_options))
In action:
>>> for _ in range(4):
... print(create_person(person_options))
...
{'armor': 'naked', 'weapon': 'bow', 'health': 15, 'name': 'steve'}
{'armor': 'iron', 'weapon': 'sword', 'health': 8, 'name': 'fred'}
{'armor': 'iron', 'weapon': 'sword', 'health': 6, 'name': 'john'}
{'armor': 'iron', 'weapon': 'sword', 'health': 12, 'name': 'john'}
Note that a dictionary like {'armor': 'naked', 'weapon': 'bow', 'health': 15, 'name': 'steve'} looks like it might want to be an object. A dictionary is a glob of state without any defined behavior. If you make a class to house this state the class can grow methods that act on that state. Of course, explaining all this could make this answer really really long. For now, just realize that you should move away from having shared state that any old bit of code can mess with. A little bit of discipline on this will make your code much easier to refactor later on.
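To gesture at that direction, here is a small sketch using a dataclass (the Person class and its method are illustrative, not part of the original answer); the state gets named fields and a place to hang behavior:
from dataclasses import dataclass
from random import choice

@dataclass
class Person:
    name: str
    health: int
    weapon: str
    armor: str

    def is_armored(self) -> bool:
        # Behavior lives right next to the state it inspects.
        return self.armor != 'naked'

def create_person(person_options) -> Person:
    # Same random assembly as above, but into a typed object.
    return Person(**{k: choice(opts) for k, opts in person_options.items()})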
This addresses your edited question:
from random import choice
from itertools import count
from functools import partial

person_options = {
    'name': partial(
        choice, ['fred', 'mary', 'john', 'sarah', 'abigail', 'steve']),
    'location': partial(
        get_location, {'heaven': 1, 'hell': 2, 'earth': 3}),  # get_location from your engine.py
    'player id': partial(next, count(1))
}

def create_person(person_options):
    return {k: func() for k, func in person_options.items()}
However, we are now way beyond the scope of your original question and getting into specifics that won't be helpful to anyone other than you. Such questions are better asked on Code Review Stack Exchange.
I have read the documentation, but I am not exactly sure how to implement serializer.serialize for JSON objects in my view.py; I would appreciate help understanding this a little better. I have the following code in my view.py:
@user_passes_test(lambda u: u.is_superuser)
def ProjDetails(request):
    proj_id = request.GET['proj_id']
    proj = Proj.objects.filter(id=proj_id)
    role_list = ProjRole.objects.filter(proj=proj)
    proj = {
        "proj": proj,
        "roles": []
    }
    for r in role_list:
        proj['roles'].append(r.id)
    return HttpResponse(json.dumps(proj), content_type='application/json; charset=UTF-8')
I am trying to call this with .ajax (I am still working on the ajax, so it probably is not right):
$('#proj_list #sel_proj').click(function(){
    $('div.sel').removeClass("sel");
    $(this).addClass("sel");
    var proj_id = $(this).data('id');
    $.ajax({
        url: '../../proj_details',
        data: {proj_id: proj_id},
        // dataType: 'html',
        success: function(data){
            $('#proj_display').html(data)
        },
        error: function () {
            alert("Failed to find the project!")
        }
    });
});
Once I get the JSON call to work, then I will focus more on the ajax.
The biggest problem: I am getting a 500 HTTP error with:
TypeError at ../proj_details
[<Project: Example>] is not JSON serializable
I am using Django 1.7, and I even added SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' to my settings.py without any luck. So I imported serializers from django.core and tried to use serializer.serialize, but I am not understanding how to implement it, because my errors just keep getting worse. I have seen other posts with the same error, but I am still not understanding it for my particular requirements.
+++++++++++++++ EDIT +++++++++++++++++++
So the only way I have been able to get this to work without multiple errors (circular errors, multiple-argument errors, etc.) is the following:
def ProjDetails(request):
    def date_handler(obj):
        return obj.strftime("%B %d, %Y") if hasattr(obj, 'strftime') else obj

    proj_id = request.GET['proj_id']
    proj = Proj.objects.get(id=proj_id)
    corp = Corp.objects.get(id=proj.corp.id)
    role_list = ProjRole.objects.filter(proj=proj).all()
    proj = {
        "proj": {
            'title': proj.title,
            'id': proj.id,
            'date': proj.date,
            'description': proj.description
        },
        "roles": [],
        "company": {
            'name': corp.name,
            'pic': unicode(corp.pic),
        }
    }
    for r in role_list:
        proj['roles'].append(r.name)
    return HttpResponse(json.dumps(proj, default=date_handler), content_type='application/json; charset=UTF-8')
The only thing I don't like about this is that I have to manually pull the attributes I want from the model into the dictionary, instead of pulling all the attributes from the model and then choosing which ones to use in my templates. I would rather not have to pull everything like in my example above. The "roles": [] part is giving me some hiccups too, because I can't seem to get it to work when there are multiple roles for a proj object.
I like Eugene's method because it would be cleaner, but I can't seem to get it to work with the corp model. The proj tables have a corp_id, yet I keep getting "corp_id is not an attribute" when I attempt it using .values().get() for the proj object. I don't understand how to implement grzgrzgrz3's answer either. I usually work more with JS, HTML, and CSS; I am new to Django/Python for web development.
So any suggestions to make this more efficient would be great. Thanks!!
A Django model instance can't be serialized by json.dumps; use the values() method to retrieve a dict instead of a class instance. For the roles, values_list() retrieves just the id field (only() would still return model instances, which reproduces the serialization error):
proj = Proj.objects.filter(id=proj_id).values().get()
role_list = list(ProjRole.objects.filter(proj__id=proj_id).values_list("id", flat=True))
proj = {
    "proj": proj,
    "roles": role_list
}
Write a custom HttpResponse and handle all the non-serializable Python/Django objects there.
import datetime
import json

from django.http import HttpResponse

class HttpJsonResponse(HttpResponse):
    content_type = "application/json"

    def __init__(self, data):
        def json_serial(obj):
            """JSON serializer for objects not serializable by default json code"""
            if isinstance(obj, datetime.date):
                serial = obj.isoformat()
                return serial

        json_data = json.dumps(data, indent=4, default=json_serial)
        super(HttpJsonResponse, self).__init__(json_data, self.content_type)
In the example, the json_serial function converts datetime.date objects into strings, which are serializable.
UPDATE:
You can mix both answers.
def ProjDetails(request):
    proj_id = request.GET['proj_id']
    proj = Proj.objects.filter(id=proj_id).values().get()
    # proj is a plain dict now, so the foreign key is the 'corp_id' column
    corp = Corp.objects.filter(id=proj['corp_id']).values().get()
    role_list = list(ProjRole.objects.filter(proj__id=proj_id).values())
    proj = {
        "proj": proj,
        "roles": role_list,
        "company": corp
    }
    return HttpJsonResponse(proj)
Make sure you import the datetime module:
import datetime
rather than the datetime class:
from datetime import datetime
since json_serial checks isinstance(obj, datetime.date).
My answer, as described up above. This is what worked for me.
def ProjDetails(request):
    def date_handler(obj):
        return obj.strftime("%B %d, %Y") if hasattr(obj, 'strftime') else obj

    proj_id = request.GET['proj_id']
    proj = Proj.objects.get(id=proj_id)
    corp = Corp.objects.get(id=proj.corp.id)
    role_list = ProjRole.objects.filter(proj=proj).all()
    proj = {
        "proj": {
            'title': proj.title,
            'id': proj.id,
            'date': proj.date,
            'description': proj.description
        },
        "roles": [],
        "company": {
            'name': corp.name,
            'pic': unicode(corp.pic),
        }
    }
    for r in role_list:
        proj['roles'].append(r.name)
    return HttpResponse(json.dumps(proj, default=date_handler), content_type='application/json; charset=UTF-8')
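For reference, the default= hook is only invoked for objects json.dumps cannot handle on its own; a quick standalone check (my own, not from the answer):
import datetime
import json

def date_handler(obj):
    return obj.strftime("%B %d, %Y") if hasattr(obj, 'strftime') else obj

# The date object triggers date_handler; everything else is serialized normally.
print(json.dumps({'date': datetime.date(2015, 1, 31)}, default=date_handler))
# {"date": "January 31, 2015"}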