I tried to compare DRF response and input value.
# NOTE(review): question code — the assertion below fails because
# response.json() serializes the datetime as an ISO-8601 string while
# self.current_date_time is a Python datetime object (see error output below).
class ViewTest(TransactionTestCase):
    # Reset PK sequences between tests so the created Note is always pk=1.
    reset_sequences = True
    # Evaluated once at class-definition time; shared by all tests.
    current_date_time = timezone.now()

    def setUp(self):
        self.client = APIClient()
        # NOTE(review): 'a#b.com' looks like Stack Overflow's '@'-mangling — verify.
        self.user = User.objects.create_user('hiren', 'a#b.com', 'password')
        self.client.force_authenticate(user=self.user)
        self.tag = Tag.objects.create(name="Test tag")
        Notes.objects.create(tag=self.tag, content="test content", date=self.current_date_time)

    def test_return_correct_note(self):
        response = self.client.get('/api/notes/1/')
        # Fails: the 'date' in the JSON body is a string, not a datetime.
        self.assertEqual(response.json(), {'content': 'test content', 'id': 1,
                                           'tag': 1, 'date': self.current_date_time})
Then I got this error:
AssertionError: {'date': '2016-04-09T07:35:28.039393Z', 'co[37 chars]': 1} != {'tag': 1, 'content': 'test content', 'id':[69 chars]TC>)}
{'content': 'test content',
- 'date': '2016-04-09T07:35:28.039393Z',
+ 'date': datetime.datetime(2016, 4, 9, 7, 35, 28, 39393, tzinfo=<UTC>),
'id': 1,
'tag': 1}
What is the correct way to compare Django datetimes?
You could either convert the Python datetime object into a ISO time string, or parse the ISO time string into a python datetime object.
For example
...
'tag': 1, 'date': self.current_date_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')})
You can make use of the to_representation(...) method of the DateTimeField class as
from django.test import TransactionTestCase


class ViewTest(TransactionTestCase):
    """Verify the notes endpoint returns the note created in setUp()."""

    # Reset PK sequences between tests so the note is always pk=1.
    reset_sequences = True
    # Captured once at class-definition time; used both to create the note
    # and to build the expected response.
    current_date_time = timezone.now()

    def setUp(self):
        self.client = APIClient()
        self.user = User.objects.create_user("hiren", "a#b.com", "password")
        self.client.force_authenticate(user=self.user)
        self.tag = Tag.objects.create(name="Test tag")
        Notes.objects.create(
            tag=self.tag, content="test content", date=self.current_date_time
        )

    def test_return_correct_note(self):
        from rest_framework.fields import DateTimeField

        response = self.client.get("/api/notes/1/")
        # Render the datetime exactly the way DRF does, so both sides of the
        # comparison are ISO-8601 strings.
        expected = {
            "content": "test content",
            "id": 1,
            "tag": 1,
            "date": DateTimeField().to_representation(self.current_date_time),
        }
        self.assertEqual(response.json(), expected)
Related
I am trying to send email alerts for all new alerts that have just been created. I have tried
last_alert = Alert.objects.filter(kiosk=kiosk).last()
But that only gets the last alert and it triggers the same one all the time. It is possible to have 3 alerts be triggered at once. I am trying to implement a flag to know whether or not an alert has been sent. I'm probably using latest wrong here.
# NOTE(review): question code. `created_on` is presumably populated whenever
# an Alert row is created, so the truth test below would always be True and
# the email never sent for new alerts — TODO confirm against the Alert model.
last_alert = Alert.objects.filter(kiosk=kiosk).latest('pk')
if last_alert.created_on:
    alert_status = HTTP_208_ALREADY_REPORTED
    send_alert_email = False
else:
    alert_status = HTTP_201_CREATED
    send_alert_email = True
    # NOTE(review): indentation was lost in the paste; the three statements
    # below may or may not belong inside this else branch.
    last_alert.created_on = datetime.now(last_alert.kiosk.location.timezone)
    Alert.create(kiosk=kiosk, created_on=datetime.now(last_alert.kiosk.location.timezone))
    last_alert.save()

# Get timezone-aware date and time for the email body.
current_dt = datetime.now().astimezone(kiosk.location.timezone)
current_time = current_dt.strftime('%I:%M %p')
current_date = current_dt.strftime('%m/%d/%Y')

# Payload for the alert-notification email template.
email_props2 = {
    'method': 'EMAIL',
    # NOTE(review): '#' is presumably a mangled '@' in the original address.
    'email': 'john#example.com',
    'data': {
        'facility': last_alert.kiosk.location.name,
        'description': last_alert.description,
        'pk': last_alert.pk,
        'time': current_time,
        'date': current_date,
        'kioskName': kiosk.name,
        'alert_type_display': last_alert.alert_type_display
    }
}

# Only send when the alert was judged new above.
if send_alert_email:
    _send_email(
        [email_props2['email']],
        {'data': email_props2['data']},
        ALERT_TEMPLATE_ID
    )
Maybe I am approaching this problem wrong with the flag. Any help is very much appreciated.
thanks in advance
I have a solution. I added a processed field to the Alert model default it to False. Then filter all Alerts with field processed=False. Loop through all of the Alerts, if processed=False send an email, then set processed=True.
# Fetch every alert for this kiosk that has not been emailed yet.
unprocessed_alerts = Alert.objects.filter(kiosk=kiosk, processed=False)

# Get timezone-aware date and time once — it is loop-invariant, so there is
# no reason to recompute it per alert.
current_dt = datetime.now().astimezone(kiosk.location.timezone)
current_time = current_dt.strftime('%I:%M %p')
current_date = current_dt.strftime('%m/%d/%Y')

for alert in unprocessed_alerts:
    # No `processed` re-check needed: the queryset already filters on it.
    email_props2 = {
        'method': 'EMAIL',
        # NOTE(review): restored '@' — the paste rendered emails with '#'.
        'email': 'john@example.com',
        'data': {
            'facility': alert.kiosk.location.name,
            'description': alert.description,
            'pk': alert.pk,
            'time': current_time,
            'date': current_date,
            'kioskName': kiosk.name,
            'alert_type_display': alert.alert_type_display
        }
    }
    # Send the notification for this alert.
    _send_email(
        [email_props2['email']],
        {'data': email_props2['data']},
        ALERT_TEMPLATE_ID
    )
    # Flag the alert so the next run does not re-send it.
    alert.processed = True
    alert.save()
I have a nested serializer containing an ImageField in the nested serializer; the serializers are:
class FloorPlanLocationSerializer(serializers.ModelSerializer):
    """Nested serializer exposing a FloorPlan's floor number and image."""

    class Meta:
        model = FloorPlan
        fields = (
            'floor',
            'image',
        )
        # Both fields optional so a Location can be created without a floorplan.
        extra_kwargs = {'floor': {'required': False}, 'image': {'required': False}}
class LocationSerializer(FilterSerializerByOrgManaged, serializers.ModelSerializer):
    """Location serializer with an optional, writable nested floorplan."""

    floorplan = FloorPlanLocationSerializer(required=False, allow_null=True)

    class Meta:
        model = Location
        fields = (
            'id',
            'organization',
            'name',
            'type',
            'is_mobile',
            'address',
            'geometry',
            'created',
            'modified',
            'floorplan',
        )
        read_only_fields = ('created', 'modified')

    def to_representation(self, instance):
        """Replace the single nested 'floorplan' value with the full list of
        this location's floorplans, newest first, using absolute image URLs.
        """
        request = self.context['request']
        data = super().to_representation(instance)
        floorplans = instance.floorplan_set.all().order_by('-modified')
        floorplan_list = []
        for floorplan in floorplans:
            dict_ = {
                'floor': floorplan.floor,
                # Absolute URI so API clients need not know the media host.
                'image': request.build_absolute_uri(floorplan.image.url),
            }
            floorplan_list.append(dict_)
        data['floorplan'] = floorplan_list
        return data

    def create(self, validated_data):
        """Create the Location, then (optionally) its FloorPlan."""
        floorplan_data = None
        if validated_data.get('floorplan'):
            floorplan_data = validated_data.pop('floorplan')
        instance = self.instance or self.Meta.model(**validated_data)
        with transaction.atomic():
            instance.full_clean()
            instance.save()
        if floorplan_data:
            floorplan_data['location'] = instance
            floorplan_data['organization'] = instance.organization
            with transaction.atomic():
                # NOTE(review): objects.create() already saves, so full_clean()
                # runs after the INSERT and fl.save() writes a second time —
                # consider FloorPlan(**floorplan_data) + full_clean() + save().
                fl = FloorPlan.objects.create(**floorplan_data)
                fl.full_clean()
                fl.save()
        return instance
With the above serializer, it works fine with the DRF browsable page, but when I try to send the data with the test client in multipart format, the nested data gets removed while sending the POST request. This is how I wrote the tests:
def test_create_location_with_floorplan_api(self):
    """Question code: POST a location plus nested floorplan as multipart.

    NOTE(review): multipart/form-data cannot encode nested dicts — the test
    client flattens them to their keys (see the QueryDict dump below), which
    is why 'geometry' and 'floorplan' arrive mangled.
    """
    path = reverse('geo_api:list_location')
    # NOTE(review): `coords` is built but never used in the posted data.
    coords = json.loads(Point(2, 23).geojson)
    image = Image.new("RGB", (100, 100))
    with tempfile.NamedTemporaryFile(suffix=".png", mode="w+b") as tmp_file:
        # Render the PIL image into the temp file, then wrap the bytes in an
        # InMemoryUploadedFile the way a browser upload would arrive.
        image.save(tmp_file, format="png")
        tmp_file.seek(0)
        byio = BytesIO(tmp_file.read())
        inm_file = InMemoryUploadedFile(
            file=byio,
            field_name="avatar",
            name="testImage.png",
            content_type="image/png",
            size=byio.getbuffer().nbytes,
            charset=None,
        )
        data = {
            'organization': self._get_org().pk,
            'name': 'test-location',
            'type': 'indoor',
            'is_mobile': False,
            'address': 'Via del Corso, Roma, Italia',
            # Nested dicts below are what multipart encoding cannot carry.
            'geometry': {'Type': 'Point', 'coordinates': [12.32,43.222]},
            'floorplan': {
                'floor': 12,
                'image': inm_file
            },
        }
        with self.assertNumQueries(6):
            response = self.client.post(path, data, format='multipart')
        self.assertEqual(response.status_code, 201)
The data doesn't come in the same format as I sent, i.e., when I try to see the data in the to_internal method this is how I receive it:-
<QueryDict: {'organization': ['f6c406e5-0602-44a7-9160-ec109ac29f4c'], 'name': ['test-location'], 'type': ['indoor'], 'is_mobile': ['False'], 'address': ['Via del Corso, Roma, Italia'], 'geometry': ['type', 'coordinates'], 'floorplan': ['floor', 'image']}>
the values of type, coordinates, floorplan are not present inside it.
How can I write proper tests for the above case?
If you want to post form data, you need to flatten everything the same way a browser would. Maybe this gist will help, flatten_dict_for_form_data. It's quite old and could use some cleanup, but it still works.
This recursively flattens a dict, which you can then send to test client (or to live services):
def flatten_dict_for_formdata(input_dict, sep="[{i}]"):
    """Flatten a nested dict/list structure into form-data style keys.

    Dict keys nest with a "." separator ("location.lat") and list/tuple
    items get an indexed suffix built from *sep* ("sizes[0]").  The result
    maps each flattened key to its scalar value, ready to hand to a test
    client as multipart/form-data.
    """
    def _flatten(value, prefix, result, parent=None):
        if isinstance(value, dict):
            # Separate nested dict keys from their enclosing container.
            if parent is not None:
                prefix += "."
            for key, item in value.items():
                _flatten(item, prefix + key, result, parent="dict")
        elif isinstance(value, (list, tuple)):
            for i, item in enumerate(value):
                # Bug fix: mark the parent so a dict *inside* a list gets its
                # "." separator too ("items[0].name", not "items[0]name").
                _flatten(item, prefix + sep.format(i=i), result, parent="list")
        else:
            result[prefix] = value
        return result

    return _flatten(input_dict, "", {})
>>> flatten_dict_for_formdata({
>>> "name": "Test",
>>> "location": {"lat": 1, "lng": 2},
>>> "sizes": ["S", "M", "XL"]
>>> })
>>> {
>>> "name": "Test",
>>> "location.lat": 1,
>>> "location.lng": 2,
>>> "sizes[0]": "S",
>>> "sizes[1]": "M",
>>> "sizes[2]": "XL"
>>> }
I have the following function that I need to test:
def function_to_test(host: str, prefix: str, file_reg_ex=None, dir_reg_ex=None):
    """List objects under s3://host/prefix and return {key: {'ETag', 'Last-Modified'}}.

    Optional regexes filter on the file-name and directory parts of each key.
    """
    s3_client = boto3.client('s3')
    s3_paginator = s3_client.get_paginator('list_objects')
    # Paginate 1000 keys at a time; the iterator yields one page dict per request.
    response_iterator = s3_paginator.paginate(
        Bucket=host,
        Prefix=prefix,
        PaginationConfig={
            'PageSize': 1000
        }
    )
    ret_dict = {}
    for page in response_iterator:
        # NOTE(review): raises KeyError when a page has no 'Contents' (empty prefix).
        for s3_object in page['Contents']:
            key = s3_object['Key']
            # Split "dir/file" on the last '/'. A key with no '/' yields a
            # one-element list, so sections[1] would raise IndexError —
            # TODO confirm keys always contain a '/'.
            sections = str(key).rsplit('/', 1)
            key_dir = sections[0]
            file_name = sections[1]
            if (file_reg_ex is None or re.search(file_reg_ex, file_name)) and \
                    (dir_reg_ex is None or re.search(dir_reg_ex, key_dir)):
                ret_dict[key] = {
                    'ETag': s3_object['ETag'],
                    # Epoch seconds, easier to compare than datetimes.
                    'Last-Modified': s3_object['LastModified'].timestamp()
                }
    return ret_dict
It looks like I need to use the boto stubber referenced here: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/stubber.html#botocore-stub
In the documentation they make a response that is returned from a 'list-objects' S3 request but this will not work for a paginator as it returns a botocore.paginate.PageIterator object. How can this functionality be mocked?
It was suggested to look into https://pypi.org/project/boto3-mocking/ and https://github.com/spulec/moto but due to time constraints I did a more simple workaround.
#staticmethod
def get_s3_resp_iterator(host, prefix, s3_client):
s3_paginator = s3_client.get_paginator('list_objects')
return s3_paginator.paginate(
Bucket=host,
Prefix=prefix,
PaginationConfig={
'PageSize': 1000
}
)
def function_to_test(self, host: str, prefix: str, file_reg_ex=None, dir_reg_ex=None):
    """Build {key: {'ETag', 'Last-Modified'}} for S3 objects under *prefix*.

    Pagination is delegated to self.get_s3_resp_iterator so tests can stub it
    with a plain list of page dicts (see test_s3 below).

    Fixes: added the missing `self` parameter — the original body already
    called self.get_s3_resp_iterator and the test invokes this as a bound
    method — and removed the dead `s3_paginator` local, which duplicated the
    paginator the helper builds anyway.
    """
    s3_client = boto3.client('s3')
    response_iterator = self.get_s3_resp_iterator(host, prefix, s3_client)
    ret_dict = {}
    for page in response_iterator:
        for s3_object in page['Contents']:
            key = s3_object['Key']
            # rpartition never raises: a key with no '/' gets key_dir == ''.
            key_dir, _, file_name = str(key).rpartition('/')
            if (file_reg_ex is None or re.search(file_reg_ex, file_name)) and \
                    (dir_reg_ex is None or re.search(dir_reg_ex, key_dir)):
                ret_dict[key] = {
                    'ETag': s3_object['ETag'],
                    # Epoch seconds, easier to compare than datetimes.
                    'Last-Modified': s3_object['LastModified'].timestamp()
                }
    return ret_dict
This allows me to do the following in a pretty straight forward manner:
def test_s3(self):
    """Stub get_s3_resp_iterator with a canned page list and check the result."""
    # One 'page' shaped like a list_objects response, with two keys.
    test_resp_iter = [
        {
            'Contents': [
                {
                    'Key': 'key/key1',
                    'ETag': 'etag1',
                    'LastModified': datetime.datetime(2020, 8, 14, 17, 19, 34, tzinfo=tzutc())
                },
                {
                    'Key': 'key/key2',
                    'ETag': 'etag2',
                    'LastModified': datetime.datetime(2020, 8, 14, 17, 19, 34, tzinfo=tzutc())
                }
            ]
        }
    ]
    tc = TestClass()
    # Replace the paginator helper so no AWS call is made.
    tc.get_s3_resp_iterator = MagicMock(return_value=test_resp_iter)
    # NOTE(review): called `function_s3` here but defined above as
    # `function_to_test` — confirm the real method name.
    ret_dict = tc.function_s3('test_host', '', file_reg_ex=None, dir_reg_ex=None)
    self.assertEqual(len(ret_dict), 2)
I have a json config that I want to create a dict from. Because json configs are recursive, any time I see a json value that is an array I want to recursively iterate on it. However this is not doing what I want it to do.
class FieldHandler():
    # BUG (the subject of this question): declared at class level, so this
    # dict is shared by *every* FieldHandler instance, including the nested
    # handler built for array fields — hence inner fields leak into the
    # outer handler's formfields.
    formfields = {}

    def __init__(self, fields):
        # Build one form field per config entry, dispatching on its 'type'
        # to create_field_for_<type>.
        for field in fields:
            options = self.get_options(field)
            f = getattr(self, "create_field_for_" +
                        field['type'])(field, options)
            self.formfields[field['name']] = f

    def get_options(self, field):
        """Extract the options shared by every field type."""
        options = {}
        options['label'] = field['name']
        options['help_text'] = field.get("help_text", None)
        options['required'] = bool(field.get("required", 0))
        return options

    def create_field_for_string(self, field, options):
        options['max_length'] = int(field.get("max_length", "20"))
        return django.forms.CharField(**options)

    def create_field_for_int(self, field, options):
        options['max_value'] = int(field.get("max_value", "999999999"))
        options['min_value'] = int(field.get("min_value", "-999999999"))
        return django.forms.IntegerField(**options)

    def create_field_for_array(self, field, options):
        # Recurse into the array's elements; the nested handler shares the
        # same class-level formfields dict, which triggers the leak above.
        fh = FieldHandler(field['elements'])
        return fh
and instantiating:
# Example config: a string, an array of two strings, and an int.
fh = FieldHandler([
    {'type': 'string', 'name': 'position'},
    {'type': 'array', 'name': 'calendar', 'elements': [
        {'type': 'string', 'name': 'country'},
        {'type': 'string', 'name': 'url'},
    ]},
    {'type': 'int', 'name': 'maxSize'}
])
I expect to get a dict like so:
{
'position': <django.forms.fields.CharField object at 0x10b57af50>,
'calendar': <__main__.FieldHandler instance at 0x10b57c680>,
'maxSize': <django.forms.fields.IntegerField object at 0x10b58e050>,
}
Where calendar itself is expected to be:
{
'url': <django.forms.fields.CharField object at 0x10b58e150>,
'country': <django.forms.fields.CharField object at 0x10b58e0d0>
}
Instead I get:
{
'url': <django.forms.fields.CharField object at 0x10b58e150>,
'position': <django.forms.fields.CharField object at 0x10b57af50>,
'calendar': <__main__.FieldHandler instance at 0x10b57c680>,
'maxSize': <django.forms.fields.IntegerField object at 0x10b58e050>,
'country': <django.forms.fields.CharField object at 0x10b58e0d0>
}
What am I doing wrong? Why are the position and country parameters being set on my global FieldHandler?
formfields is a class attribute that is shared among all instances. Make it an instance attribute instead:
class FieldHandler():
    def __init__(self, fields):
        # Instance attribute: each handler now owns its own dict, so nested
        # handlers no longer leak fields into the outer one.
        self.formfields = {}
        # ...
I am having a problem understanding how mock works and how to write unit tests with mock objects. I wanted to mock an external API call every time my model calls its save() method.
My code:
models.py
from . import utils
class Book(Titleable, Isactiveable, Timestampable, IsVoidable, models.Model):
    # NOTE(review): field definitions were elided in the question paste;
    # these bare names stand in for model fields.
    title
    orig_author
    orig_title
    isbn

    def save(self, *args, **kwargs):
        """Look up title/author from Google Books by ISBN, then save."""
        if self.isbn:
            google_data = utils.get_original_title_and_name(self.isbn)
            if google_data:
                # NOTE(review): writes original_author/original_title although
                # the fields above are named orig_author/orig_title — confirm
                # which names the real model uses.
                self.original_author = google_data['author']
                self.original_title = google_data['title']
        super().save(*args, **kwargs)
utils.py
def get_original_title_and_name(isbn, **kawargs):
    """Query the Google Books API by ISBN and return {'title', 'author'}.

    Returns None when the API response contains no 'items'.
    NOTE(review): `kawargs` is a typo for `kwargs`; it is unused either way.
    """
    isbn_search_string = 'isbn:{}'.format(isbn)
    payload = {
        'key': GOOGLE_API_KEY,
        'q': isbn_search_string,
        'printType': 'books',
    }
    r = requests.get(GOOGLE_API_URL, params=payload)
    response = r.json()
    if 'items' in response.keys():
        # Take only the first matching volume and its first author.
        title = response['items'][THE_FIRST_INDEX]['volumeInfo']['title']
        author = response['items'][THE_FIRST_INDEX]['volumeInfo']['authors'][THE_FIRST_INDEX]
        return {
            'title': title,
            'author': author
        }
    else:
        return None
I began read docs and write test:
test.py:
from unittest import mock
from django.test import TestCase
from rest_framework import status
from .constants import THE_FIRST_INDEX, GOOGLE_API_URL, GOOGLE_API_KEY
class BookModelTestCase(TestCase):
    """Question code: wires a fake requests.get response for the Google API."""

    # Fix: the paste rendered the decorator as '#mock.patch', turning it into
    # a comment and leaving the `mock_get` parameter unfilled. '@' restored.
    @mock.patch('requests.get')
    def test_get_original_title_and_name_from_google_api(self, mock_get):
        # Mock object standing in for the HTTP response.
        mock_response = mock.Mock()
        # Response data shaped like a real Google Books volumes payload.
        expected_dict = {
            'kind': 'books#volumes',
            'totalItems': 1,
            'items': [
                {
                    'kind': 'books#volume',
                    'id': 'IHxXBAAAQBAJ',
                    'etag': 'B3N9X8vAMWg',
                    'selfLink': 'https://www.googleapis.com/books/v1/volumes/IHxXBAAAQBAJ',
                    'volumeInfo': {
                        'title': "Alice's Adventures in Wonderland",
                        'authors': [
                            'Lewis Carroll'
                        ]
                    }
                }
            ]
        }
        # Define response data for my Mock object.
        mock_response.json.return_value = expected_dict
        mock_response.status_code = 200
        # Define response for the fake API.
        mock_get.return_value = mock_response
First of all, I can't write the target for @mock.patch correctly. If I define the target as utils.get_original_title_and_name.requests.get, I get ModuleNotFoundError. Also, I can't understand how to make a fake call to the external API and verify the received data (is that even necessary, if I've already defined mock_response.json.return_value = expected_dict?), and how to verify that my save() method works well.
How do I write test for this cases? Could anyone explain me this case?
You should mock the direct collaborators of the code under test. For Book that would be utils. For utils that would be requests.
So for the BookModelTestCase:
class BookModelTestCase(TestCase):
    """Drive Book.save() with the utils module mocked at the models boundary."""

    # Fix: '@' restored — the paste rendered the decorator as '#mock.patch',
    # which is a comment and would leave `mock_utils` unfilled.
    @mock.patch('app.models.utils')
    def test_save_book_calls_google_api(self, mock_utils):
        mock_utils.get_original_title_and_name.return_value = {
            'title': 'Google title',
            'author': 'Google author'
        }
        book = Book(
            title='Some title',
            isbn='12345'
        )
        book.save()
        # Fix: Book.save() stores the Google data on original_title /
        # original_author and never overwrites `title`, so assert those
        # attributes (and that `title` is untouched) instead of title/author.
        self.assertEqual(book.title, 'Some title')
        self.assertEqual(book.original_title, 'Google title')
        self.assertEqual(book.original_author, 'Google author')
        mock_utils.get_original_title_and_name.assert_called_once_with('12345')
And then you can create a separate test case to test get_original_title_and_name:
class GetOriginalTitleAndNameTestCase(TestCase):
    """Unit-test utils.get_original_title_and_name with requests.get mocked."""

    # Fix: '@' restored — '#mock.patch' in the paste is a comment, not a
    # decorator, so requests.get would not actually be patched.
    @mock.patch('app.utils.requests.get')
    def test_get_original_title_and_name_from_google_api(self, mock_get):
        mock_response = mock.Mock()
        # Response data shaped like a real Google Books volumes payload.
        expected_dict = {
            'kind': 'books#volumes',
            'totalItems': 1,
            'items': [
                {
                    'kind': 'books#volume',
                    'id': 'IHxXBAAAQBAJ',
                    'etag': 'B3N9X8vAMWg',
                    'selfLink': 'https://www.googleapis.com/books/v1/volumes/IHxXBAAAQBAJ',
                    'volumeInfo': {
                        'title': "Alice's Adventures in Wonderland",
                        'authors': [
                            'Lewis Carroll'
                        ]
                    }
                }
            ]
        }
        # Define response data for my Mock object.
        mock_response.json.return_value = expected_dict
        mock_response.status_code = 200
        # Define response for the fake API.
        mock_get.return_value = mock_response
        # Call the function under test.
        result = get_original_title_and_name(12345)
        self.assertEqual(result, {
            'title': "Alice's Adventures in Wonderland",
            'author': 'Lewis Carroll'
        })
        mock_get.assert_called_once_with(GOOGLE_API_URL, params={
            'key': GOOGLE_API_KEY,
            'q': 'isbn:12345',
            'printType': 'books',
        })