I need to write a unit test for an AWS Lambda handler. I want to feed some JSON files to the handler method and check the output, but I've run into trouble mocking the S3 "event".
My lambda handler looks like this:
def handler(event, context):
    print(f'Event: {event}')
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(event["bucket"])
    word_correction = correction.WordCorrection()
    for obj in bucket.objects.all():
        key = obj.key
        body = obj.get()['Body'].read()
        data = get_data_from_file(body)
        if key.endswith('.json'):
            try:
                word_correction.create_duplicated_words_file(data)
            except Exception as e:
                print(e)
                return {
                    "success": False,
                    "response": f"Failed to read file - {e}"
                }
            try:
                corrected_word_list = word_correction.spell_words(json.loads(body))
            except Exception as e:
                print(e)
                return {
                    "success": False,
                    "response": f"Failed to correct words - {e}"
                }
        else:
            return {
                "success": False,
                "response": "Invalid file type. File must have .json extension."
            }
    return {
        "success": True,
        "response": corrected_word_list
    }
Here I pass some JSON data to my word_correction module.
From this point, my unit test:
S3_BUCKET_NAME = 'dev-ayazv-lambda-spell-correction'
DEFAULT_REGION = 'us-east-1'
S3_TEST_FILE_KEY = 'pdfs/manual_test/page-data-page-9.json'
S3_TEST_FILE_CONTENT = {"Blocks": [{"BlockType": "WORD", "Confidence": 93.18, "Text": "Test"}]}

@mock_s3
class TestLambdaFunction(unittest.TestCase):
    def setUp(self):
        self.s3 = boto3.resource('s3', region_name=DEFAULT_REGION)
        self.s3_bucket = self.s3.create_bucket(Bucket=S3_BUCKET_NAME)
        self.s3_bucket.put_object(Key=S3_TEST_FILE_KEY,
                                  Body=json.dumps(S3_TEST_FILE_CONTENT))
    def test_get_data_from_file(self):
        from functions.spell_correction.src.index import get_data_from_file
        json_encode_data = json.dumps(S3_TEST_FILE_CONTENT, indent=2).encode('utf-8')
        file_content = get_data_from_file(json_encode_data)
        self.assertEqual(file_content, S3_TEST_FILE_CONTENT)

    def test_handler(self):
        from functions.spell_correction.src.index import handler
        event = {
            'bucket': {
                'name': S3_BUCKET_NAME
            },
            'object': {
                'key': S3_TEST_FILE_KEY
            }
        }
        result = handler(event, {})
        self.assertEqual(result, {"success": True, "response": []})
I was trying to mock the S3 bucket, the test file key, and the file content, but my test fails at this point:
bucket = {'name': 'dev-ayazv-lambda-spell-correction'}

    def validate_bucket_name(params, **kwargs):
        if 'Bucket' not in params:
            return
        bucket = params['Bucket']
>       if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket):
E       TypeError: expected string or bytes-like object
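The traceback shows validate_bucket_name receiving the whole {'name': ...} dict instead of a string: the handler calls s3.Bucket(event["bucket"]), while the test event nests the name under event['bucket']['name']. A minimal sketch of making the two shapes agree (keep whichever shape your real events use):

# sketch: unwrap the nested bucket name that the test event provides
bucket = s3.Bucket(event["bucket"]["name"])
# ...or keep the handler as-is and flatten the test event instead:
# event = {'bucket': S3_BUCKET_NAME}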
Related
I'm trying to write a lambda function to upload files to my S3 bucket.
I'm new to coding so I don't understand why this doesn't work.
I get a "KeyError" for Body. Can anyone explain what this means and how I can fix my code?
Thanks in advance.
import base64
import boto3
import json
import uuid

s3 = boto3.client('s3')

def lambda_handler(event, context):
    print(event)
    response = s3.put_object(
        Bucket='s3-bucket',
        Key=str(uuid.uuid4()) + '.jpg',
        Body=event.body,
    )
    image = response['Body'].read()
    return {
        'headers': {
            "Content-Type": "image/jpg",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token",
            "Access-Control-Allow-Methods": "*",
            "Access-Control-Allow-Credentials": True,
        },
        'statusCode': 200,
        'body': base64.b64encode(image).decode('utf-8'),
        'isBase64Encoded': True
    }
I tried replacing Body = event.body with Body=event['body'] or Body = event('body') but it still doesn't work.
I expect my lambda function to be able to upload a file to my S3 bucket.
Your call to put_object will raise an exception if it fails. You can catch this and respond accordingly, for example:
import uuid

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client('s3')

def lambda_handler(event, context):
    print(event)
    try:
        response = s3.put_object(
            Bucket='s3-bucket',
            Key=str(uuid.uuid4()) + '.jpg',
            Body=event.body)
        return {
            'headers': { ... },
            'statusCode': 200
        }
    except ClientError as e:
        print("Error from put_object:", e)
        return {
            'headers': { ... },
            'statusCode': 500
        }
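As for the KeyError on Body itself: it most likely comes from image = response['Body'].read(), because the put_object response contains only metadata (ETag and friends), never a 'Body' key; only get_object returns one. The upload payload lives in event['body'] (a dict key, not an attribute) with an API Gateway proxy integration, base64-encoded for binary content. A sketch of both fixes together, assuming the standard proxy event shape:

import base64
import uuid

import boto3

s3 = boto3.client('s3')

def lambda_handler(event, context):
    # API Gateway proxy events carry the payload under the 'body' key,
    # base64-encoded for binary content
    image = base64.b64decode(event['body'])
    s3.put_object(Bucket='s3-bucket',
                  Key=str(uuid.uuid4()) + '.jpg',
                  Body=image)
    # put_object returns only metadata (no 'Body'), so echo the upload back
    return {
        'statusCode': 200,
        'body': base64.b64encode(image).decode('utf-8'),
        'isBase64Encoded': True,
    }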
I am trying to mock some S3 operations, and after banging my head against the Stubber object, I tried something like the following:
def mock_make_api_call(self, operation_name, kwarg):
    if operation_name == "ListObjectsV2":
        return {
            "KeyCount": 1,
            "Contents": [
                {"Key": "sensor_1", "LastModified": "2021-11-30T12:58:14+00:00"}
            ],
        }
    elif operation_name == "GetObjectTagging":
        return {"TagSet": []}
    elif operation_name == "HeadObject":
        return {
            "ContentLength": 10,
            "ContentType": "gzip",
            "ResponseMetadata": {
                "Bucket": "1",
            },
        }
    elif operation_name == "GetObject":
        content = get_object_response()
        return {
            "ContentLength": len(content),
            "ContentType": "xml",
            "ContentEncoding": "gzip",
            "Body": content,
            "ResponseMetadata": {
                "Bucket": "1",
            },
        }
It is the s3 download_file operation that is giving me a headache. As far as I can tell, it generates the HeadObject and GetObject calls.
My content generation method is as follows:
def get_object_response():
    content = b"<some-valid-xml>"
    buf = BytesIO()
    compressed = gzip.GzipFile(fileobj=buf, mode="wb")
    compressed.write(content)
    compressed.close()
    return buf.getvalue()
The way it gets used is:
with NamedTemporaryFile() as tmp:
    s3_client.download_file(Bucket=..., Key=..., Filename=tmp.name)
However, my test fails with:
self = <s3transfer.utils.StreamReaderProgress object at 0x116a77820>
args = (262144,), kwargs = {}

    def read(self, *args, **kwargs):
>       value = self._stream.read(*args, **kwargs)
E       AttributeError: 'bytes' object has no attribute 'read'
I simply cannot figure out how to encode the response so that the generated content can be saved.
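The traceback suggests the stubbed Body needs to be a file-like object with a read() method rather than raw bytes, since s3transfer streams it in chunks. Wrapping the gzipped bytes in botocore's StreamingBody should do it; a sketch against the mock above:

from io import BytesIO

from botocore.response import StreamingBody

content = get_object_response()
# StreamingBody gives the raw bytes the read() interface s3transfer expects
body = StreamingBody(BytesIO(content), len(content))
# ...then in mock_make_api_call, return it for "GetObject":
# return {"ContentLength": len(content), ..., "Body": body, ...}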
I have the following python code snippet (assuming the update_item arguments are valid):
# foo.py
import boto3
import botocore.exceptions

def update_table(value):
    dynamodb = boto3.resource('dynamodb')
    try:
        table = dynamodb.Table("test-table")
        response = table.update_item(Key={'value': value})
    except botocore.exceptions.ClientError as err:
        return None
    return response
Now, I would like to write a unit test that checks whether this function returns None when valid arguments are passed but a ClientError is thrown.
I mocked out a DynamoDB Table in a setUp function as shown:
# test_foo.py
@mock_dynamodb2
class TestUpdateTable(unittest.TestCase):
    def setUp(self):
        self.dynamodb = boto3.resource('dynamodb')
        # Creates a mock table
        self.table = self.dynamodb.create_table(
            TableName='test-table',
            KeySchema=[
                {
                    'AttributeName': 'value',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'value',
                    'AttributeType': 'S'
                },
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5,
                'WriteCapacityUnits': 5
            }
        )
and in my test function under this class:
    def test_returns_none(self):
        stubber = Stubber(self.dynamodb.meta.client)
        stubber.add_client_error('update_item')
        stubber.activate()
        response = foo.update_table("123")
        stubber.deactivate()
        self.assertEqual(response, None)
Now, I expect this to stub out the table properly and raise a ClientError when the function calls update_item. However, no ClientError seems to be raised, so the function under test does not return None.
Does anyone know the reason for that? And how can I implement this test so that the mock table raises a ClientError and the function returns None?
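A likely reason: Stubber only intercepts calls made through the exact client it wraps, and update_table builds a fresh boto3.resource('dynamodb') inside the function, so the stub on self.dynamodb.meta.client never sees the update_item call. One way around it is to patch foo's resource factory so the function receives the stubbed resource; a sketch (assuming foo imports boto3 at module level):

from unittest.mock import patch

def test_returns_none(self):
    stubber = Stubber(self.dynamodb.meta.client)
    stubber.add_client_error('update_item')
    # route foo's boto3.resource('dynamodb') to the stubbed resource
    with stubber, patch('foo.boto3.resource', return_value=self.dynamodb):
        response = foo.update_table("123")
    self.assertEqual(response, None)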
In boto3, how can I use Stubber to mock download_fileobj which is a resource method?
For example:
import boto3
from botocore.stub import Stubber

s3 = boto3.resource('s3')

def foo(s3):
    with open('filename', 'wb') as data:
        s3.download_fileobj('mybucket', 'mykey', data)

def test_foo():
    s3_test = boto3.resource('s3')
    s3_stub = Stubber(s3_test.meta.client)
    s3_stub.add_response()  # something here
    with s3_stub:
        foo(s3_test)
download_fileobj translates to a 'head_object' and a 'get_object' call. Here's a basic code snippet that stubs both calls.
from io import BytesIO

bucket_name = 'mybucket'
key = 'mykey'
content = b'This is the content'

expected_params = {
    'Bucket': bucket_name,
    'Key': key,
}

# Head object
response = {
    'ContentLength': len(content),
    'ContentType': 'utf-8',
    'ResponseMetadata': {
        'Bucket': bucket_name,
    }
}
s3_stub.add_response('head_object', response, expected_params)

# Get object
data = BytesIO()
data.write(content)
data.seek(0)
response = {
    'ContentLength': len(content),
    'ContentType': 'utf-8',
    'Body': data,
    'ResponseMetadata': {
        'Bucket': bucket_name,
    }
}
s3_stub.add_response('get_object', response, expected_params)
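With both responses queued on s3_stub, the question's test_foo should run to completion; a quick way to check the result (file name and content as above):

with s3_stub:
    foo(s3_test)

# the stubbed get_object body should now be on disk
with open('filename', 'rb') as f:
    assert f.read() == b'This is the content'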
I am doing something wrong here while comparing two images in different S3 buckets. Even though I am comparing images of a male and a female, it gives 99% confidence.
Am I missing something in the declaration? Maybe this line is causing the problem:
key_target = "targett/" + key
Or my event code is error prone; this is where I have set my source bucket, even though I have also set it in the lambda function for testing below. What else do I need to correct so that it returns the confidence within the specified range?
from __future__ import print_function

import boto3
from decimal import Decimal
import json
import urllib

print('Loading function')

rekognition = boto3.client('rekognition')
# iot = boto3.client('iot-data')

# --------------- Helper Functions to call Rekognition APIs ------------------

def compare_faces(bucket, key, key_target, threshold=75):
    response = rekognition.compare_faces(
        SourceImage={
            "S3Object": {
                "Bucket": 'dacss',
                "Name": 'obama.jpg',
            }
        },
        TargetImage={
            "S3Object": {
                "Bucket": 'targett',
                "Name": 'michelle.jpg',
            }
        },
        SimilarityThreshold=threshold,
    )
    return response['SourceImageFace'], response['FaceMatches']

# --------------- Main handler ------------------

def lambda_handler(event, context):
    print("Received event: " + json.dumps(event, indent=2))
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
    key_target = "targett/" + key
    try:
        response = compare_faces(bucket, key, key_target)
        print(response)
        # mypayload = json.dumps(response)
        # iotResponse = iot.publish(
        #     topic="rekognition/result",
        #     qos=1,
        #     payload=mypayload)
        # print(iotResponse)
        # return iotResponse
        print(response)
        return response
    except Exception as e:
        print(e)
        print("Error processing object {} from bucket {}. ".format(key, bucket) +
              "Make sure your object and bucket exist and your bucket is in "
              "the same region as this function.")
        raise e
---------------output-----------------
Response:
[
    {
        "BoundingBox": {
            "Width": 0.7813892960548401,
            "Top": 0.15193353593349457,
            "Left": 0.1047489121556282,
            "Height": 0.8365015387535095
        },
        "Confidence": 99.99993896484375
    },
    []
]
I think you have a misunderstanding here. A compare_faces response looks like this:
{
    'FaceMatches': [
        {
            'Face': {
                'BoundingBox': {
                    'Height': 0.33481481671333313,
                    'Left': 0.31888890266418457,
                    'Top': 0.4933333396911621,
                    'Width': 0.25,
                },
                'Confidence': 99.9991226196289,
            },
            'Similarity': 100,
        },
    ],
    'SourceImageFace': {
        'BoundingBox': {
            'Height': 0.33481481671333313,
            'Left': 0.31888890266418457,
            'Top': 0.4933333396911621,
            'Width': 0.25,
        },
        'Confidence': 99.9991226196289,
    },
    'ResponseMetadata': {
        '...': '...',
    },
}
Here the Confidence score doesn't show whether the faces match; it shows how confident Rekognition is that it found a face in the image. "Similarity" shows the actual match between the images.
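In code terms: read Similarity from each entry in FaceMatches rather than Confidence. A small sketch against the response returned by the code above:

source_face, matches = compare_faces(bucket, key, key_target)

if not matches:
    print("No target face matched the source above the threshold")
for match in matches:
    # Similarity: how alike the two faces are (the number you want)
    # Confidence: only how sure Rekognition is that this is a face
    print("Similarity:", match['Similarity'])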
These lines:
TargetImage={
    "S3Object": {
        "Bucket": 'targett',
        "Name": 'michelle.jpg',
    }
},
should be:
TargetImage={
    "S3Object": {
        "Bucket": 'targett',
        "Name": key_target,
    }
},