How to use Stubber to mock download_fileobj? - python

In boto3, how can I use Stubber to mock download_fileobj which is a resource method?
For example:
import boto3
from botocore.stub import Stubber
s3 = boto3.resource('s3')
def foo(s3):
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
def test_foo():
s3_test = boto3.resource('s3')
s3_stub = Stubber(s3_test.meta.client)
s3_stub.add_response() # something here
with s3_stub:
foo(s3_test)

get_fileobj translates to a 'head_object' and 'get_object' call. Here's a basic code snippet that stubs both calls.
bucket_name = 'mybucket'
key = 'mykey'
content = 'This is the content'
expected_params = {
'Bucket': bucket_name,
'Key': key,
}
# Head object
response = {
'ContentLength': 10,
'ContentType': 'utf-8',
'ResponseMetadata': {
'Bucket': bucket_name,
}
}
s3_stub.add_response('head_object', response, expected_params)
# Get object
data = BytesIO()
data.write(content)
data.seek(0)
response = {
'ContentLength': len(content),
'ContentType': 'utf-8',
'Body': data,
'ResponseMetadata': {
'Bucket': bucket_name,
}
}
s3_stub.add_response('get_object', response, expected_params)

Related

Writing a lambda function to upload files to my S3 bucket

I'm trying to write a lambda function to upload files to my S3 bucket.
I'm new to coding so I don't understand why this doesn't work.
I get a "KeyError" for Body. Can anyone explain what this means and how do I fix my code?
Thanks in advance.
import base64
import boto3
import json
import uuid
s3 = boto3.client('s3')
def lambda_handler(event, context):
print(event)
response = s3.put_object(
Bucket='s3-bucket',
Key=str(uuid.uuid4()) + '.jpg',
Body=event.body,
)
image = response['Body'].read()
return {
'headers': {
"Content-Type": "image/jpg",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token",
"Access-Control-Allow-Methods": "*",
"Access-Control-Allow-Credentials": True,
},
'statusCode': 200,
'body': base64.b64encode(image).decode('utf-8'),
'isBase64Encoded': True
}
I tried replacing Body = event.body with Body=event['body'] or Body = event('body') but it still doesn't work.
I expect my lambda function to be able to upload a file to my S3 bucket.
Your call to put_object will raise an exception if it fails. You can catch this and respond accordingly, for example:
import boto3
from botocore.exceptions import ClientError
def lambda_handler(event, context):
print(event)
try:
response = s3.put_object(
Bucket='s3-bucket',
Key=str(uuid.uuid4()) + '.jpg',
Body=event.body)
return {
'headers': { ... },
'statusCode': 200
}
except ClientError as e:
print("Error from put_object:", event)
return {
'headers': { ... },
'statusCode': 500
}

How to check AWS lambda handler in unit test python

I need to write a unit test for the AWS lambda handler. I want to give some JSON files to the handler method and get some output. But I come across trouble with mocking the S3 "event".
My lambda handler looks like this:
def handler(event, context):
print(f'Event: {event}')
s3 = boto3.resource('s3')
bucket = s3.Bucket(event["bucket"])
word_correction = correction.WordCorrection()
for obj in bucket.objects.all():
key = obj.key
body = obj.get()['Body'].read()
data = get_data_from_file(body)
if key.endswith('.json'):
try:
word_correction.create_duplicated_words_file(data)
except Exception as e:
print(e)
return {
"success": False,
"response": f"Failed to read file - {e}"
}
try:
corrected_word_list = word_correction.spell_words(json.loads(body))
except Exception as e:
print(e)
return {
"success": False,
"response": f"Failed to correct words - {e}"
}
else:
return {
"success": False,
"response": "Invalid file type. File must have .json extension."
}
return {
"success": True,
"response": corrected_word_list
}
Here I put some JSON data to my module word_correction.
From this point my unit test
S3_BUCKET_NAME = 'dev-ayazv-lambda-spell-correction'
DEFAULT_REGION = 'us-east-1'
S3_TEST_FILE_KEY = 'pdfs/manual_test/page-data-page-9.json'
S3_TEST_FILE_CONTENT = {"Blocks": [{"BlockType": "WORD", "Confidence": 93.18, "Text": "Test"}]}
#mock_s3
class TestLambdaFunction(unittest.TestCase):
def setUp(self):
self.s3 = boto3.resource('s3', region_name=DEFAULT_REGION)
self.s3_bucket = self.s3.create_bucket(Bucket=S3_BUCKET_NAME)
self.s3_bucket.put_object(Key=S3_TEST_FILE_KEY,
Body=json.dumps(S3_TEST_FILE_CONTENT))
def test_get_data_from_file(self):
from functions.spell_correction.src.index import get_data_from_file
json_encode_data = json.dumps(S3_TEST_FILE_CONTENT, indent=2).encode('utf-8')
file_content = get_data_from_file(json_encode_data)
self.assertEqual(file_content, S3_TEST_FILE_CONTENT)
def test_handler(self):
from functions.spell_correction.src.index import handler
event = {
'bucket': {
'name': S3_BUCKET_NAME
},
'object': {
'key': S3_TEST_FILE_KEY
}
}
result = handler(event, {})
self.assertEqual(result, {"success": True, "response": []})
I was trying to mock the S3_Bucket S3_File_Key and S3_File Content.
But I face with the problem that my tests drops in this moment
bucket = {'name': 'dev-ayazv-lambda-spell-correction'}
def validate_bucket_name(params, **kwargs):
if 'Bucket' not in params:
return
bucket = params['Bucket']
> if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket):
E TypeError: expected string or bytes-like object

Use AWS Lambda and SES to send an email

I ran the following script which works on my computer but doesnt work in AWS Lambda. All I did was added "def lambda_handler(event, context): return event" function as its required by the lambda. I ran the Lambda test a few times.
I am not getting the SES email and there's no errors when I execute it in Lambda. Any idea why?
import boto3
from botocore.exceptions import ClientError
SENDER = "Sender Name <testu#test.com>"
RECIPIENT = "test#test.com"
CONFIGURATION_SET = "ConfigSet"
AWS_REGION = "ap-southeast-2"
SUBJECT = "Amazon SES Test (SDK for Python)"
BODY_TEXT = ("Amazon SES Test (Python)\r\n"
"This email was sent with Amazon SES using the "
"AWS SDK for Python (Boto)."
)
BODY_HTML = """<html>
</html>
"""
def lambda_handler(event, context):
return event
# The character encoding for the email.
CHARSET = "UTF-8"
# Create a new SES resource and specify a region.
client = boto3.client('ses',region_name=AWS_REGION)
try:
response = client.send_email(
Destination={
'ToAddresses': [
RECIPIENT,
],
},
Message={
'Body': {
'Html': {
'Charset': CHARSET,
'Data': BODY_HTML,
},
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
},
},
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
},
},
Source=SENDER,
ConfigurationSetName=CONFIGURATION_SET,
)
except ClientError as e:
print(e.response['Error']['Message'])
else:
print(response['MessageId'])
Just like what Anon Coward said, you have to perform the SES sending inside the handler function and put the return statement at the bottom of that function.
It should look something like this:
def lambda_handler(event, context):
response = client.send_email(PAYLOAD_HERE_)
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
},
"body": json.dumps({
"Region ": json_region
})
}

Using Lambda to get image from S3 returns a white box in Python

I'm trying to get my image from S3 bucket and return it. Here's the code:
import base64
import boto3
import json
import random
s3 = boto3.client('s3')
def lambda_handler(event, context):
number = random.randint(0,1)
if number == 1:
response = s3.get_object(
Bucket='bucket-name',
Key='image.png',
)
image = response['Body'].read()
return {
'headers': { "Content-Type": "image/png" },
'statusCode': 200,
'body': base64.b64encode(image).decode('utf-8'),
'isBase64Encoded': True
}
else:
return {
'headers': { "Content-type": "text/html" },
'statusCode': 200,
'body': "<h1>This is text</h1>",
}
When I hit my endpoint, an image of a tiny white box is returned. I know image.png exists in my bucket, and when I use the web GUI to open it in my browser, the image is loaded properly. What am I exactly doing wrong here? And in case it matters, here's how I'm uploading the image to S3 (from another Lambda):
...
# Prepare image for S3
buffer = io.BytesIO()
my_image.save(buffer, 'PNG')
buffer.seek(0) # Rewind pointer back to start
response = s3.put_object(
Bucket=S3_BUCKET_NAME,
Key=f'{S3_KEY}{filename}.png',
Body=buffer,
ContentType='image/png',
)
...
In the above code, my_image is just an image I created using the PIL library.
Thanks for any help!
Here it is how I do this:
Your lambda with corrected body:
import base64
import boto3
import json
import random
s3 = boto3.client('s3')
def lambda_handler(event, context):
response = s3.get_object(
Bucket='bucket-name',
Key='image.png',
)
image = response['Body'].read()
return {
'headers': { "Content-Type": "image/png" },
'statusCode': 200,
'body': base64.b64encode(image),
'isBase64Encoded': True
}
API gateway settings
Integration Request

Why we have to specify the attribute type when use boto3 client and not in resource?

#app.route("/companies/<string:companyId>/<string:name>/")
def get_search(companyId,name):
resp = client.get_item(
TableName=COMPANIES_TABLE,
Key={
'companyId': { 'S': companyId },
'name': { 'S': name }
}
)
item = resp.get('Item')
if not item:
return jsonify({'error': 'Company does not exist'}), 404
return jsonify({
'companyId': item.get('companyId').get('S'),
'name': item.get('name').get('S'),
'region': item.get('region').get('S')
})
The response from a DynamoDB resource object looks doesn't require me to parse the low level data structure from DynamoDB, but when I use the boto3 client I have to do that, why is that?
response = table.scan(
FilterExpression=Attr('name').eq(name)
)
item = response['Items']
import pdb;pdb.set_trace()
if not item:
return jsonify({'error': 'Company does not exist'}), 404
return jsonify({
'companyId': item.get('companyId'),
'name': item.get('name'),
'region': item.get('region')
})
In general the resource API in boto3 is a higher level abstraction from the underlying client API. It tries to hide some of the implementation details of the underlying client calls, but comes at a performance cost.
You can also use the deserializer that comes with boto3 to turn the values from client.get_item() into a Python object.
from boto3.dynamodb.types import TypeDeserializer
def main():
dynamodb_item = {
"PK": {
"S": "key"
},
"SK": {
"S": "value"
}
}
deserializer = TypeDeserializer()
deserialized = {
key: deserializer.deserialize(dynamodb_item[key])
for key in dynamodb_item.keys()
}
print(deserialized) # {'PK': 'key', 'SK': 'value'}
if __name__ == "__main__":
main()

Categories

Resources