Imagine the following functions, which should upload and copy something to S3:
class TestAttachments(TestCase):
# YAML is the only serializer supporting binary
@override_env(AWS_DEFAULT_REGION='eu-west-1') # So the test doesn't fail depending on env. vars
@my_vcr.use_cassette(serializer='yaml')
def test_copy_attachments_to_sent_folder(self):
with self.assertRaises(
CopyAttachmentException,
msg="Failed to copy attachments to sent folder for attachment URL: http://example.com/foo.jpg"
) as cm:
copy_attachments_to_sent_folder(["http://example.com/foo.jpg"])
self.assertEqual(cm.exception.__cause__.__class__, InvalidAttachmentURL)
TEST_UUIDS = ["uuid_0"]
with patch.object(uuid, 'uuid4', side_effect=TEST_UUIDS):
result = copy_attachments_to_sent_folder([
f"https://{settings.MESSAGE_ATTACHMENTS_S3_BUCKET}.s3.amazonaws.com/attachments/Test+video.mov"
])
self.assertEqual(
[AttachmentMetadata(
s3_key=f"attachments/sent/{TEST_UUIDS[0]}/video/quicktime/Test video.mov",
filename="Test video.mov",
mime_type="video/quicktime",
size=178653,
)],
result
)
It should test the following function:
def copy_attachments_to_sent_folder(urls: List[str]) -> List[AttachmentMetadata]:
# Copy the attachment to the sent folder in parallel
with futures.ThreadPoolExecutor(max_workers=4) as executor:
urls_by_future = {executor.submit(copy_attachment_to_sent_folder, url): url for url in urls}
results_by_url = {}
for future in futures.as_completed(urls_by_future.keys()):
try:
results_by_url[urls_by_future[future]] = future.result()
except Exception as e:
raise CopyAttachmentException(
f"Failed to copy attachments to sent folder for attachment URL: {urls_by_future[future]}"
) from e
# The futures can complete out-of-order, so we need to re-order them to match the original order here
return [results_by_url[url] for url in urls]
That function, in turn, uses this one internally:
def copy_attachment_to_sent_folder(url: str) -> AttachmentMetadata:
aws_session = attachments_aws_session()
s3_client = aws_session.client("s3")
parse_result = urlparse(url, allow_fragments=False)
# Allow both with/without AWS region in hostname
attachment_hostname_regex = fr"{settings.MESSAGE_ATTACHMENTS_S3_BUCKET}\.s3\.(.+?\.)?amazonaws\.com"
if not (
parse_result.hostname is not None and re.fullmatch(attachment_hostname_regex, parse_result.hostname) is not None
and parse_result.scheme == "https"
):
raise InvalidAttachmentURL(f"URL {url} is not a valid attachment URL")
path = unquote_plus(parse_result.path)
key = path.lstrip("/")
_, _, filename = key.rpartition("/")
object_metadata = get_s3_object_metadata(aws_session=aws_session, object_bucket=settings.MESSAGE_ATTACHMENTS_S3_BUCKET,
object_key=key)
s3_resource = aws_session.resource("s3")
# Place images in their own path so that provider1 only has access to these files
destination_key = f"attachments/sent/{uuid.uuid4()}/{object_metadata.mime_type}/{filename}"
try:
# Copy to sent attachments folder and set content-type
response = s3_resource.Object(settings.MESSAGE_ATTACHMENTS_S3_BUCKET, destination_key).copy_from(
# Tell S3 to set cache header to "Forever"
Expires=datetime(2100, 1, 1),
CopySource={"Bucket": settings.MESSAGE_ATTACHMENTS_S3_BUCKET, "Key": key},
ACL="private",
MetadataDirective="REPLACE",
# Set mime type on the destination file
ContentType=object_metadata.mime_type,
# Only copy it if the user did not modify the file since we fetched it to detect mimetype :)
# S3 allows overwriting of the files. A user could potentially overwrite
# the already uploaded file with a file of a different type after they upload it and after we detect the
# mimetype, but before we copy it to the destination. Setting CopySourceIfUnmodifiedSince prevents that.
CopySourceIfUnmodifiedSince=object_metadata.last_modified,
)
except ClientError as e:
raise CopyAttachmentException from e
if response.get("CopyObjectResult", {}).get("ETag", None) is None:
raise CopyAttachmentException(
f"Copy of object '{key}' to '{destination_key}' was not successful. Response: {response}"
)
return AttachmentMetadata(
s3_key=destination_key,
filename=filename,
mime_type=object_metadata.mime_type,
size=object_metadata.byte_size,
)
This works fine on botocore==1.23.46, but as soon as we upgrade it, we get the following error:
Error
Traceback (most recent call last):
File "/tl/test/messaging/attachments.py", line 105, in copy_attachments_to_sent_folder
results_by_url[urls_by_future[future]] = future.result()
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/concurrent/futures/_base.py", line 391, in __get_result
raise self._exception
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/tl/test/messaging/attachments.py", line 64, in copy_attachment_to_sent_folder
destination_key = f"attachments/sent/{uuid.uuid4()}/{object_metadata.mime_type}/{filename}"
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/unittest/mock.py", line 1104, in __call__
return self._mock_call(*args, **kwargs)
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/unittest/mock.py", line 1108, in _mock_call
return self._execute_mock_call(*args, **kwargs)
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/unittest/mock.py", line 1165, in _execute_mock_call
result = next(effect)
StopIteration
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/tl/.pyenv/versions/3.10.2/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/tl/test/venv/lib/python3.10/site-packages/vcr/cassette.py", line 100, in __call__
return type(self)(self.cls, args_getter)._execute_function(function, args, kwargs)
File "/tl/test/venv/lib/python3.10/site-packages/vcr/cassette.py", line 114, in _execute_function
return self._handle_function(fn=handle_function)
File "/tl/test/venv/lib/python3.10/site-packages/vcr/cassette.py", line 138, in _handle_function
return fn(cassette)
File "/tl/test/venv/lib/python3.10/site-packages/vcr/cassette.py", line 107, in handle_function
return function(*args, **kwargs)
File "/tl/test/messaging/test_attachments.py", line 27, in test_copy_attachments_to_sent_folder
result = copy_attachments_to_sent_folder([
File "/tl/test/messaging/attachments.py", line 107, in copy_attachments_to_sent_folder
raise CopyAttachmentException(
messaging.attachments.CopyAttachmentException: Failed to copy attachments to sent folder for attachment URL: https://test-message-attachments.s3.amazonaws.com/attachments/Test+video.mov
I assume it has to do with destination_key = f"attachments/sent/{uuid.uuid4()}/{object_metadata.mime_type}/{filename}", but I am not sure what I am doing wrong, or why exactly it works on botocore<=1.23.46 and not after.
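The StopIteration suggests uuid.uuid4 is being called more times than the one-item side_effect list allows; newer botocore releases appear to call uuid.uuid4() internally, so patching the uuid module globally also feeds botocore from your list. A minimal sketch of a more targeted patch, assuming the module under test is importable as messaging.attachments (inferred from the traceback paths):
from unittest.mock import patch

TEST_UUIDS = ["uuid_0"]
# Patch the `uuid` name only as the module under test sees it, so
# botocore's own uuid.uuid4() calls still reach the real module and
# cannot exhaust the one-item side_effect list.
with patch("messaging.attachments.uuid") as mock_uuid:
    mock_uuid.uuid4.side_effect = TEST_UUIDS
    result = copy_attachments_to_sent_folder([attachment_url])  # URL as in the test above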
When trying to get information about a virtual machine, I am met with the following error:
Traceback (most recent call last):
File "/Users/shilpakancharla/Documents/function_app/WeedsMediaUploadTrigger/event_process.py", line 73, in <module>
vm_data, vm_status = get_azure_vm(key.RESOURCE_GROUP, key.VIRTUAL_MACHINE_NAME)
File "<decorator-gen-2>", line 2, in get_azure_vm
File "/usr/local/lib/python3.9/site-packages/retry/api.py", line 73, in retry_decorator
return __retry_internal(partial(f, *args, **kwargs), exceptions, tries, delay, max_delay, backoff, jitter,
File "/usr/local/lib/python3.9/site-packages/retry/api.py", line 33, in __retry_internal
return f()
File "/Users/shilpakancharla/Documents/function_app/WeedsMediaUploadTrigger/event_process.py", line 65, in get_azure_vm
vm_instance = compute_client.virtual_machines.get(resource_group_name,
File "/usr/local/lib/python3.9/site-packages/azure/mgmt/compute/v2019_12_01/operations/_virtual_machines_operations.py", line 641, in get
map_error(status_code=response.status_code, response=response, error_map=error_map)
File "/usr/local/lib/python3.9/site-packages/azure/core/exceptions.py", line 102, in map_error
raise error
azure.core.exceptions.ResourceNotFoundError: Operation returned an invalid status 'Not Found'
Does this mean that get() is not a valid method of compute_client.virtual_machines? The documentation says it is valid, so I am not sure why this error arises. This is my implementation; I call it with vm_data, vm_status = get_azure_vm(key.RESOURCE_GROUP, key.VIRTUAL_MACHINE_NAME). I am confident that all my credentials are correct.
def get_access_to_virtual_machine():
subscription_id = key.SUBSCRIPTION_ID
credentials = DefaultAzureCredential(authority = AzureAuthorityHosts.AZURE_GOVERNMENT,
exclude_environment_credential = True,
exclude_managed_identity_credential = True,
exclude_shared_token_cache_credential = True)
client = KeyClient(vault_url = key.VAULT_URL, credential = credentials)
compute_client = ComputeManagementClient(credentials, subscription_id,
base_url = 'https://portal.azure.us')
return compute_client
def get_azure_vm(resource_group_name, virtual_machine_name):
compute_client = get_access_to_virtual_machine()
vm_instance = compute_client.virtual_machines.get(resource_group_name,
virtual_machine_name)
vm_status = vm_instance.instance_view.statuses[1].display_status
return vm_instance, vm_status
You should specify expand if you want to get a VM instance view. Try the code below:
compute_client.virtual_machines.get(resource_group_name, virtual_machine_name, expand='instanceView').instance_view.statuses[1].display_status
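Applied to the helper above, the fix might look like this (a sketch reusing the asker's own get_access_to_virtual_machine factory):
def get_azure_vm(resource_group_name, virtual_machine_name):
    compute_client = get_access_to_virtual_machine()
    # expand='instanceView' asks the API to embed the instance view in the
    # response, so .instance_view is populated instead of None.
    vm_instance = compute_client.virtual_machines.get(
        resource_group_name, virtual_machine_name, expand='instanceView')
    vm_status = vm_instance.instance_view.statuses[1].display_status
    return vm_instance, vm_status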
I created a .xls file in my Python program that I am trying to push to S3 with boto3.
My code:
just_models = [{"key": "1"}, {"key2": 2}]
df_just_models = pd.DataFrame(just_models)
# Notice desired file extension.
just_models = df_just_models.to_excel('../file.xls')
def save_just_models():
# Save to S3
date_file = datetime.now().strftime("%Y-%m-%d")
s3 = boto3.resource('s3')
obj = s3.Object('directory', 'indice_na/just_models' + date_file + '_indice_na.xls')
obj.put(Body=just_models)
save_just_models()
My error:
Traceback (most recent call last):
File "indicena.py", line 11985, in <module>
save_just_models()
File "indicena.py", line 11984, in save_just_models
obj.put(Body=just_models)
File "/home/bolgi/.local/lib/python3.8/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/bolgi/.local/lib/python3.8/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(*args, **params)
File "/home/bolgi/.local/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/bolgi/.local/lib/python3.8/site-packages/botocore/client.py", line 598, in _make_api_call
request_dict = self._convert_to_request_dict(
File "/home/bolgi/.local/lib/python3.8/site-packages/botocore/client.py", line 646, in _convert_to_request_dict
request_dict = self._serializer.serialize_to_request(
File "/home/bolgi/.local/lib/python3.8/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter Body, value: None, type: <class 'NoneType'>, valid types: <class 'bytes'>, <class 'bytearray'>, file-like object
The error comes from obj.put(), but I don't know exactly how to resolve it.
Yes, you can write directly to S3 from memory; there is no need to save the .xls file on your local hard drive. Write to a BytesIO object instead of a file, then send the BytesIO contents to S3, as shown below. Also, the default pandas .xls writer is deprecated, so consider a newer engine such as xlsxwriter:
import io
from datetime import datetime

import boto3
import pandas as pd

just_models = [{"key": "1"}, {"key2": 2}]
df_just_models = pd.DataFrame(just_models)

# Write the spreadsheet into an in-memory buffer instead of a file.
mem_file = io.BytesIO()
df_just_models.to_excel(mem_file, engine='xlsxwriter')

def save_just_models():
    # Save to S3
    date_file = datetime.now().strftime("%Y-%m-%d")
    s3 = boto3.resource('s3')
    obj = s3.Object('directory', 'indice_na/just_models' + date_file + '_indice_na.xls')
    obj.put(Body=mem_file.getvalue())

save_just_models()
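If you prefer boto3's managed transfer API, upload_fileobj works on the same in-memory buffer; rewind it first, since to_excel leaves the stream position at the end. A sketch with the same bucket and key as above:
mem_file.seek(0)  # rewind: to_excel left the position at EOF
date_file = datetime.now().strftime("%Y-%m-%d")
s3 = boto3.resource('s3')
s3.Bucket('directory').upload_fileobj(
    mem_file, 'indice_na/just_models' + date_file + '_indice_na.xls')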
I'm trying to retrieve some data and mail it out from within a Google Cloud Function using SendGrid.
I tried converting the data (a list of dictionaries with the same flat structure) to CSV as detailed here, but this fails because the filesystem is read-only.
To counter this, I used io.StringIO() to store the CSV in memory.
However, I get the following error/stack trace during execution:
Traceback (most recent call last):
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 383, in run_background_function
    _function_handler.invoke_user_function(event_object)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function
    return call_user_function(request_or_event)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 214, in call_user_function
    event_context.Context(**request_or_event.context))
  File "/user_code/main.py", line 267, in session_updated
    summarize_session(sessionid, doctorid)
  File "/user_code/main.py", line 375, in summarize_session
    send_email([docemail], data)
  File "/user_code/main.py", line 328, in send_email
    response = sg.send(message)
  File "/env/local/lib/python3.7/site-packages/sendgrid/sendgrid.py", line 98, in send
    response = self.client.mail.send.post(request_body=message.get())
  File "/env/local/lib/python3.7/site-packages/python_http_client/client.py", line 251, in http_request
    data = json.dumps(request_body).encode('utf-8')
  File "/opt/python3.7/lib/python3.7/json/__init__.py", line 231, in dumps
    return _default_encoder.encode(obj)
  File "/opt/python3.7/lib/python3.7/json/encoder.py", line 199, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/opt/python3.7/lib/python3.7/json/encoder.py", line 257, in iterencode
    return _iterencode(o, 0)
  File "/opt/python3.7/lib/python3.7/json/encoder.py", line 179, in default
    raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type StringIO is not JSON serializable
The code is as follows:
def send_email(to_emails, datadict):
message = Mail(
from_email='info@domain.com',
#to_emails= to_emails,
to_emails= ['dude@domain.com'],
subject='Summary of your session',
html_content='<strong>Data Summary</strong>\
<p>This email is a summary of your session. Please check the attachment for details. </p>')
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
keys = datadict[0].keys()
try:
output_file = io.StringIO()
#output_file = io.BytesIO() # results in a typeerror
# https://stackoverflow.com/questions/34283178/typeerror-a-bytes-like-object-is-required-not-str-in-python-and-csv
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(datadict)
print("Attaching")
message.attachment = [
Attachment(FileContent(output_file),
FileType('text/csv'),
FileName('sessiondata.csv'),
Disposition('inline'),
ContentId('SessionData')),
]
except Exception as e:
print("Exception:")
print(e)
return
response = sg.send(message)
How do I convert the list of dictionaries to a CSV and attach it to an email without opening a physical file on the filesystem?
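The traceback indicates the Attachment still holds the StringIO object itself when the message is serialized to JSON. In sendgrid v6, FileContent expects a base64-encoded string, so one approach (a sketch, assuming the helper classes already imported in the code above) is to encode the buffer's contents before attaching:
import base64

# FileContent wants a base64-encoded string, not a file object, so
# encode the CSV text that DictWriter wrote into the StringIO buffer.
encoded = base64.b64encode(output_file.getvalue().encode('utf-8')).decode('ascii')
message.attachment = Attachment(
    FileContent(encoded),
    FileType('text/csv'),
    FileName('sessiondata.csv'),
    Disposition('inline'),
    ContentId('SessionData'),
)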
I am working with a mobile game API and a Telegram bot. It worked when I put in a fixed clan tag, but now I want to let the user send a tag that gets added to the URL; the app should then look up that clan and fetch the right stats. Everything else is OK, but now I get the error below and cannot find my mistake. I would be happy if you could help!
def main():
last_update_id = None
message = ""
while True:
updates = get_updates(last_update_id)
if len(updates["result"]) > 0:
last_update_id = get_last_update_id(updates) + 1
message = get_last_update_Message(updates)
clan_stats(updates, message)
def get_last_update_Message(updates):
message = ""
for update in updates["result"]:
message = update["message"]
return message["text"]
def clan_stats(updates, ID):
#https://api.royaleapi.com/clan/1RLU78YU
Link = '"https://api.royaleapi.com/clan/' + ID + '"'
r=requests.get(Link, headers={"Accept":"application/json",
"authorization":"Bearer TOKENHERE"})
clan = r.json()
Full Traceback:
Traceback (most recent call last):
File "/home/Lee63225/clashroyaleclanbot.py", line 188, in <module>
main()
File "/home/Lee63225/clashroyaleclanbot.py", line 184, in main
clan_stats(updates, message)
File "/home/Lee63225/clashroyaleclanbot.py", line 80, in clan_stats
"authorization":"Bearer TOKENHERE"})
File "/usr/lib/python3.7/site-packages/requests/api.py", line 72, in get
return request('get', url, params=params, **kwargs)
File "/usr/lib/python3.7/site-packages/requests/api.py", line 58, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/lib/python3.7/site-packages/requests/sessions.py", line 503, in request
prep.url, proxies, stream, verify, cert
File "/usr/lib/python3.7/site-packages/requests/sessions.py", line 676, in merge_environment_settings
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
File "/usr/lib/python3.7/site-packages/requests/utils.py", line 760, in get_environ_proxies
if should_bypass_proxies(url, no_proxy=no_proxy):
File "/usr/lib/python3.7/site-packages/requests/utils.py", line 716, in should_bypass_proxies
if is_ipv4_address(parsed.hostname):
File "/usr/lib/python3.7/site-packages/requests/utils.py", line 640, in is_ipv4_address
socket.inet_aton(string_ip)
TypeError: inet_aton() argument 1 must be str, not None
Thank you!
I think it should rather be
Link = 'https://api.royaleapi.com/clan/' + ID
There are stray surrounding " characters in your attempt.
Also, looking at it again: the function is called as clan_stats(updates, message), and that message needs to be a clan tag. Make sure it is; right now it comes from get_last_update_Message() and looks suspicious.
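Putting both fixes together, clan_stats might look like this (a sketch; the Bearer token placeholder is from the question):
def clan_stats(updates, clan_tag):
    # No extra quote characters around the URL; the tag is appended directly.
    link = 'https://api.royaleapi.com/clan/' + clan_tag
    r = requests.get(link, headers={
        "Accept": "application/json",
        "authorization": "Bearer TOKENHERE",
    })
    clan = r.json()
    return clan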
So I'm trying to get Instagram photos that fit certain parameters, and I'm getting the following stack trace:
Traceback (most recent call last):
File "instagram_find_shows.py", line 83, in <module>
if __name__ == "__main__": main()
File "instagram_find_shows.py", line 48, in main
get_instagram_posts(show_name, show_time, coordinates)
File "instagram_find_shows.py", line 73, in get_instagram_posts
str(coordinates[1]), min_time, max_time)
File "C:\Users\User Name\Anaconda3\lib\site-packages\instagram\bind.py", line 197, in _call
return method.execute()
File "C:\Users\User Name\Anaconda3\lib\site-packages\instagram\bind.py", line 189, in execute
content, next = self._do_api_request(url, method, body, headers)
File "C:\Users\User Name\Anaconda3\lib\site-packages\instagram\bind.py", line 163, in _do_api_request
raise InstagramAPIError(status_code, content_obj['meta']['error_type'], content_obj['meta']['error_message'])
instagram.bind.InstagramAPIError: (400) OAuthPermissionsException-This request requires scope=public_content, but this access token is not authorized with this scope. The user must re-authorize your application with scope=public_content to be granted this permissions.
The code is as follows:
def get_instagram_posts(name, time, coordinates):
max_time_dt = time + timedelta(hours=3)
min_time_dt = time - timedelta(hours=1)
max_time = str(calendar.timegm(max_time_dt.timetuple()))
min_time = str(calendar.timegm(min_time_dt.timetuple()))
dist_rad_str = str(insta_dist_radius_m)
count_str = str(insta_count)
api = InstagramAPI(access_token=insta_access_token,
client_secret=insta_client_secret)
r = api.media_search(name, count_str, str(coordinates[0]),
str(coordinates[1]), min_time, max_time)
photos = []
for media in r:
photos.append('<img src="%s"/>' % media.images['thumbnail'].url)
print(photos[0])
I can't figure out what to do. I'm literally just trying to run a simple test, not trying to cripple their API. Is there any way to do this within Instagram's parameters? Thanks so much!
Fixed by going to the following URL in the browser:
https://www.instagram.com/oauth/authorize?client_id=[CLIENT_ID]&redirect_uri=[REDIRECT_URI]&response_type=code&scope=basic+public_content+follower_list+comments+relationships+likes