Python Boto3 SnapshotNotFound exception. Unable to delete snapshot - python

I'm new to AWS and I have written a Boto3 script which gets snapshots by OwnerId and deletes the older ones one by one. I'm having a strange issue: the boto client finds all snapshots, but when it reaches client.delete_snapshot(SnapshotId=snapshot.snapshot_id) it throws SnapshotNotFoundException - Unable to delete snapshot with id abcd.
The weird part is that when I go to the AWS Console and search for that ID, the snapshot is there and I can delete it from the Console, and I'm using the same account credentials in the Boto3 config file:
[default]
aws_access_key_id=foo
aws_secret_access_key=bar
Here is what I have tried.
Boto3 Script
from datetime import datetime, timedelta, timezone
import boto3

ec2 = boto3.resource('ec2')
cl = boto3.client('ec2')
count = 0
snapshots = ec2.snapshots.filter(OwnerIds=['xxxxxxxxxx'])

def if_associated_to_ami(client, snapshot_id):
    img = client.describe_images(Filters=[{'Name': 'block-device-mapping.snapshot-id', 'Values': [snapshot_id]}])
    try:
        ami_id = img['Images'][0]['ImageId']
        #print("Snapshot(" + snapshot_id + ") is associated to image(" + ami_id + "). Return True")
        return True
    except IndexError:
        #print("Snapshot(" + snapshot_id + ") is not associated to any image. Return False")
        return False

for snapshot in snapshots:
    if if_associated_to_ami(cl, snapshot.snapshot_id):
        print('Unable to delete Snapshot with Id = {}. Snapshot is in use!'.format(snapshot.snapshot_id))
    else:
        start_time = snapshot.start_time
        delete_time = datetime.now(tz=timezone.utc) - timedelta(days=90)
        if delete_time > start_time:
            #snapshot.delete()
            cl.delete_snapshot(SnapshotId=snapshot.snapshot_id)
            print('Snapshot with Id = {} is deleted'.format(snapshot.snapshot_id))
            count += 1
            if count == 1000:
                break
If you face indentation issues, please check the file here:
https://github.com/DeveloperMujtaba/usual-resources/blob/master/boto3.py
Can someone please point out the issue? It would be much appreciated. Thanks.

Honestly, just by looking at your code I cannot tell why it balks at that, but since you already have the snapshot resource, why not just do:
snapshot.delete()
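For example, a minimal sketch of just the delete step, reusing the snapshots collection from your script and leaving out the age/AMI checks for brevity (the ClientError handling is my own assumption about how you might want to report failures):
from botocore.exceptions import ClientError

for snapshot in snapshots:
    try:
        # let the resource object issue the DeleteSnapshot call itself
        snapshot.delete()
        print('Deleted snapshot {}'.format(snapshot.snapshot_id))
    except ClientError as e:
        # e.g. the snapshot is still referenced by a registered AMI
        print('Could not delete {}: {}'.format(snapshot.snapshot_id, e))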

Related

How to use Python boto3 to get count of files/object in s3 bucket older than 60 days?

I'm trying to get the count of all objects which are older than 60 days. Is there any way to perform a query or any Python boto3 method to get this required output?
Here is code to list files or objects from an S3 bucket that are older than 60 days.
import json
import boto3
import datetime
import time
from time import mktime

client = boto3.client('s3')
response = client.list_objects(Bucket='angularbuildbucket')
print(response)

today_date_time = datetime.datetime.now().replace(tzinfo=None)
print(today_date_time)

for file in response.get("Contents"):
    file_name = file.get("Key")
    modified_time = file.get("LastModified").replace(tzinfo=None)
    difference_days_delta = today_date_time - modified_time
    difference_days = difference_days_delta.days
    print("difference_days---", difference_days)
    if difference_days > 60:
        print("file more than 60 days older : - ", file_name)
Note: If you run this code locally, make sure your AWS CLI environment is set up and the right profile is passed.
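Since the question asks for a count specifically, here is a minimal sketch that also paginates with list_objects_v2 (list_objects only returns up to 1000 keys per call) and tallies the old objects; the bucket name is just the one from the question:
import boto3
from datetime import datetime, timezone, timedelta

s3 = boto3.client('s3')
cutoff = datetime.now(timezone.utc) - timedelta(days=60)
count = 0

paginator = s3.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='angularbuildbucket'):
    for obj in page.get('Contents', []):
        # LastModified is already timezone-aware, so it can be compared directly
        if obj['LastModified'] < cutoff:
            count += 1

print('Objects older than 60 days:', count)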

python (boto3) program to delete old snapshots in aws by description

So I am following this guide, which is tremendously helpful for creating and deleting snapshots that are older than 3 days. The trouble is, it looks like the Python script the author posted deletes all snapshots that are older than 10 days. Within my env, I have level 1's that sometimes create manual snapshots for various reasons, so I can't have this Lambda function delete those, and I want it to filter the snapshots by descriptions that contain
"Created by Lambda backup function ebs-snapshots"
Now, the author posted a way to filter the snapshots created, so I attempted to mimic that by filtering descriptions for deletion, but I just want someone to check my work and/or show me a better way, because what I have thus far is:
( Filters=[{'Description':['Created by Lambda backup function ebs-snapshots']}])
TL;DR: How do I add a filter to this code that only targets snapshots with the description above?
This is the author's code for deletion:
# Delete snapshots older than retention period
import boto3
from botocore.exceptions import ClientError
from datetime import datetime,timedelta

def delete_snapshot(snapshot_id, reg):
    print "Deleting snapshot %s " % (snapshot_id)
    try:
        ec2resource = boto3.resource('ec2', region_name=reg)
        snapshot = ec2resource.Snapshot(snapshot_id)
        snapshot.delete()
    except ClientError as e:
        print "Caught exception: %s" % e
    return

def lambda_handler(event, context):
    # Get current timestamp in UTC
    now = datetime.now()

    # AWS Account ID
    account_id = '1234567890'

    # Define retention period in days
    retention_days = 10

    # Create EC2 client
    ec2 = boto3.client('ec2')

    # Get list of regions
    regions = ec2.describe_regions().get('Regions', [])

    # Iterate over regions
    for region in regions:
        print "Checking region %s " % region['RegionName']
        reg = region['RegionName']

        # Connect to region
        ec2 = boto3.client('ec2', region_name=reg)

        # Filtering by snapshot timestamp comparison is not supported
        # So we grab all snapshot id's
        result = ec2.describe_snapshots( OwnerIds=[account_id] Filters=[{'Description':['Created by Lambda backup function ebs-snapshots']}])

        for snapshot in result['Snapshots']:
            print "Checking snapshot %s which was created on %s" % (snapshot['SnapshotId'],snapshot['StartTime'])

            # Remove timezone info from snapshot in order for comparison to work below
            snapshot_time = snapshot['StartTime'].replace(tzinfo=None)

            # Subtract snapshot time from now returns a timedelta
            # Check if the timedelta is greater than retention days
            if (now - snapshot_time) > timedelta(retention_days):
                print "Snapshot is older than configured retention of %d days" % (retention_days)
                delete_snapshot(snapshot['SnapshotId'], reg)
            else:
                print "Snapshot is newer than configured retention of %d days so we keep it" % (retention_days)
The portion I updated was:
result = ec2.describe_snapshots( OwnerIds=[account_id] Filters=[{'Description':['Created by Lambda backup function ebs-snapshots']}])
Is this correct syntactically?
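For reference, describe_snapshots expects each filter as a Name/Values pair, and the two keyword arguments need a comma between them, so a corrected call would look roughly like this sketch (the documented filter name for the snapshot description is 'description'):
result = ec2.describe_snapshots(
    OwnerIds=[account_id],
    Filters=[{
        'Name': 'description',
        'Values': ['Created by Lambda backup function ebs-snapshots']
    }]
)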

how to display all snapshots in my aws account using python boto3

The aim of this program is to delete snapshots that are older than 60 days. When run, it displays the following error: "a=snapshot[s].start_time AttributeError: 'dict' object has no attribute 'start_time'". This is my code:
#!/usr/bin/env python
import boto3
import datetime

client = boto3.client('ec2')
snapshot = client.describe_snapshots()
for s in snapshot:
    a = snapshot[s].start_time
    b = a.date()
    c = datetime.datetime.now().date()
    d = c - b
    if d.days > 60:
        snapshot[s].delete(dry_run=True)
Your error is in the line a=snapshot[s].start_time; use a=s.start_time instead.
Note: I would also change "snapshot" to "snapshots", then in your for loop use:
for snapshot in snapshots:
This makes the code easier to read and clearer about what your variables represent.
Another item is that start_time is a string. You will need to parse it into a datetime before you can compare it. Here is an example to help you:
delete_time = datetime.utcnow() - timedelta(days=days)

for snapshot in snapshots:
    start_time = datetime.strptime(
        snapshot.start_time,
        '%Y-%m-%dT%H:%M:%S.000Z'
    )
    if start_time < delete_time:
        ***delete your snapshot here***
This should do it-
import boto3
import json
import sys
from pprint import pprint

region = 'us-east-1'
ec2 = boto3.client('ec2', region)
resp = ec2.describe_instances()
resp_describe_snapshots = ec2.describe_snapshots(OwnerIds=['*******'])
snapshot = resp_describe_snapshots['Snapshots']

snapshots = []
for snapshotIdList in resp_describe_snapshots['Snapshots']:
    snapshots.append(snapshotIdList.get('SnapshotId'))

for id in snapshots:
    print(id)
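To go from listing the snapshot IDs to the original goal of deleting anything older than 60 days, a sketch along these lines could work (the owner ID is a placeholder; in boto3, snapshot.start_time is already a timezone-aware datetime, so it can be compared directly without string parsing):
from datetime import datetime, timedelta, timezone
import boto3

ec2 = boto3.resource('ec2')
cutoff = datetime.now(timezone.utc) - timedelta(days=60)

for snapshot in ec2.snapshots.filter(OwnerIds=['123456789012']):
    if snapshot.start_time < cutoff:
        # delete snapshots created before the 60-day cutoff
        print('Deleting', snapshot.snapshot_id)
        snapshot.delete()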

Python compare current date to a specific date

So basically I have this Python code which I'm running on Kodi, since it's a Kodi addon.
import time

def checkPassword(path, lock=None):
    expdate = "10/07/15"
    datenow = time.strftime("%x")
    if datenow == expdate:
        return 'ERROR'
    if not lock:
        folderConfig = os.path.join(path, FOLDERCFG)
        lock = getParam('LOCK', folderConfig)
    title = GETTEXT(30069) % path.rsplit(os.sep, 1)[-1]
    unlock = getText(title, hidden=True)
    if not unlock:
        return ''
    md5 = utils.generateMD5(unlock)
    match = md5 == lock
    if not match:
        return 'ERROR'
    return md5
Basically I have a favourites folder which is locked with a password, and I want to show an error if the account has expired. I can preset the date in the addon myself like I did in the example, but somehow my addon is giving me an error with this code. Can you tell me what I have wrong? The problem is somewhere in the first 6 lines, since when I remove them it works, but obviously without the date check.
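For the date comparison itself, parsing the expiry date and comparing date objects is usually more reliable than comparing locale-formatted strings from time.strftime("%x"). A small sketch, assuming the check should trigger on or after the expiry date rather than only on that exact day:
from datetime import datetime

# parse the preset expiry date (MM/DD/YY, as in the question)
expdate = datetime.strptime("10/07/15", "%m/%d/%y").date()

if datetime.now().date() >= expdate:
    # account expired: mirror the addon's 'ERROR' return value here
    print('ERROR')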

Amazon S3 boto - how to delete folder?

I created a folder in s3 named "test" and I pushed "test_1.jpg", "test_2.jpg" into "test".
How can I use boto to delete folder "test"?
Here is a 2018 (almost 2019) version:
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
bucket.objects.filter(Prefix="myprefix/").delete()
There are no folders in S3. Instead, the keys form a flat namespace. However, a key with slashes in its name is displayed specially by some programs, including the AWS console (see for example Amazon S3 boto - how to create a folder?).
Instead of deleting "a directory", you can (and have to) list files by prefix and delete them. In essence:
for key in bucket.list(prefix='your/directory/'):
    key.delete()
However, the other accomplished answers on this page feature more efficient approaches.
Notice that the prefix is matched with a plain string search. If the prefix were your/directory, that is, without the trailing slash appended, the program would also happily delete your/directory-that-you-wanted-to-remove-is-definitely-not-this-one.
For more information, see S3 boto list keys sometimes returns directory key.
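One way to guard against that, as a small sketch in the same boto style as the snippet above, is to normalize the prefix before listing:
prefix = 'your/directory'
# ensure the trailing slash so sibling keys sharing the prefix are not matched
if not prefix.endswith('/'):
    prefix += '/'

for key in bucket.list(prefix=prefix):
    key.delete()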
I feel that it's been a while and boto3 has a few different ways of accomplishing this goal. This assumes you want to delete the test "folder" and all of its objects. Here is one way:
s3 = boto3.resource('s3')
objects_to_delete = s3.meta.client.list_objects(Bucket="MyBucket", Prefix="myfolder/test/")
delete_keys = {'Objects' : []}
delete_keys['Objects'] = [{'Key' : k} for k in [obj['Key'] for obj in objects_to_delete.get('Contents', [])]]
s3.meta.client.delete_objects(Bucket="MyBucket", Delete=delete_keys)
This should make two requests, one to fetch the objects in the folder, the second to delete all objects in said folder.
https://boto3.readthedocs.org/en/latest/reference/services/s3.html#S3.Client.delete_objects
A slight improvement on Patrick's solution. As you might know, both list_objects() and delete_objects() have an object limit of 1000, which is why you have to paginate the listing and delete in chunks. This is pretty universal, and you can give Prefix to paginator.paginate() to delete only specific subdirectories/paths:
client = boto3.client('s3', **credentials)
paginator = client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=bucket_name)

delete_us = dict(Objects=[])
for item in pages.search('Contents'):
    delete_us['Objects'].append(dict(Key=item['Key']))

    # flush once aws limit reached
    if len(delete_us['Objects']) >= 1000:
        client.delete_objects(Bucket=bucket_name, Delete=delete_us)
        delete_us = dict(Objects=[])

# flush rest
if len(delete_us['Objects']):
    client.delete_objects(Bucket=bucket_name, Delete=delete_us)
You can use bucket.delete_keys() with a list of keys (with a large number of keys I found this to be an order of magnitude faster than using key.delete).
Something like this:
delete_key_list = []
for key in bucket.list(prefix='/your/directory/'):
    delete_key_list.append(key)
    if len(delete_key_list) > 100:
        bucket.delete_keys(delete_key_list)
        delete_key_list = []

if len(delete_key_list) > 0:
    bucket.delete_keys(delete_key_list)
If versioning is enabled on the S3 bucket:
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
bucket.object_versions.filter(Prefix="myprefix/").delete()
If one needs to filter by object contents like I did, the following is a blueprint for your logic:
def get_s3_objects_batches(s3: S3Client, **base_kwargs):
    kwargs = dict(MaxKeys=1000, **base_kwargs)
    while True:
        response = s3.list_objects_v2(**kwargs)
        # to yield each and every file: yield from response.get('Contents', [])
        yield response.get('Contents', [])
        if not response.get('IsTruncated'):  # At the end of the list?
            break
        continuation_token = response.get('NextContinuationToken')
        kwargs['ContinuationToken'] = continuation_token


def your_filter(b):
    raise NotImplementedError()


session = boto3.session.Session(profile_name=profile_name)
s3client = session.client('s3')
for batch in get_s3_objects_batches(s3client, Bucket=bucket_name, Prefix=prefix):
    to_delete = [{'Key': obj['Key']} for obj in batch if your_filter(obj)]
    if to_delete:
        s3client.delete_objects(Bucket=bucket_name, Delete={'Objects': to_delete})
# Deleting files inside a folder in S3 using boto3
def delete_from_minio():
    """
    This function is used to delete files or a folder inside another folder.
    """
    try:
        logger.info("Deleting from minio")
        aws_access_key_id = 'Your_aws_access_key'
        aws_secret_access_key = 'Your_aws_secret_key'
        host = 'your_aws_endpoint'
        s3 = boto3.resource('s3', aws_access_key_id=aws_access_key_id,
                            aws_secret_access_key=aws_secret_access_key,
                            config=boto3.session.Config(signature_version='your_version'),
                            region_name="your_region",
                            endpoint_url=host,
                            verify=False)
        bucket = s3.Bucket('Your_bucket_name')
        for obj in bucket.objects.filter(Prefix='Directory/Sub_Directory'):
            s3.Object(bucket.name, obj.key).delete()
    except Exception as e:
        print(f"Error occurred while deleting from S3: {str(e)}")
Hope this Helps :)
