I'm looking to develop a Python script that will scan EC2 instances for public IPs and, if any are found, stop those instances.
I'm fairly new to Python, so I'm having trouble putting the two pieces of code together.
import boto3

ec2_resource = boto3.resource('ec2')
ec2_client = boto3.client('ec2')

def lambda_handler(event, context):
    instances = ec2_resource.instances.all()
    for instance in instances:
        if instance.public_ip_address:
            print("Instance ID:", instance.id, ", Instance Platform:", instance.platform,
                  ", Public IP:", instance.public_ip_address,
                  ", Instance Type:", instance.instance_type,
                  ", Instance Image Id:", instance.image.id)
            # stop_instances call pasted from the API docs; 'string' is still the placeholder
            response = ec2_client.stop_instances(
                InstanceIds=[
                    'string',
                ],
                Hibernate=True,
                DryRun=False,
                Force=False
            )
Basically, I'm looking for an automated script that discovers public IPs on EC2 instances and then stops those instances. My apologies if the script above looks like a mess.
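For reference, here is a minimal sketch of how the two pieces could be glued together: collect the IDs of instances that have a public IP, then stop them in a single call. The Lambda handler shape and the return value are assumptions for illustration, not part of the original code.

import boto3

ec2_resource = boto3.resource('ec2')
ec2_client = boto3.client('ec2')

def lambda_handler(event, context):
    # collect the IDs of all instances that have a public IP assigned
    public_ids = [i.id for i in ec2_resource.instances.all() if i.public_ip_address]
    if public_ids:
        # stop them in one batch call instead of one call per instance
        ec2_client.stop_instances(InstanceIds=public_ids)
    return {'stopped': public_ids}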
UPDATE:
I think I've cleaned it up: it now builds a list of instances and then issues a stop from that list.
#!/usr/bin/python
'''
Finds instance ID, platform, public IP and instance type based on tags,
then stops the instances that were found.
'''
import boto3


def instances_find(name, value):
    '''
    Finds instance IDs based on tags.
    Returns a list of the instance IDs found.
    '''
    list_instances = []
    # filter based on tags
    filters = [
        {
            'Name': name,
            'Values': [
                value,
            ]
        },
    ]
    instances = ec2_resource.instances.filter(Filters=filters)
    for instance in instances:
        # log the details, then keep the ID so it can be passed to stop_instances
        print("Instance ID:", instance.id, ", Instance Platform:", instance.platform,
              ", Public IP:", instance.public_ip_address,
              ", Instance Type:", instance.instance_type,
              ", Instance Image Id:", instance.image.id)
        list_instances.append(instance.id)
    return list_instances


def instances_stop(instance_ids):
    '''
    Stops the instances whose IDs are in the list.
    '''
    ec2_client.stop_instances(InstanceIds=instance_ids)


# enter tag name and value
tag_name = 'tag:environment'
tag_value = 'dev'

ec2_resource = boto3.resource('ec2')
ec2_client = boto3.client('ec2')

# find instances
ec2_list = instances_find(tag_name, tag_value)
# stop instances
instances_stop(ec2_list)
print('stopped instances: ' + str(ec2_list))
I intend to set up an Azure Blob Storage datasource for Great Expectations. The setup is done with the following YAML string and seems to work, given that it lists some files in my blob storage.
example_yaml = f"""
name: {datasource_name}
class_name: Datasource
execution_engine:
    class_name: PandasExecutionEngine
    azure_options:
        account_url: MYURL
        credential: MYKEY
data_connectors:
    default_runtime_data_connector_name:
        class_name: RuntimeDataConnector
        batch_identifiers:
            - default_identifier_name
    default_inferred_data_connector_name:
        class_name: InferredAssetAzureDataConnector
        azure_options:
            account_url: MYURL
            credential: MYKEY
        container: machinelearning
        name_starts_with: waermeprognose/output_neuralnet/FVS/FERNWAERME.1
        default_regex:
            pattern: (.*)\_FVS_FERNWAERME.1_waermeprognose.parquet
            group_names:
                - date
"""
context.test_yaml_config(yaml_config=example_yaml) then returns:
Attempting to instantiate class from config...
Instantiating as a Datasource, since class_name is Datasource
Successfully instantiated Datasource
ExecutionEngine class name: PandasExecutionEngine
Data Connectors:
default_inferred_data_connector_name : InferredAssetAzureDataConnector
Available data_asset_names (1 of 1):
DEFAULT_ASSET_NAME (3 of 354): ['waermeprognose/output_neuralnet/FVS/FERNWAERME.1/2021-12-01_FVS_FERNWAERME*1_waermeprognose*parquet', 'waermeprognose/output_neuralnet/FVS/FERNWAERME.1/2021-12-02_FVS_FERNWAERME*1_waermeprognose*parquet', 'waermeprognose/output_neuralnet/FVS/FERNWAERME.1/2021-12-03_FVS_FERNWAERME*1_waermeprognose*parquet']
Unmatched data_references (0 of 0):[]
default_runtime_data_connector_name:RuntimeDataConnector
Available data_asset_names (0 of 0):
Note : RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest
Unmatched data_references (0 of 0): []
I then save the source to my context using sanitize_yaml_and_save_datasource(context, example_yaml, overwrite_existing=True), but when I try to set up some expectations, the Batch Request is empty:
context = DataContext(
    context_root_dir=r'C:\Users\Philip\Documents\Projekte\X\Python\tests\great_expectations'
)
batch_request_parameters = {
    "datasource_name": "dlsbeoptdev_machinelearning_waermeprognose_output_neuralnet",
    "data_connector_name": "default_inferred_data_connector_name",
    "data_asset_name": "DEFAULT_ASSET_NAME",
}
batch_request = BatchRequest(**batch_request_parameters)
print(context.get_batch_list(batch_request=batch_request))
This only returns an empty list, which I can't use to set up a validator. Did I miss any parameters needed to request a batch?
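One way to narrow this down (a diagnostic sketch, not a fix; the datasource name below is simply the one used in the batch request above) is to ask the persisted context what it actually knows about:

# list the datasources the saved context knows about
print(context.list_datasources())

# list the data asset names the context can see for this datasource;
# the name is the one used in the batch request above
print(context.get_available_data_asset_names(
    datasource_names=["dlsbeoptdev_machinelearning_waermeprognose_output_neuralnet"]
))

If the datasource or the asset name does not show up there, the batch list will be empty regardless of the request parameters.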
In AWS, I have a centralized networking account that defines all the VPCs and subnets, and each VPC is shared with target accounts using Resource Access Manager (RAM). Given an IP, I need to find out the target account ID with which the VPC/subnet has been shared. Here is what I have done so far.
In the code below, the vpc parameter contains the VPC lookup response and ip_addr is the IP address we are looking for:
def lookup_ipaddr(session, ec2_client, vpc, ip_addr):
    found = False
    if ipaddress.ip_address(ip_addr) in ipaddress.ip_network(vpc['CidrBlock']):
        filters = [{'Name': 'vpc-id', 'Values': [vpc['VpcId']]}]
        subnets = ec2_client.describe_subnets(Filters=filters)['Subnets']
        for subnet in subnets:
            if ipaddress.ip_address(ip_addr) in ipaddress.ip_network(subnet['CidrBlock']):
                found = True
                tags = subnet['Tags']
                # tags returned by the previous API are in a different form than that required by RAM
                for tag in tags:
                    tag['tagKey'] = tag['Key']
                    tag['tagValues'] = [tag['Value']]
                    del tag['Key']
                    del tag['Value']
                print("\n\n")
                print(tags)
                print("\n\n")
                resourceArn = subnet['SubnetArn']
                ram_client = session.client('ram')
                resp = ram_client.get_resource_shares(resourceOwner='SELF', tagFilters=tags)
However, the get_resource_shares API call doesn't return anything beyond the response metadata. Any suggestions on how to find the destination account ID/principal with which the subnet was shared?
After a bit of digging, I was able to obtain the destination account ID by using the list_principals API of AWS Resource Access Manager (RAM): https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ram.html#RAM.Client.list_principals
Here is the full Python code:
def lookup_ipaddr(session, ec2_client, vpc, ip_addr):
    found = False
    filters = [{'Name': 'vpc-id', 'Values': [vpc['VpcId']]}]
    subnets = ec2_client.describe_subnets(Filters=filters)['Subnets']
    for subnet in subnets:
        if ipaddress.ip_address(ip_addr) in ipaddress.ip_network(subnet['CidrBlock']):
            resourceArn = subnet['SubnetArn']
            ram_client = session.client('ram')
            resp = ram_client.list_principals(
                resourceOwner='SELF',
                resourceArn=resourceArn
            )
            print(f"Subnet {subnet['SubnetId']} is shared with account [{resp['principals'][0]['id']}]")
            found = True
            break
    return found
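For completeness, a hedged sketch of how the function above might be driven; the profile name, the target IP, and the way the VPC list is obtained are assumptions and not part of the original code:

import boto3
import ipaddress

# hypothetical entry point; the profile name and IP address are placeholders
session = boto3.Session(profile_name='networking-account')
ec2_client = session.client('ec2')
ip_addr = '10.20.30.40'

# check each VPC and let lookup_ipaddr inspect its subnets
for vpc in ec2_client.describe_vpcs()['Vpcs']:
    if ipaddress.ip_address(ip_addr) in ipaddress.ip_network(vpc['CidrBlock']):
        if lookup_ipaddr(session, ec2_client, vpc, ip_addr):
            break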
I'm trying to write a Python script using boto3 to get the hourly price of an instance, given the instance ID. To be clear, I'm not talking about the costs you can get from Cost Explorer; I mean the nominal hourly price of, for example, an EC2 instance.
I've already found some examples using "boto3.client('pricing', ...)" with a bunch of parameters and filters, such as:
https://www.saisci.com/aws/how-to-get-the-on-demand-price-of-ec2-instances-using-boto3-and-python/
which also requires converting the region code to the region name.
I would prefer not to have to specify every instance detail and parameter for that query.
Can anybody help me find a way to get that information from just the EC2 instance ID?
Thanks in advance.
You have to pass all that information. If you want to write a script that takes an instance ID and returns the hourly price, you would first need to use the instance ID to look up the instance details, and then pass those details to the pricing query.
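A rough sketch of that two-step flow, assuming on-demand pricing for shared-tenancy Linux with no pre-installed software; the region-to-location mapping is illustrative only, and the Pricing API itself is only served from a few regions such as us-east-1:

import json
import boto3

# the Pricing API expects the human-readable location name, not the region code;
# this small mapping is illustrative only
REGION_NAMES = {'us-east-1': 'US East (N. Virginia)', 'eu-west-1': 'EU (Ireland)'}

def hourly_price_for_instance(instance_id, region='us-east-1'):
    # step 1: look up the instance details from the ID
    ec2 = boto3.resource('ec2', region_name=region)
    instance_type = ec2.Instance(instance_id).instance_type

    # step 2: query the Pricing API with those details
    pricing = boto3.client('pricing', region_name='us-east-1')
    resp = pricing.get_products(
        ServiceCode='AmazonEC2',
        Filters=[
            {'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
            {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': REGION_NAMES[region]},
            {'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
            {'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'},
            {'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
            {'Type': 'TERM_MATCH', 'Field': 'capacitystatus', 'Value': 'Used'},
        ],
        MaxResults=1,
    )
    # each PriceList entry is a JSON string; dig out the on-demand price per hour
    product = json.loads(resp['PriceList'][0])
    on_demand = next(iter(product['terms']['OnDemand'].values()))
    dimension = next(iter(on_demand['priceDimensions'].values()))
    return float(dimension['pricePerUnit']['USD'])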
You have to specify most of the information but not all of it.
For example, region_name is optional if you:
Have configured the AWS CLI on the machine on which your Python script is running (i.e. ~/.aws/config is present and the region is configured),
OR
Are running the Python script on an AWS resource that has a role attached to it with a policy that allows you to retrieve the spot pricing information.
For example, I am able to run this script that retrieves my current spot instances and gets their current hourly cost, and calculates a bid price for me based on the spot price history for that particular instance type without specifying the region anywhere:
#!/usr/bin/env python3
import boto3
import json
from datetime import datetime
from datetime import timedelta
from collections import namedtuple


def get_current_pricing():
    pricing = []
    ec2_client = boto3.client('ec2')
    ec2_resource = boto3.resource('ec2')
    response = ec2_client.describe_spot_instance_requests()
    spot_instance_requests = response['SpotInstanceRequests']

    for instance_request in spot_instance_requests:
        if instance_request['State'] == 'active':
            instance = ec2_resource.Instance(instance_request['InstanceId'])
            for tag in instance.tags:
                if tag['Key'] == 'Name':
                    application = tag['Value']
                    break

            price = {
                'application': application,
                'instance_type': instance_request['LaunchSpecification']['InstanceType'],
                'current_price': float(instance_request['SpotPrice']),
                'bid_price': get_bid_price(instance_request['LaunchSpecification']['InstanceType'])
            }
            pricing.append(price)

    return pricing


def get_bid_price(instancetype):
    instance_types = [instancetype]
    start = datetime.now() - timedelta(days=1)
    ec2 = boto3.client('ec2')
    price_dict = ec2.describe_spot_price_history(
        StartTime=start,
        InstanceTypes=instance_types,
        ProductDescriptions=['Linux/UNIX']
    )

    if len(price_dict.get('SpotPriceHistory')) > 0:
        PriceHistory = namedtuple('PriceHistory', 'price timestamp')
        # collect the full history, then sort it so the most recent price comes first
        price_list = [PriceHistory(round(float(item.get('SpotPrice')), 5), item.get('Timestamp'))
                      for item in price_dict.get('SpotPriceHistory')]
        price_list.sort(key=lambda tup: tup.timestamp, reverse=True)
        bid_price = round(float(price_list[0][0]), 5)
        # add 10% leeway on top of the latest spot price
        leeway = round(float(bid_price / 100 * 10), 5)
        bid_price = round(float(bid_price + leeway), 5)
        return bid_price
    else:
        raise ValueError(f'Invalid instance type: {instancetype} provided. '
                         'Please provide correct instance type.')


if __name__ == '__main__':
    current_pricing = get_current_pricing()
    print(json.dumps(current_pricing, indent=4, default=str))
I am using the lambda_cache library in Python to implement caching for a couple of parameters stored in SSM. The following code is working as expected.
from lambda_cache import ssm

@ssm.cache(parameter=['applicationId', 'sharedSecret'], entry_name='parameters', max_age_in_seconds=300)
def lambda_handler(event, context):
    applicationId = getattr(context, 'parameters').get('applicationId')
    sharedSecret = getattr(context, 'parameters').get('sharedSecret')
    # rest of code
I want to implement a unit test where I need to assign some values to applicationId and sharedSecret. I started as follows:
@mock_ssm
def test_lambda_handler1(self):
    ssm = boto3.client('ssm', region_name=REGION_NAME)
    response = ssm.put_parameter(
        Name='parameters',
        Value='applicationIdValue',
        KeyId='applicationId',
        Type='String',
        Overwrite=True
    )
    response = ssm.put_parameter(
        Name='parameters',
        Value='sharedSecretValue',
        KeyId='sharedSecret',
        Type='String',
        Overwrite=True
    )
    lambda_handler(event, None)
This threw the error 'NoneType' object has no attribute 'parameters'.
So I then created a context as

context = {
    'parameters': 'parameters'
}

and called the lambda as

lambda_handler(event, context)

It then threw the error 'dict' object has no attribute 'parameters'.
I then tried creating a class:

class context:
    parameters = {"applicationId": "testapplicationId", "sharedSecret": "testsharedSecret"}

and in the unit test:

s1 = context()
lambda_handler(event, s1)

But this returns None for both applicationId and sharedSecret, as getattr(context, 'parameters') itself shows a len() of 0.
The parameters set in the context class in the TestCase are not passed through to the lambda_handler. I think only the attributes documented at https://docs.aws.amazon.com/lambda/latest/dg/python-context.html, like aws_request_id, memory_limit_in_mb, etc., are allowed. How can I mock the SSM cache to get the parameter values?
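One idea, based on my reading of how lambda_cache resolves names (an assumption worth verifying against the library source): the @ssm.cache decorator fetches SSM parameters literally named applicationId and sharedSecret itself and attaches the result to the context under parameters, so a moto test would seed those two parameter names rather than a single parameter called parameters. A sketch of that idea:

import boto3
from moto import mock_ssm

@mock_ssm
def test_lambda_handler1(self):
    # seed the parameter names the @ssm.cache decorator is assumed to look up
    ssm_client = boto3.client('ssm', region_name=REGION_NAME)
    ssm_client.put_parameter(Name='applicationId', Value='testApplicationId', Type='String')
    ssm_client.put_parameter(Name='sharedSecret', Value='testSharedSecret', Type='String')

    # any plain object can serve as the context; the decorator is expected
    # to set the 'parameters' attribute on it before the handler body runs
    class FakeContext:
        pass

    lambda_handler({}, FakeContext())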
I've created a Lambda function for stopping EC2 instances with a specific tag; the Python code is presented below. The main task of this solution is to stop all instances with the tag "name: purpose, value: temp". When I execute this script, all EC2 instances are stopped. I suppose something is wrong with the following filter: instances = ec2.instances.filter(Filters=[{'Name': 'tag:purpose', 'Values': ['temp']}]).
Function code below:
import boto3

def lambda_handler(event, context):
    client = boto3.client('ec2')
    ec2_regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
    for region in ec2_regions:
        ec2 = boto3.resource('ec2', region_name=region)
        instances = ec2.instances.filter(Filters=[{'Name': 'tag:purpose', 'Values': ['temp']}])
        RunningInstances = [instance.id for instance in instances]
        for i in RunningInstances:
            stoppingInstances = ec2.instances.stop(i)
Your filter for tags is wrong; you will need to change it:
filters = [
    {
        'Name': 'tag:Name',
        'Values': ['Shut']
    },
    {
        'Name': 'instance-state-name',
        'Values': ['running']
    }
]
Here is a complete working example:
import boto3

# define the connection
ec2 = boto3.resource('ec2')

def lambda_handler(event, context):
    # Use the filter() method of the instances collection to retrieve
    # all running EC2 instances.
    filters = [
        {
            'Name': 'tag:Name',
            'Values': ['Shut']
        },
        {
            'Name': 'instance-state-name',
            'Values': ['running']
        }
    ]

    # filter the instances
    instances = ec2.instances.filter(Filters=filters)

    # locate all running instances
    RunningInstances = [instance.id for instance in instances]

    # print the instances for logging purposes
    # print(RunningInstances)

    # make sure there are actually instances to shut down
    if len(RunningInstances) > 0:
        # perform the shutdown
        shuttingDown = ec2.instances.filter(
            InstanceIds=RunningInstances).stop()
        print(shuttingDown)
    else:
        print("No Instances to shut down")
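Carrying the same pattern back to the multi-region loop and the tag:purpose filter from the original question, a hedged sketch (the filter values and the region discovery come from the question; everything else is an assumption, not a drop-in fix):

import boto3

def lambda_handler(event, context):
    client = boto3.client('ec2')
    regions = [r['RegionName'] for r in client.describe_regions()['Regions']]
    filters = [
        {'Name': 'tag:purpose', 'Values': ['temp']},
        {'Name': 'instance-state-name', 'Values': ['running']},
    ]
    for region in regions:
        ec2 = boto3.resource('ec2', region_name=region)
        instance_ids = [i.id for i in ec2.instances.filter(Filters=filters)]
        if instance_ids:
            # stop only the matching instances in this region
            ec2.instances.filter(InstanceIds=instance_ids).stop()
            print(f"{region}: stopping {instance_ids}")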
Try this code; I have made a small change:
import boto3

def lambda_handler(event, context):
    client = boto3.client('ec2')
    ec2_regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
    for region in ec2_regions:
        ec2 = boto3.resource('ec2', region_name=region)
        # you can also try describe_instances() on the client with the same filter
        instances = ec2.instances.filter(Filters=[{'Name': 'tag:purpose', 'Values': ['temp']}])
        RunningInstances = [instance.id for instance in instances]
        for i in RunningInstances:
            stoppingInstances = ec2.instances.filter(InstanceIds=[i]).stop()