I have the following code:
from aws_cdk import (
    aws_ec2 as ec2,
    core,
)


class MyVpcStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc = ec2.Vpc(
            self, 'MyVpc',
            cidr='10.10.10.0/23',
            max_azs=2
        )

        dhcp_options = ec2.CfnDHCPOptions(
            self, 'MyDhcpOptions',
            domain_name='aws-prod.mydomain.com',
            domain_name_servers=['10.1.1.5', '10.2.1.5'],
            ntp_servers=['10.1.1.250', '10.2.1.250'],
        )

        dhcp_options_associations = ec2.CfnVPCDHCPOptionsAssociation(
            self, 'MyDhcpOptionsAssociation',
            dhcp_options_id=dhcp_options.logical_id,
            vpc_id=vpc.vpc_id
        )
It generates the VPCDHCPOptionsAssociation properties in the CloudFormation template INCORRECTLY, like this:
MyDhcpOptionsAssociation:
  Type: AWS::EC2::VPCDHCPOptionsAssociation
  Properties:
    DhcpOptionsId: MyDhcpOptions
    VpcId:
      Ref: myvpcAB8B6A91
I need this section of the CloudFormation template to look like this (CORRECT):
MyDhcpOptionsAssociation:
  Type: AWS::EC2::VPCDHCPOptionsAssociation
  Properties:
    DhcpOptionsId:
      Ref: MyDhcpOptions
    VpcId:
      Ref: myvpcAB8B6A91
If I use dhcp_options_id=dhcp_options.id, I get error AttributeError: 'CfnDHCPOptions' object has no attribute 'id'.
If I use dhcp_options_id=dhcp_options.dhcp_options_id, I get error AttributeError: 'CfnDHCPOptions' object has no attribute 'dhcp_options_id'.
Here is the CDK API reference for this: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/CfnVPCDHCPOptionsAssociation.html
I found it: it has to be .ref, which is admittedly not consistent with other resource properties. On an L1 (Cfn*) construct, .ref resolves to CloudFormation's Ref intrinsic, whereas .logical_id is just the literal logical-ID string, which explains the incorrect output above.
dhcp_options_associations = ec2.CfnVPCDHCPOptionsAssociation(
    self, 'MyDhcpOptionsAssociation',
    dhcp_options_id=dhcp_options.ref,
    vpc_id=vpc.vpc_id
)
I'm trying to create an S3 bucket but I keep getting this error:
File "/home/ec2-user/environment/homework_1.py", line 64, in create_bucket
region = self.bucket.meta.client.meta.region_name
AttributeError: 'NoneType' object has no attribute 'bucket'
My code is as follows:
def create_bucket(self=None, region_override=None):
    """Create an Amazon S3 bucket in the default Region for the account or in the
    specified Region."""
    logger = logging.getLogger(__name__)
    bucket = None
    if region_override is not None:
        region = region_override
    else:
        region = self.bucket.meta.client.meta.region_name
    try:
        self.bucket.create(CreateBucketConfiguration={'LocationConstraint': region})
        self.bucket.wait_until_exists()
        logger.info("Created bucket '%s' in region=%s", self.bucket.name, region)
    except ClientError as error:
        logger.exception("Couldn't create bucket named '%s' in region=%s.",
                         self.bucket.name, region)
        raise error
I tried running the code without that particular line, thinking that would fix it, but I got the same error message on "self.bucket.create(CreateBucketConfiguration={'LocationConstraint': region})". I'm new to cloud programming. Can someone help?
It looks like you need to use this function as a method of a class, where self refers to the class instance. E.g.:
import boto3

class YourClass:
    def __init__(self, bucket_name):
        self.bucket_name = bucket_name
        self.bucket = boto3.resource('s3').Bucket(self.bucket_name)

    def create_bucket(self, region_override=None):
        ...  # your function code
Then instantiate your class and call the function like this:
myBucketName = "SomeNewBucket"
myBucketClass = YourClass(myBucketName)
myBucketClass.create_bucket()
For documentation see: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#bucket
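One caveat worth adding, not part of the original question or answer: S3 rejects CreateBucketConfiguration with LocationConstraint set to us-east-1, so a region-aware create_bucket needs to special-case that Region. A minimal sketch under that assumption:

import boto3


def create_bucket(bucket_name, region):
    """Create a bucket; us-east-1 is the default location, and S3 rejects
    an explicit LocationConstraint for it, so omit the configuration there."""
    bucket = boto3.resource('s3').Bucket(bucket_name)
    if region == 'us-east-1':
        bucket.create()
    else:
        bucket.create(CreateBucketConfiguration={'LocationConstraint': region})
    bucket.wait_until_exists()
    return bucket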
So, I can list my instances by zone using this API:
GET https://compute.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances.
I now want to filter my instances by region. Any idea how I can do this (using Python)?
You can use aggregated_list() to list all the instances in your project. Filtering by region can then be done in code. See the example below, where I used a regex on the region variable to mimic a filter.
from typing import Dict, Iterable

import re

from google.cloud import compute_v1


def list_all_instances(
    project_id: str,
    region: str,
) -> Dict[str, Iterable[compute_v1.Instance]]:
    instance_client = compute_v1.InstancesClient()
    request = {
        "project": project_id,
    }
    agg_list = instance_client.aggregated_list(request=request)
    all_instances = {}
    print("Instances found:")
    for zone, response in agg_list:
        if response.instances:
            # Zone keys look like "zones/us-central1-a"; match "<region>-" so that,
            # e.g., "us-central1" does not also match "us-central2" zones.
            if re.search(f"{region}-", zone):
                all_instances[zone] = response.instances
                print(f" {zone}:")
                for instance in response.instances:
                    print(f" - {instance.name} ({instance.machine_type})")
    return all_instances


list_all_instances(project_id="your-project-id", region="us-central1")  # used us-central1 for testing
NOTE: The code above is based on this code; I just modified it to apply the region filtering.
(Screenshots omitted: the actual instances in my GCP account, and the result of the code above, where only zones with the us-central1 prefix were displayed.)
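As an alternative sketch (not from the original answer; client and field names are taken from the google-cloud-compute v1 library), you could resolve the region's zones first and then list instances per zone, avoiding the regex entirely:

from google.cloud import compute_v1


def list_instances_in_region(project_id: str, region: str):
    """List instances by enumerating the region's zones, then listing per zone."""
    zones_client = compute_v1.ZonesClient()
    instance_client = compute_v1.InstancesClient()
    instances = {}
    for zone in zones_client.list(project=project_id):
        # zone.region is a full URL ending in ".../regions/<region-name>"
        if zone.region.endswith(f"/regions/{region}"):
            items = list(instance_client.list(project=project_id, zone=zone.name))
            if items:
                instances[zone.name] = items
    return instances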
The resources on AWS were created manually in the console, e.g.
Rule, EventBus, APIDestination (Target), so these resources have no CDK code.
The point is that I want to add more Rules to the existing EventBus and APIDestination (Target), and then customize the input_transformer of the targets in CDK code.
from aws_cdk import core, aws_events as events, aws_events_targets as targets


class TheDestinedLambdaStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        new_rule = events.Rule(
            self,
            "rule",
            event_pattern=events.EventPattern(),
            event_bus=events.EventBus.from_event_bus_arn(...),  # imported
            targets=...,  # APIDestination with params and transformer, don't know the method ???
        )
Is it possible to implement this?
Or does anyone know which method of the event target classes can import an existing resource into CDK?
Docs:
https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_events/EventBus.html
The L1 CfnRule construct can create a new Rule targeting an existing API Destination and custom bus. It can also optionally apply input transforms:
import json

from aws_cdk import aws_events as events

# Inside your Stack's __init__:
events.CfnRule(
    self,
    "Rule",
    event_bus_name="my-bus-name",
    event_pattern={"source": ["cdk-test"]},
    targets=[
        events.CfnRule.TargetProperty(
            arn="arn:aws:events:us-east-1:xxxx:api-destination/xxxxxxxx",
            id="foo_rule",
            role_arn="arn:aws:iam::xxxxx:role/xxxxxxxxx",
            input_transformer=events.CfnRule.InputTransformerProperty(
                input_paths_map={"detail-type": "$.detail-type"},
                input_template=json.dumps(
                    {
                        "transformed": '{"name": "DETAIL_TYPE", "value": <detail-type>}'
                    }
                ),
            ),
        )
    ],
)
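To check the rule end to end after deploying, one option (a sketch, assuming the bus name and event pattern from the answer above) is to publish a matching test event with boto3 and watch the API destination's receiver:

import boto3

# Publish an event whose source matches the rule's pattern ("cdk-test")
# onto the custom bus; the rule should forward it to the API destination.
events_client = boto3.client("events")
events_client.put_events(
    Entries=[
        {
            "EventBusName": "my-bus-name",
            "Source": "cdk-test",
            "DetailType": "smoke-test",
            "Detail": "{}",
        }
    ]
)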
While trying to write a simple CDK script to update the aws-auth ConfigMap, I get the error Object of type aws-cdk-lib.Resource is not convertible to aws-cdk-lib.aws_eks.Cluster. The error seems to stem from the Cluster reference, but I'm not sure why since .from_cluster_attributes returns an ICluster interface.
class EksCdkStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        cluster = eks.Cluster.from_cluster_attributes(self, "Cluster", cluster_name="megaXL")
        role = iam.Role.from_role_arn(self, "Role", "arn:aws:iam::123456789012:role/delete_me_role")
        eks.AwsAuth(self, "Auth", cluster=cluster).add_role_mapping(role=role, groups=["system:masters"])
"The error seems to stem from the Cluster reference, but I'm not sure why since .from_cluster_attributes returns an ICluster interface."
Almost. AwsAuth requires a Cluster, and you're passing an ICluster. This means that you can't create an AwsAuth resource with an imported cluster.
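For contrast, a minimal sketch of the case that does work (construct IDs and the Kubernetes version are assumptions), where the cluster is created by the same CDK app and exposes its AwsAuth directly:

from aws_cdk import aws_eks as eks, aws_iam as iam

# Inside the Stack's __init__: a cluster defined here is a full Cluster,
# so its aws-auth ConfigMap is managed by CDK and role mappings work.
cluster = eks.Cluster(self, "OwnedCluster", version=eks.KubernetesVersion.V1_21)
role = iam.Role.from_role_arn(self, "MappedRole", "arn:aws:iam::123456789012:role/delete_me_role")
cluster.aws_auth.add_role_mapping(role, groups=["system:masters"])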
How can I reference a resource created in a CDK stack (here, the bucket name) from a non-CDK app at runtime? i.e.
my_project/my_stack.py

from aws_cdk import core
from aws_cdk.aws_s3 import Bucket


class MyStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.s3_bucket = Bucket(self, "s3-bucket-id")

app.py

from aws_cdk import core
from my_project.my_stack import MyStack

app = core.App()
my_stack = MyStack(app, "my-stack")
app.synth()

if __name__ == "__main__":
    import boto3
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(my_stack.s3_bucket.bucket_name)
    # ^ raises -- AttributeError: 's3.ServiceResource' object has no attribute 'bucket_name'
Running python app.py raises AttributeError: 's3.ServiceResource' object has no attribute 'bucket_name'.
When aws-cdk instantiates a class that represents a resource, it's not necessarily something that has been deployed. The value of s3_bucket.bucket_name is a token that represents the bucket name string for reference in other CloudFormation resources. The best way to get the bucket name in a non-CDK app after it has been deployed is to store that value somewhere that's accessible from your app.
This could be a CloudFormation output value using CfnOutput, or an SSM parameter. You could do something like:
from aws_cdk import core
from aws_cdk.aws_s3 import Bucket
from aws_cdk.aws_ssm import StringParameter


class MyStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.s3_bucket_name_param_name = "my-parameter-name"
        self.s3_bucket = Bucket(self, "s3-bucket-id")
        StringParameter(self, "s3-bucket-name-param",
                        parameter_name=self.s3_bucket_name_param_name,
                        string_value=self.s3_bucket.bucket_name)
Then fetch the param value from SSM. Or you can just statically name the bucket and reference the bucket name string instead of the parameter name.
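For example, a minimal sketch of the non-CDK side (assuming the parameter name from the stack above and default credentials):

import boto3

# Consumer script: resolve the bucket name that the stack published
# to SSM, then use it with the plain S3 API.
ssm = boto3.client("ssm")
bucket_name = ssm.get_parameter(Name="my-parameter-name")["Parameter"]["Value"]

s3 = boto3.resource("s3")
bucket = s3.Bucket(bucket_name)
print(bucket.name)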
The idea of being able to reference resources like this transparently during an application's runtime has been experimented with, punchcard being the most notable example, but right now tokenized construct values are only understood in the context of CDK apps.