YAML not well formed - python

I'm trying to stop my RDS cluster using Lambda, and I'm using the Python code shown below to target the cluster's tags.
However, when I try to create a CloudFormation stack, AWS tells me that the YAML is not well formed.
For example, it says that Key = 'True' is not well formed.
If I remove that bit of code, it then says the next bit, client = boto3.client('rds'), is also not well formed.
I've put this code into an online Python validator and it didn't report any issues.
Can anyone help with this? Thanks.
Tag = 'AutoPower'
Key = 'True'
client = boto3.client('rds')
response = client.describe_db_cluster()
for resp in response['DBCluster']:
    db_cluster_arn = resp['DBClusterArn']
    response = client.list_tags_for_resource(ResourceName=db_cluster_arn)
    for tags in response['TagList']:
        if tags['Key'] == str(Key) and tags['Value'] == str(Tag):
            status = resp['DBClusterStatus']
            ClusterID = resp['DBClusterIdentifier']
            print(InstanceID)
            if status == 'available':
                print("shutting down %s " % ClusterID)
                client.stop_db_cluster(DBClusterIdentifier=ClusterID)
                # print ("Do something with it : %s" % db_instance_arn)
            elif status == 'stopped':
                print("starting up %s " % ClusterID)
                client.start_db_cluster(DBClusterIdentifier=ClusterID)
            else:
                print("The database is " + status + " status!")

You are most likely running into indentation problems between the CloudFormation YAML and the inline function code: everything under ZipFile: | must be indented past the ZipFile key, and the Python inside it must keep its own consistent indentation on top of that. Separately, a few bugs in the Python itself will surface once the stack deploys: the boto3 call is describe_db_clusters() (plural) and its response key is 'DBClusters', print(InstanceID) references an undefined name (presumably ClusterID was intended), and the inline code needs import boto3 plus a lambda_handler function to match the Handler: index.lambda_handler property. Here is an example of CloudFormation YAML that would use your code inline to create a function, with those fixes folded in:
Resources:
  YourFunction:
    Type: AWS::Lambda::Function
    Properties:
      Code:
        ZipFile: |
          import boto3

          def lambda_handler(event, context):
              Tag = 'AutoPower'
              Key = 'True'
              client = boto3.client('rds')
              response = client.describe_db_clusters()
              for resp in response['DBClusters']:
                  db_cluster_arn = resp['DBClusterArn']
                  tag_response = client.list_tags_for_resource(ResourceName=db_cluster_arn)
                  for tags in tag_response['TagList']:
                      if tags['Key'] == str(Key) and tags['Value'] == str(Tag):
                          status = resp['DBClusterStatus']
                          ClusterID = resp['DBClusterIdentifier']
                          print(ClusterID)
                          if status == 'available':
                              print("shutting down %s " % ClusterID)
                              client.stop_db_cluster(DBClusterIdentifier=ClusterID)
                              # print ("Do something with it : %s" % db_cluster_arn)
                          elif status == 'stopped':
                              print("starting up %s " % ClusterID)
                              client.start_db_cluster(DBClusterIdentifier=ClusterID)
                          else:
                              print("The database is " + status + " status!")
      Handler: index.lambda_handler
      Role: !Sub "arn:aws:iam::${AWS::AccountId}:role/YourRoleNameHere"
      Runtime: python3.9
      Description: This is an example of a function in CloudFormation
      FunctionName: Your_Function_Name
      MemorySize: 128
      Timeout: 180
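As a sanity check before creating the stack, you can also ask CloudFormation itself to validate the template. A minimal sketch using boto3 ("template.yaml" is a placeholder for your template file):

import boto3

# Hedged sketch: validate_template raises a ClientError for templates that
# are not well formed, surfacing the same message without creating a stack.
cf = boto3.client('cloudformation')
with open('template.yaml') as f:
    cf.validate_template(TemplateBody=f.read())
print('Template accepted by CloudFormation')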

blpapi.exception.UnsupportedOperationException: No subscription management endpoints for snapshot (0x00080013)

I am trying to request a snapshot from Bloomberg through the Python API, using the example programs that Bloomberg provided with the package. Their own program doesn't work properly, and I keep getting this error:
WARN blpapi_subscriptionmanager.cpp:1653 blpapi.session.subscriptionmanager.{1} Subscription management endpoints are required for snapshot request templates but none are available
blpapi.exception.UnsupportedOperationException: No subscription management endpoints for snapshot (0x00080013).
The part of the code that makes the snapshot request is in the main function:
def main():
    """main entry point"""
    global options
    options = parseCmdLine()

    # Create a session and fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    for idx, host in enumerate(options.hosts):
        sessionOptions.setServerAddress(host, options.port, idx)
    sessionOptions.setAuthenticationOptions(options.auth)
    sessionOptions.setAutoRestartOnDisconnection(True)

    print("Connecting to port %d on %s" % (
        options.port, ", ".join(options.hosts)))

    session = blpapi.Session(sessionOptions)
    if not session.start():
        print("Failed to start session.")
        return

    subscriptionIdentity = None
    if options.auth:
        subscriptionIdentity = session.createIdentity()
        isAuthorized = False
        authServiceName = "//blp/apiauth"
        if session.openService(authServiceName):
            authService = session.getService(authServiceName)
            isAuthorized = authorize(authService, subscriptionIdentity,
                                     session, blpapi.CorrelationId("auth"))
        if not isAuthorized:
            print("No authorization")
            return
    else:
        print("Not using authorization")

    # Snapshot Request Part:
    fieldStr = "?fields=" + ",".join(options.fields)

    snapshots = []
    nextCorrelationId = 0
    for i, topic in enumerate(options.topics):
        subscriptionString = options.service + topic + fieldStr
        snapshots.append(session.createSnapshotRequestTemplate(
            subscriptionString,
            subscriptionIdentity,
            blpapi.CorrelationId(i)))
        nextCorrelationId += 1

    requestTemplateAvailable = blpapi.Name('RequestTemplateAvailable')
    eventCount = 0
    try:
        while True:
            # Specify timeout to give a chance for Ctrl-C
            event = session.nextEvent(1000)
            for msg in event:
                if event.eventType() == blpapi.Event.ADMIN and \
                        msg.messageType() == requestTemplateAvailable:
                    for requestTemplate in snapshots:
                        session.sendRequestTemplate(
                            requestTemplate,
                            blpapi.CorrelationId(nextCorrelationId))
                        nextCorrelationId += 1
                elif event.eventType() == blpapi.Event.RESPONSE or \
                        event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
                    cid = msg.correlationIds()[0].value()
                    print("%s - %s" % (cid, msg))
                else:
                    print(msg)
            if event.eventType() == blpapi.Event.RESPONSE:
                eventCount += 1
                if eventCount >= options.maxEvents:
                    print("%d events processed, terminating." % eventCount)
                    break
            elif event.eventType() == blpapi.Event.TIMEOUT:
                for requestTemplate in snapshots:
                    session.sendRequestTemplate(
                        requestTemplate,
                        blpapi.CorrelationId(nextCorrelationId))
                    nextCorrelationId += 1
I don't know if an endpoint and a subscription management endpoint are two different things, because I have other code that works properly, and its endpoint is the IP of the server I am pulling the data from.
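For what it's worth, one way to see which endpoints the session actually connected to is to print the session/service status messages right after session.start(). A rough diagnostic sketch (the 10-iteration drain loop is arbitrary, just enough to catch the startup events):

# Hedged diagnostic: drain a few startup events and print the session and
# service status messages, which name the endpoints the SDK connected to.
for _ in range(10):
    event = session.nextEvent(500)  # same polling call the example uses
    if event.eventType() in (blpapi.Event.SESSION_STATUS,
                             blpapi.Event.SERVICE_STATUS):
        for msg in event:
            print(msg)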

Querying Athena from Lambda function - QUEUED state?

I've been successfully querying S3 via Athena from inside a Lambda function for quite some time, but it has suddenly stopped working. Further investigation shows that the response from get_query_execution() is returning a state of 'QUEUED' (which I was led to believe is not used?!)
My code is as follows:
def run_query(query, database, s3_output, max_execution=5):
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={
            'Database': database
        },
        ResultConfiguration={
            'OutputLocation': s3_output
        })
    execution_id = response['QueryExecutionId']
    print("QueryExecutionId = " + str(execution_id))

    state = 'RUNNING'
    while (max_execution > 0 and state in ['RUNNING']):
        max_execution = max_execution - 1
        print("maxexecution=" + str(max_execution))
        response = client.get_query_execution(QueryExecutionId=execution_id)

        if 'QueryExecution' in response and \
                'Status' in response['QueryExecution'] and \
                'State' in response['QueryExecution']['Status']:
            state = response['QueryExecution']['Status']['State']
            print(state)
            if state == 'SUCCEEDED':
                print("Query SUCCEEDED: {}".format(execution_id))

                s3_key = 'athena_output/' + execution_id + '.csv'
                print(s3_key)
                local_filename = '/tmp/' + execution_id + '.csv'
                print(local_filename)

                rows = []
                try:
                    print("s3key =" + s3_key)
                    print("localfilename = " + local_filename)
                    s3.Bucket(BUCKET).download_file(s3_key, local_filename)
                    with open(local_filename) as csvfile:
                        reader = csv.DictReader(csvfile)
                        for row in reader:
                            rows.append(row)
                except botocore.exceptions.ClientError as e:
                    if e.response['Error']['Code'] == "404":
                        print("The object does not exist.")
                        print(e)
                    else:
                        raise
                return json.dumps(rows)
            elif state == 'FAILED':
                return False

        time.sleep(10)

    return False
So the code is evidently working as written; it's just that the 'QUEUED' state is completely unexpected and I'm not sure what to do about it. What can cause the query execution to become 'QUEUED', and what needs to change in my code to cater for it?
Take a look at the Athena hook in Apache Airflow. Athena has final states (SUCCEEDED, FAILED, and CANCELLED) and intermediate states (RUNNING and QUEUED). QUEUED is a normal state for a query before it gets started. So you could use code like this:
def run_query(query, database, s3_output, max_execution=5):
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={
            'Database': database
        },
        ResultConfiguration={
            'OutputLocation': s3_output
        })
    execution_id = response['QueryExecutionId']
    print("QueryExecutionId = " + str(execution_id))

    state = 'QUEUED'
    while (max_execution > 0 and state in ['RUNNING', 'QUEUED']):
        max_execution = max_execution - 1
        print("maxexecution=" + str(max_execution))
        response = client.get_query_execution(QueryExecutionId=execution_id)

        if 'QueryExecution' in response and \
                'Status' in response['QueryExecution'] and \
                'State' in response['QueryExecution']['Status']:
            state = response['QueryExecution']['Status']['State']
            print(state)
            if state == 'SUCCEEDED':
                print("Query SUCCEEDED: {}".format(execution_id))

                s3_key = 'athena_output/' + execution_id + '.csv'
                print(s3_key)
                local_filename = '/tmp/' + execution_id + '.csv'
                print(local_filename)

                rows = []
                try:
                    print("s3key =" + s3_key)
                    print("localfilename = " + local_filename)
                    s3.Bucket(BUCKET).download_file(s3_key, local_filename)
                    with open(local_filename) as csvfile:
                        reader = csv.DictReader(csvfile)
                        for row in reader:
                            rows.append(row)
                except botocore.exceptions.ClientError as e:
                    if e.response['Error']['Code'] == "404":
                        print("The object does not exist.")
                        print(e)
                    else:
                        raise
                return json.dumps(rows)
            elif state == 'FAILED' or state == 'CANCELLED':
                return False

        time.sleep(10)

    return False
Got this response from AWS - there have been changes to Athena that caused this issue (although QUEUED has been in the state enum for some time, it hasn't been used until now):
The Athena team recently deployed a host of new functionality for Athena, including more granular CloudWatch metrics for Athena queries.
For more information:
AWS What's New page
Athena docs on CloudWatch metrics
As part of the deployment of more granular metrics, Athena now includes a QUEUED status for queries. This status indicates that an Athena query is waiting for resources to be allocated for processing. Query flow is roughly:
SUBMITTED -> QUEUED -> RUNNING -> COMPLETED/FAILED
Note that queries that fail due to system errors can be put back into the queue and retried.
I apologise for the frustration that this change has caused.
It seems like the forum formatting has stripped some elements from your code snippets.
However, I think that your WHILE loop is working on an array of the possible query statuses, which didn't previously cater for QUEUED.
If that is the case, then yes, adding QUEUED to that array will allow your application to handle the new status.
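If you would rather key on Athena's terminal states than enumerate every intermediate one, a minimal polling helper along these lines would also work (wait_for_query and the poll limits are illustrative names, not part of the SDK):

import time

# Hedged sketch: poll until Athena reports a terminal state. QUEUED and
# RUNNING are both intermediate, so anything non-terminal means keep waiting.
TERMINAL_STATES = {'SUCCEEDED', 'FAILED', 'CANCELLED'}

def wait_for_query(client, execution_id, poll_seconds=10, max_polls=30):
    for _ in range(max_polls):
        response = client.get_query_execution(QueryExecutionId=execution_id)
        state = response['QueryExecution']['Status']['State']
        print(state)
        if state in TERMINAL_STATES:
            return state
        time.sleep(poll_seconds)
    return None  # still not finished after max_polls attempts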

Python server hangs

I have a problem with the Python 3.6 HTTPServer.
When the browser sends requests, everything is fine. When I send a request via Postman, that request completes (code 200), but subsequent requests fail. If I hit the server again via Postman, all pending requests complete and then they continue to fail.
This is the JS that sends requests to the server:
jQuery.post("http://<?php echo $_SERVER['SERVER_ADDR']?>:8081", JSON.stringify({
    auth: "security_token",
    user_id: <?php echo $_SESSION[CLIENT_LOGGED] ?>
}));
And this is how the server processes the request:
def do_POST(self):
    contentLen = int(self.headers['content-length'])
    postBody = self.rfile.read(contentLen)
    try:
        data = json.loads(postBody.decode('utf-8'))
    except ValueError:
        self.handeValueError()
        return
    self.processRequest(data)
    return

def processRequest(self, data):
    if (data["auth"] == self.centralKey):
        self.processCentral(data)
    elif (data["auth"] == self.clientKey):
        self.processClient(data)
    else:
        self._set_badAuth()

def processCentral(self, inputData):
    self._set_validMethod()
    phone = standartizePhone(inputData["caller"])
    if(inputData["ringing"] == "1"):
        result = getCallerInfo(phone)
        if (len(result) == 0):
            temp = CallerData(phone, -1, 0)
            self.callerInfo[inputData["internal"]].append(temp)
        for row in result:
            if (row[2] == 0):
                temp = CallerData(phone, row[0], row[1], 1)
            else:
                temp = CallerData(phone, row[0], row[1], 1, row[2], row[3], row[4], row[5])
            self.callerInfo[inputData["internal"]].append(temp)
    elif(inputData["ringing"] == "2"):
        for el in self.callerInfo[inputData["internal"]]:
            el.c_status = 2
    elif(inputData["ringing"] == "3"):
        caller = CallerData(phone, 0, 0)
        values = [value for value in self.callerInfo[inputData["internal"]] if value != caller]
        self.callerInfo[inputData["internal"]] = values
    else:
        message = '{"success": "false"}'
        self.wfile.write(bytes(message, "utf8"))
        return False
    message = '{"success": "true"}'
    self.wfile.write(bytes(message, "utf8"))
    return True

def processClient(self, inputData):
    self._set_validMethod()
    message = "["
    internalMsg = []
    internalNums = getUserInternal(inputData["user_id"])
    for internal in internalNums:
        for caller in self.callerInfo[str(internal[0])]:
            internalMsg.append(callerInfoToJSON(caller))
    message += ", ".join(internalMsg)
    message += "]"
    self.wfile.write(bytes(message, "utf8"))
Note that it doesn't matter whether I send the request as central or client via Postman; either way it just hangs. Also, no errors are logged from the Python script.
Note 2: Updating Postman to the latest version seems to fix the problem.
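For what it's worth, the symptom here (one request succeeding, later ones hanging until a new connection arrives) is typical of a keep-alive client waiting on a response whose length it cannot determine. A minimal sketch of a handler that sends an explicit status line and Content-Length, so the client knows when the response is complete (the Handler name and port are placeholders):

import json
from http.server import BaseHTTPRequestHandler, HTTPServer

class Handler(BaseHTTPRequestHandler):
    def do_POST(self):
        content_len = int(self.headers.get('Content-Length', 0))
        body = self.rfile.read(content_len)
        try:
            data = json.loads(body.decode('utf-8'))
        except ValueError:
            self.send_error(400, 'Invalid JSON')
            return
        payload = json.dumps({'success': True}).encode('utf-8')
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)

HTTPServer(('', 8081), Handler).serve_forever()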

rq.job.Job returning a 'sh: 1: mv: not found' error

I currently have a master Python script which launches 6 jobs on remote hosts and polls whether the jobs are done or not over a long period (days, usually). However, in my code below, the first element in the self.job_result list is always 'sh: 1: mv: not found'. The 6 job values are also always in that list (i.e. there are 7 elements in the list when there should only be 6). It appears that rq.job.Job is returning this value; any idea why?
hosts = HOSTS.keys()
job_ids = []
for host in hosts:
    r = requests.get(HOSTS[host] + 'launch_jobs', auth=('admin', 'secret'))
    job_ids.append(r.text)
host_job_dict = dict(zip(hosts, job_ids))
print "HOST_JOB_DICT: %s " % host_job_dict

launch_time = datetime.datetime.now()
self.job_result = []
complete = False
status = [False]*len(hosts)
host_job_keys = host_job_dict.keys()
while not complete:
    check_time = datetime.datetime.now()
    time_diff = check_time - launch_time
    if time_diff.seconds > JOB_TIMEOUT:
        sys.exit('Job polling has lasted 10 days, something is wrong')
    print "HOST_JOB_KEYS %s " % host_job_keys
    for idx, key in enumerate(host_job_keys):
        if not status[idx]:
            host = HOSTS[key]
            j_id = host_job_dict[key]
            req = requests.get(host + 'check_job/' + j_id, auth=('admin', 'secret'))
            if req.status_code == 202:
                continue
            elif req.status_code == 200:
                self.job_result.append(req.json()['results'].encode('ascii').split())
                status[idx] = True
    complete = all(status)
    time.sleep(1)
And on the server side of things...
@app.route("/check_job/<job_key>", methods=['GET'])
@requires_auth
def check_job(job_key):
    job = Job.fetch(job_key, connection=conn)
    if job.is_finished:
        data = job.return_value
        json_data = jsonify({"results": data})
        # return Response(response=json_data, status=200, mimetype="application/json")
        return json_data
    elif job.status == 'failed':
        return "Failed", 202
    else:
        return "Not yet", 202
This turned out to be an extremely convoluted issue where mv and other commands in /bin weren't being recognized. To get around it, we were explicit and used /bin/mv instead. We believe this issue cropped up as the result of a complication with a systemctl instantiation.
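In Python terms, the equivalent workaround is to call binaries by absolute path, or to hand the child process an explicit PATH. A small sketch (the file names are placeholders):

import os
import subprocess

# Hedged sketch: under a service manager the child can inherit a stripped
# PATH, so either call the binary by absolute path...
subprocess.check_call(['/bin/mv', 'src.txt', 'dst.txt'])

# ...or supply an explicit PATH for anything the job shells out to.
env = dict(os.environ, PATH='/usr/local/bin:/usr/bin:/bin')
subprocess.check_call(['mv', 'src.txt', 'dst.txt'], env=env)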

Raspberry Pi python youtube api json error

I'm using a Raspberry Pi 3 and Python 2.7.9, and I'm trying to use the YouTube API. Here's the code I use:
#!/usr/bin/python

from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser

import json
import urllib

# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = "REPLACE_ME"  # I did replace the API key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
FREEBASE_SEARCH_URL = "https://www.googleapis.com/freebase/v1/search?%s"


def get_topic_id(options):
    # Retrieve a list of Freebase topics associated with the provided query term.
    freebase_params = dict(query=options.query, key=DEVELOPER_KEY)
    freebase_url = FREEBASE_SEARCH_URL % urllib.urlencode(freebase_params)
    freebase_response = json.loads(urllib.urlopen(freebase_url).read())

    if len(freebase_response["result"]) == 0:
        exit("No matching terms were found in Freebase.")

    # Display the list of matching Freebase topics.
    mids = []
    index = 1
    print "The following topics were found:"
    for result in freebase_response["result"]:
        mids.append(result["mid"])
        print " %2d. %s (%s)" % (index, result.get("name", "Unknown"),
                                 result.get("notable", {}).get("name", "Unknown"))
        index += 1

    # Display a prompt for the user to select a topic and return the topic ID
    # of the selected topic.
    mid = None
    while mid is None:
        index = raw_input("Enter a topic number to find related YouTube %ss: " %
                          options.type)
        try:
            mid = mids[int(index) - 1]
        except ValueError:
            pass
    return mid


def youtube_search(mid, options):
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)

    # Call the search.list method to retrieve results associated with the
    # specified Freebase topic.
    search_response = youtube.search().list(
        topicId=mid,
        type=options.type,
        part="id,snippet",
        maxResults=options.max_results
    ).execute()

    # Print the title and ID of each matching resource.
    for search_result in search_response.get("items", []):
        if search_result["id"]["kind"] == "youtube#video":
            print "%s (%s)" % (search_result["snippet"]["title"],
                               search_result["id"]["videoId"])
        elif search_result["id"]["kind"] == "youtube#channel":
            print "%s (%s)" % (search_result["snippet"]["title"],
                               search_result["id"]["channelId"])
        elif search_result["id"]["kind"] == "youtube#playlist":
            print "%s (%s)" % (search_result["snippet"]["title"],
                               search_result["id"]["playlistId"])


if __name__ == "__main__":
    argparser.add_argument("--query", help="Freebase search term", default="Google")
    argparser.add_argument("--max-results", help="Max YouTube results",
                           default=25)
    argparser.add_argument("--type",
                           help="YouTube result type: video, playlist, or channel",
                           default="channel")
    args = argparser.parse_args()

    mid = get_topic_id(args)
    try:
        youtube_search(mid, args)
    except HttpError, e:
        print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
And I get this error:
ValueError: No JSON object could be decoded
I really don't know how to handle this... does anyone know how to fix it?
I think it's related to the OAuth 2.0 JSON file, but I'm not sure... and I don't know how to use it.
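One way to narrow this down is to print the raw body that the Freebase URL returns before handing it to json.loads; if it is an HTML error page rather than JSON, the decode failure is coming from the endpoint, not from your parsing. A quick diagnostic in the same Python 2 style (freebase_url built exactly as in get_topic_id):

import urllib

# Hedged diagnostic sketch: look at what actually comes back from the URL
# before json.loads touches it.
raw = urllib.urlopen(freebase_url).read()
print repr(raw[:500])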
