I am trying to download a file from a URL and upload it to an S3 bucket.
My code is as follows:
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import xml.etree.ElementTree as etree
from datetime import datetime as dt
import os
import urllib
import requests
import boto3
from botocore.client import Config
from urllib.parse import urlparse
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print('event.session.application.applicationId=' + event['session']['application']['applicationId'])
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']}, event['session'])
if event['request']['type'] == 'LaunchRequest':
return on_launch(event['request'], event['session'])
elif event['request']['type'] == 'IntentRequest':
return on_intent(event['request'], event['session'])
elif event['request']['type'] == 'SessionEndedRequest':
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
print('on_session_started requestId='
+ session_started_request['requestId'] + ', sessionId='
+ session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print('on_launch requestId=' + launch_request['requestId']
+ ', sessionId=' + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print('on_intent requestId=' + intent_request['requestId']
+ ', sessionId=' + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == 'DownloadFiles':
return get_file(intent, session)
elif intent_name == 'AMAZON.HelpIntent':
return get_welcome_response()
else:
raise ValueError('Invalid intent')
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.Is not called when the skill returns should_end_session=true """
print('on_session_ended requestId='
+ session_ended_request['requestId'] + ', sessionId='
+ session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could add those here """
session_attributes = {}
card_title = 'Welcome'
speech_output = \
"Welcome to file download Application. Please ask me to download files by saying, Ask downloader for download"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = \
"Please ask me to download files by saying, Ask downloader for download"
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title,
speech_output, reprompt_text,
should_end_session))
def get_file(intent, session):
""" Grabs the files from the path that have to be downloaded """
card_title = intent['name']
session_attributes = {}
should_end_session = True
username = '*'
password = '*'
ACCESS_KEY_ID = '*'
ACCESS_SECRET_KEY = '*+9'
BUCKET_NAME = 'lambda-file-upload'
url = 'https://drive.google.com/drive/xyz'
filename = os.path.basename(urlparse(url).path)
# urllib.urlretrieve(url, "code.zip")
r = requests.get(url, auth=(username, password))
if r.status_code == 200:
with open("/tmp/" + filename, 'wb') as out:
for bits in r.iter_content():
out.write(bits)
data = open("/tmp/" + filename, 'rb')
# S3 Connect
s3 = boto3.resource(
's3',
aws_access_key_id=ACCESS_KEY_ID,
aws_secret_access_key=ACCESS_SECRET_KEY,
config=Config(signature_version='s3v4')
)
# Uploaded File
s3.Bucket(BUCKET_NAME).put(Key= filename, Body=data, ACL='public-read')
speech_output = "The file" + filename + "has been downloaded"
reprompt_text = ""
return build_response(session_attributes,
build_speechlet_response(card_title,
speech_output, reprompt_text,
should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(
title,
output,
reprompt_text,
should_end_session,
):
return {
'outputSpeech': {'type': 'PlainText', 'text': output},
'card': {'type': 'Simple', 'title': 'SessionSpeechlet - ' \
+ title, 'content': 'SessionSpeechlet - ' + output},
'reprompt': {'outputSpeech': {'type': 'PlainText',
'text': reprompt_text}},
'shouldEndSession': should_end_session,
}
def build_response(session_attributes, speechlet_response):
return {'version': '1.0', 'sessionAttributes': session_attributes,
'response': speechlet_response}
I am getting the following error:
's3.Bucket' object has no attribute 'put': AttributeError
I am trying to create an Alexa Skill which will download a file from a URL, hence I created a Lambda function. I am new to AWS Lambda and S3. I would really appreciate some help.
As per the official docs here, instead of
s3.Bucket(BUCKET_NAME).put(Key= filename, Body=data, ACL='public-read')
you should use put_object() function as
s3.Bucket(BUCKET_NAME).put_object(Key= filename, Body=data, ACL='public-read')
The Bucket sub-resource of boto3 doesn't have an API method named put. That's why you are getting this error; the message is self-explanatory:
's3.Bucket' object has no attribute 'put'.
Use the provided API method put_object instead. Or you can simply replace the line
s3.Bucket(BUCKET_NAME).put(Key= filename, Body=data, ACL='public-read')
with
s3.Bucket(BUCKET_NAME).put_object(Key=filename, Body=data, ACL='public-read')
For more information about the method syntax and parameters, see the Boto3 S3 documentation.
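For reference, a minimal, self-contained sketch of the same upload via put_object (the bucket name, key, and file path are placeholders taken from the question):
import boto3
from botocore.client import Config

s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
# Body accepts bytes or a file-like object opened in binary mode
with open('/tmp/code.zip', 'rb') as data:
    s3.Bucket('lambda-file-upload').put_object(Key='code.zip', Body=data, ACL='public-read')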
I had a similar issue, due to a different reason:
# This will give an error while getting objects:
s3 = boto3.client('s3', region_name='us-east-1')
bucket = 'mybucketname'
# This works while getting objects:
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucketname')
# Now get the objects:
for obj in bucket.objects.filter(Prefix="foldername/"):
    key = obj.key
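For comparison, a minimal sketch of the equivalent listing with the client API (bucket name and prefix are placeholders):
import boto3

s3 = boto3.client('s3', region_name='us-east-1')
# list_objects_v2 returns matching keys under 'Contents';
# the key is absent when nothing matches the prefix
response = s3.list_objects_v2(Bucket='mybucketname', Prefix='foldername/')
for obj in response.get('Contents', []):
    key = obj['Key']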
Related
I'm trying to extract AWS Trusted Advisor data through a Lambda function (triggered by an event scheduler) and upload it to S3. However, part of the function throws an error. Below is my code.
##libraries
import boto3
import os
import csv
from csv import DictWriter
import time
import traceback
## bucket_name is set as env variable
bucket_name = "test-ta-reports"
fail_msg = 'Pulling Trusted Advisor data failed'
Filename = "/tmp/checks_list.csv"
obj_name = time.strftime("%Y-%m-%d-%H-%M-%S") + '/' + '.csv'
##upload to s3
def s3_upload(bucket_name, Filename, obj_name):
if obj_name is None:
obj_name = os.path.basename(Filename)
try:
s3 = boto3.client("s3", region_name="eu-west-1")
response = s3.upload_file(Filename, bucket_name, obj_name)
return True
except:
print('Data failed to upload to bucket')
traceback.print_exc()
return False
def lambda_handler(event, context):
try:
support_client = boto3.client('support', region_name='us-east-1')
ta_checks = support_client.describe_trusted_advisor_checks(language='en')
checks_list = {ctgs: [] for ctgs in list(set([checks['category'] for checks in ta_checks['checks']]))}
for checks in ta_checks['checks']:
print('Getting check:' + checks['name'] + checks['category'])
try:
check_summary = support_client.describe_trusted_advisor_check_summaries(
checkIds=[checks['id']])['summaries'][0]
if check_summary['status'] != 'not_available':
checks_list[checks['category']].append(
[checks['name'], check_summary['status'],
str(check_summary['resourcesSummary']['resourcesProcessed']),
str(check_summary['resourcesSummary']['resourcesFlagged']),
str(check_summary['resourcesSummary']['resourcesSuppressed']),
str(check_summary['resourcesSummary']['resourcesIgnored'])
])
else:
print("unable to append checks")
except:
print('Failed to get check: ' + checks['name'])
traceback.print_exc()
except:
print('Failed! Debug further.')
traceback.print_exc()
##rewrite dict to csv
with open('/tmp/checks_list.csv', 'w', newline='') as csvfile:
csv_writer = DictWriter(csvfile, fieldnames=['status','hasFlaggedResources','timestamp','resourcesSummary','categorySpecificSummary', 'checkId'])
csv_writer.writeheader()
csv_writer.writerow(check_summary)
return checks_list
if s3_upload(bucket_name, Filename, obj_name):
print("Successfully uploaded")
if __name__ == '__main__':
lambda_handler(event, context)
The error logs
unable to append checks
I'm new to Python, so I'm unsure how to check traceback stacks under the else: statement. Is there any way to modify this code to get traceback logs for the append block? Also, have I made any error in the above code? I'm unable to figure out any. Please help.
response = client.describe_trusted_advisor_check_summaries(
checkIds=[
'string',
]
)
describe_trusted_advisor_check_summaries() returns summarized results for one or more Trusted Advisor checks. Here you are checking that check_summary['status'] is not equal to not_available, i.e. the alert status of the check is either "ok" (green), "warning" (yellow), or "error" (red); in that case you append resourcesProcessed, resourcesFlagged, resourcesSuppressed, and resourcesIgnored to checks_list for further processing.
it's printing
unable to append checks
just because the status of the check is not_available. It is not an error log. You just need to decide what to do when a check's status is not_available.
See the documentation of describe_trusted_advisor_check_summaries. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/support.html#Support.Client.describe_trusted_advisor_check_summaries
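For example, a minimal sketch of handling that case explicitly (the helper name and the placeholder row are assumptions; the field names follow the response shape above):
def append_check(checks_list, checks, check_summary):
    # Append a summary row; record not_available checks instead of skipping them
    if check_summary['status'] != 'not_available':
        summary = check_summary['resourcesSummary']
        row = [checks['name'], check_summary['status'],
               str(summary['resourcesProcessed']), str(summary['resourcesFlagged']),
               str(summary['resourcesSuppressed']), str(summary['resourcesIgnored'])]
    else:
        # placeholder row so the report still shows the check was skipped
        row = [checks['name'], 'not_available', '0', '0', '0', '0']
    checks_list[checks['category']].append(row)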
I am using the following examples from the docusign site.
I have a set of python scripts that works well on my PC.
I have to move the code to a server behind a proxy.
I could not find any example or settings to configure a proxy.
I tried setting it in the underlying urllib3 code, but it is being overwritten each time the API creates an instance of ApiClient().
How do I set the python docusign_esign ApiClient to use a proxy?
Below is the portion of the code.
from docusign_esign import ApiClient
from docusign_esign import EnvelopesApi
from jwt_helper import get_jwt_token, get_private_key
# this one has all the connection parameters
from jwt_config import DS_JWT
import urllib3
proxy = urllib3.ProxyManager('http://<id>:<pwd>@<proxy_server>:3128/', maxsize=10)
# used by docusign to decide what you have access to
SCOPES = ["signature", "impersonation"]
# Call the envelope status change method to list the envelopes changed in the last 10 days
def worker(args):
api_client = ApiClient()
api_client.host = args['base_path']
api_client.set_default_header("Authorization", "Bearer " + args['access_token'])
envelope_api = EnvelopesApi(api_client)
# The Envelopes::listStatusChanges method has many options
# The list status changes call requires at least a from_date OR
# a set of envelopeIds. Here we filter using a from_date.
# Here we set the from_date to filter envelopes for the last month
# Use ISO 8601 date format
from_date = (datetime.datetime.utcnow() - timedelta(days=120)).isoformat()
results = envelope_api.list_status_changes(args['account_id'], from_date=from_date)
return results, envelope_api
# Call request_jwt_user_token method
def get_token(private_key, api_client):
token_response = get_jwt_token(private_key, SCOPES, DS_JWT["authorization_server"],
DS_JWT["ds_client_id"], DS_JWT["ds_impersonated_user_id"])
access_token = token_response.access_token
# Save API account ID
user_info = api_client.get_user_info(access_token)
accounts = user_info.get_accounts()
api_account_id = accounts[0].account_id
base_path = accounts[0].base_uri + "/restapi"
return {"access_token": access_token, "api_account_id": api_account_id, "base_path":
base_path}
# bucket to keep track of token info
def get_args(api_account_id, access_token, base_path):
args = {
"account_id": api_account_id,
"base_path": base_path,
"access_token": access_token
}
return args
# start the actual code here create and then setup the object
api_client = ApiClient()
api_client.set_base_path(DS_JWT["authorization_server"])
api_client.set_oauth_host_name(DS_JWT["authorization_server"])
api_client.rest_client.pool_manager.proxy = proxy
api_client.rest_client.pool_manager.proxy.scheme = "http"
private_key = get_private_key(DS_JWT["private_key_file"]).encode("ascii").decode("utf-8")
jwt_values = get_token(private_key, api_client)
args = get_args(jwt_values["api_account_id"], jwt_values["access_token"], jwt_values["base_path"])
account_id = args["account_id"]
# return the envelope list and api_client object created to get it
results, envelope_api = worker(args)
print("We found " + str(results.result_set_size) + " sets of files")
for envelope in results.envelopes:
envelope_id = envelope.envelope_id
print("Extracting " + envelope_id)
# The SDK always stores the received file as a temp file you can not set the path for this
# Call the envelope get method
temp_file = envelope_api.get_document(account_id=account_id, document_id="archive",
envelope_id=envelope_id)
if temp_file:
print("File is here " + temp_file)
with zipfile.ZipFile(temp_file, 'r') as zip_ref:
zip_ref.extractall(extract_dir + envelope_id + "\\")
zip_ref.close()
print("Done extracting " + envelope_id + " deleting zip file")
os.remove(temp_file)
print("Deleted file here " + temp_file)
else:
print("Failed to get data for " + envelope_id)
I am really new to Python and was hoping someone could help me with this. I have a function (an AWS Lambda function, actually) that I need to pass a variable value to after it is created. I'm doing this to create a campaign in Pinpoint when a segment is created.
import os
import time
import boto3
from botocore.exceptions import ClientError
from datetime import datetime,timedelta
AWS_REGION = os.environ['region']
projectId = os.environ['projectId']
importRoleArn = os.environ['importRoleArn']
def lambda_handler(event, context):
print("Received event: " + str(event))
for record in event['Records']:
# Assign some variables to make it easier to work with the data in the
# event record
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
folder = os.path.split(key)[0]
folder_path = os.path.join(bucket, folder)
full_path = os.path.join(bucket, key)
s3_url = "s3://" + folder_path
# print(full_path);
# Check to see if all file parts have been processed.
if all_files_processed(bucket, folder, full_path):
# If you haven't recently run an import job that uses a file stored in
# the specified S3 bucket, then create a new import job. This prevents
# the creation of duplicate segments.
if not (check_import_jobs(bucket, folder, s3_url, full_path)):
segmentID = create_import_job(s3_url, full_path)
create_campaign(segmentID)
else:
print("Import job found with URL s3://"
+ os.path.join(bucket,folder) + ". Aborting.")
else:
print("Parts haven't finished processing yet.")
# Determine if all of the file parts have been processed.
def all_files_processed(bucket, folder, full_path):
# Use the "__ofN" part of the file name to determine how many files there
# should be.
number_of_parts = int((full_path.split("__of")[1]).split("_processed")[0])
# Figure out how many keys contain the prefix for the current batch of
# folders (basically, how many files are in the appropriate "folder").
client = boto3.client('s3')
objs = client.list_objects_v2(Bucket=bucket,Prefix=folder)
file_count = objs['KeyCount']
ready_for_import = False
if file_count == number_of_parts:
ready_for_import = True
return ready_for_import
# Check Amazon Pinpoint to see if any import jobs have been created by using
# the same S3 folder.
def check_import_jobs(bucket, folder, s3_url, full_path):
url_list = []
print(s3_url);
# Retrieve a list of import jobs for the current project ID.
client = boto3.client('pinpoint')
try:
client_response = client.get_import_jobs(
ApplicationId=projectId
)
except ClientError as e:
print(e.response['Error']['Message'])
else:
segment_response = client_response['ImportJobsResponse']['Item']
#print(segment_response);
# Parse responses. Add all S3Url values to a list.
for item in segment_response:
#print(item);
s3_url_existing = full_path
url_list.append(s3_url_existing)
#print(url_list);
# Search for the current S3 URL in the list.
if s3_url in url_list:
found = True
else:
found = False
print(found);
return found
# Create the import job in Amazon Pinpoint.
def create_import_job(s3_url, full_path):
client = boto3.client('pinpoint')
segment_name = s3_url.split('/')[4]
try:
response = client.create_import_job(
ApplicationId=projectId,
ImportJobRequest={
'DefineSegment': True,
'Format': 'CSV',
'RegisterEndpoints': True,
'RoleArn': importRoleArn,
'S3Url': s3_url,
'SegmentName': segment_name
}
)
except ClientError as e:
print(e.response['Error']['Message'])
else:
print("Import job " + response['ImportJobResponse']['Id'] + " "
+ response['ImportJobResponse']['JobStatus'] + ".")
print("Segment ID: "
+ response['ImportJobResponse']['Definition']['SegmentId'])
print("Application ID: " + projectId)
return response['ImportJobResponse']['Definition']['SegmentId']
def create_campaign(segmentID):
client = boto3.client('pinpoint')
now = datetime.now()
dt_string = now.isoformat()
print(type(segmentID))
try:
response = client.create_campaign(
ApplicationId=projectId,
WriteCampaignRequest={
'Description': 'Test SMS Campaign 2',
'MessageConfiguration': {
'EmailMessage': {
'Body': 'This is a test 2',
'FromAddress': 'xxx@xxx.com',
'HtmlBody': '<p>Test 2</p>',
'Title': 'This is a test 2'
},
'SMSMessage': {
'Body': 'Thanks for your visit to {{Attributes.Provider_Name}} on {{Attributes.Clinical_Date_of_Service}}',
'MessageType': 'PROMOTIONAL',
'SenderId': 'XXX'
}
},
'Schedule': {
'Frequency': 'ONCE',
'IsLocalTime': True,
'StartTime': dt_string,
'Timezone': 'UTC'
},
'Name': 'Test Email Campaign 6',
'SegmentId': segmentID
}
)
except ClientError as e:
print(e.response['Error']['Message'])
else:
print('Campaign Created')
The issue comes up in create_campaign, where I want to send the SegmentID. I end up getting the following error...
"Segment specified in SegmentId is not found"
I can print the segmentID to the console no problem; it's just getting it to pass to the function that is the roadblock I'm hitting. Thanks in advance!
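For context, the import job created by create_import_job runs asynchronously, so the segment may not exist yet when create_campaign runs immediately afterwards. A minimal sketch of waiting for the job to finish first (the helper name and timeout are assumptions; the JobId comes from the create_import_job response):
import time
import boto3

def wait_for_import_job(application_id, job_id, timeout=300):
    # Poll get_import_job until the job finishes; the segment is only usable after COMPLETED
    client = boto3.client('pinpoint')
    deadline = time.time() + timeout
    while time.time() < deadline:
        job = client.get_import_job(ApplicationId=application_id, JobId=job_id)
        status = job['ImportJobResponse']['JobStatus']
        if status == 'COMPLETED':
            return True
        if status in ('FAILED', 'FAILING'):
            return False
        time.sleep(5)
    return False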
Challenge: Attach screenshots to tests in Test Lab and update the status as PASS/FAIL step-wise (currently, updating the pass status is enough).
I am expected to write a script in Python to attach test results to the test cases present in Test Lab, then for each test step set the Expected result to "As Expected" and pass the TC step by step.
I.e., while performing this manually, we select the case, click run, enter "As expected" in the expected output area and pass that step, and perform this for all test steps of the test case. This needs to be automated. I have a folder of screenshots (named similarly to the TC names), so the script should upload the screenshots and update the status.
What I have tried so far:
I was able to connect to ALM with a partial test case name and pull the full test case name from the Test Plan, but unfortunately I am still struggling to achieve the final goal.
My code so far :
import win32com
from win32com.client import Dispatch
import codecs
import re
import json
# Login Credentials
qcServer = "https://almurl.saas.microfocus.com/qcbin/"
qcUser = "my_username"
qcPassword = "pwd"
qcDomain = "domain"
testList = []
testdict = {}
project = "Crew_Management"
# Do the actual login
td = win32com.client.Dispatch("TDApiOle80.TDConnection.1")
td.InitConnectionEx(qcServer)
td.Login(qcUser,qcPassword)
td.Connect(qcDomain,project)
if td.Connected == True:
print ("System: Logged in to " +project)
else:
print ("Connect failed to " +project)
mg = td.TreeManager # Tree manager
name = ['TC001','TC002','TC003','TC003','TC004','TC005','TC006','TC007','TC008','TC009','TC010','TC011','TC012','TC013','TC014']
folder = mg.NodeByPath('Subject\\Test Factory\\MPG\\MPG Regression Test_Yearly Request\\GUI')
for x in name:
testList = folder.FindTests(x)
#print(type(testList))
print(testList[0].Name)
print(testList[0].DesStepsNum)
td.Disconnect()
td.Logout()
Any help or guidance is much appreciated !
Assuming that you have working experience in Python, here are all the different functions needed to complete your task.
Reference: https://admhelp.microfocus.com/alm/api_refs/REST_TECH_PREVIEW/ALM_REST_API_TP.html
Global Variable
import re
import json
import datetime
import time
import sys
import os, fnmatch
from os import listdir
from os.path import isfile, join
from xml.etree.ElementTree import Element, SubElement, tostring, parse
import glob
from requests.auth import HTTPBasicAuth
import requests
ALM_USER_NAME = ""
ALM_PASSWORD = ""
ALM_DOMAIN = ""
ALM_URL = ""
AUTH_END_POINT = ALM_URL + "authentication-point/authenticate"
QC_SESSION_END_POINT = ALM_URL + "rest/site-session"
QC_LOGOUT_END_POINT = ALM_URL + "authentication-point/logout"
ALM_MIDPOINT = "rest/domains/" + ALM_DOMAIN + "/projects/"
PATH_SEP = os.path.sep
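The functions below are written as methods; they assume a small wrapper class that owns a requests.Session (so the ALM authentication cookies persist across calls) and a couple of instance attributes used later. A minimal sketch of that scaffolding (the class name is an assumption):
class AlmRestClient(object):
    def __init__(self, test_set_path):
        # one shared Session keeps the LWSSO/QCSession cookies across requests
        self.alm_session = requests.Session()
        # ALM test set path, e.g. "Root\\folder\\subfolder"
        self.test_set_path = test_set_path
        # scratch space used while creating and updating run instances
        self.parser_temp_dic = {}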
Login Function
def alm_login(self):
"""
Function : alm_login
Description : Authenticate user
Parameters : global parameter
alm_username - ALM User
alm_password - ALM Password
"""
    response = self.alm_session.post(AUTH_END_POINT,
                                     auth=HTTPBasicAuth(ALM_USER_NAME, ALM_PASSWORD))
    if response.status_code == 200:
        response = self.alm_session.post(QC_SESSION_END_POINT)
        if response.status_code in (200, 201):
            print("ALM Authentication successful")
        else:
            print("Error: ", response.status_code)
    else:
        print("Error: ", response.status_code)
    self.alm_session.headers.update({'Accept': 'application/json',
                                     'Content-Type': 'application/xml'})
    return
Logout Function
After the logout method succeeds, the cookie should expire.
def alm_logout(self):
'''
Function : alm_logout
Description : terminate user session
Parameters : No Parameters
'''
response = self.alm_session.post(QC_LOGOUT_END_POINT)
print "Logout successful", response.headers.get('Expires'), response.status_code
return
Get Test Set Folder
If the test cases span across multiple test suites then it is better to get the test set folder first and find the necessary test suite.
def find_test_set_folder(self):
'''
Function : find_test_set_folder
Description : Finds the test set folder id for the given test set path
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
json_str = json.loads(self.find_folder_id(self.test_set_path.split("\\"), "test-set-folders"
, 0, "id"))
if 'entities' in json_str:
return create_key_value(json_str['entities'][0]['Fields'])['id']
else:
return create_key_value(json_str['Fields'])['id']
Get Folder Id
This method will help you find the Test Suite Folder ID or Test Plan Folder Id.
def find_folder_id(self, arrfolder, str_api, parent_id, fields):
'''
Function : find_folder_id
Description : Walks the folder path, creating missing folders, and returns the folder id
Parameters : 4 Parameters
arrfolder - folder path parts, str_api - end point name, parent_id - starting folder id, fields - fields to return
'''
for foldername in arrfolder:
payload = {"query": "{name['" + foldername + "'];parent-id[" + str(parent_id) + "]}",
"fields": fields}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
if obj["TotalResults"] >= 1:
parent_id = get_field_value(obj['entities'][0]['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
else:
# print("Folder " + foldername + " does not exists")
inputdata = dict()
inputdata['Type'] = str_api[0:len(str_api) - 1]
inputdata['name'] = foldername
inputdata['parent-id'] = str(parent_id)
data = generate_xml_data(inputdata)
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data)
obj = json.loads(response.text)
if response.status_code in (200, 201):
parent_id = get_field_value(obj['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
return response.text
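These methods also call three helpers that are not shown anywhere in this answer; a hedged reconstruction from the way they are used (the names are kept, the bodies are inferred from the ALM REST entity JSON/XML shapes):
def create_key_value(fields):
    # Flatten an ALM entity 'Fields' list into a {Name: first value} dict
    return {field['Name']: (field['values'][0].get('value') if field['values'] else None)
            for field in fields}

def get_field_value(fields, name):
    # Return the first value of the named field from an entity 'Fields' list
    return create_key_value(fields).get(name)

def generate_xml_data(inputdata):
    # Build an ALM <Entity> XML payload from a dict; 'Type' names the entity type
    entity = Element('Entity', {'Type': inputdata.pop('Type')})
    fields = SubElement(entity, 'Fields')
    for name, value in inputdata.items():
        field = SubElement(fields, 'Field', {'Name': name})
        SubElement(field, 'Value').text = value
    return tostring(entity)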
Create Run Instance
Before updating the testing status, we must create a run instance for the test.
def create_run_instance(self, test_set_id, test_map):
'''
Function : create_run_instance
Description : Create new run instances
Parameters : Test Set Id
'''
str_api = "test-instances"
fields = "id,test-id,test-config-id,cycle-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
run_instance_post = "<Entities>"
for entity in obj["entities"]:
run_name = re.sub('[-:]', '_',
'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime(
'%Y-%m-%d %H:%M:%S'))
temp_map = create_key_value(entity["Fields"])
_test_id = int(temp_map['test-id'])
self.parser_temp_dic[_test_id]['testcycl-id'] = temp_map['id']
self.parser_temp_dic[_test_id]['test-config-id'] = temp_map['test-config-id']
self.parser_temp_dic[_test_id]['test-id'] = temp_map['test-id']
self.parser_temp_dic[_test_id]['cycle-id'] = temp_map['cycle-id']
# parser_temp_dic[int(temp_map['test-id'])]['status'].sort()
status = "Passed"
if 'Failed' in self.parser_temp_dic[int(temp_map['test-id'])]['status']:
status = 'Failed'
self.parser_temp_dic[int(temp_map['test-id'])]['final-status'] = status
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['name'] = run_name
inputdata['owner'] = ALM_USER_NAME
inputdata['test-instance'] = str(1)
inputdata['testcycl-id'] = str(temp_map['id'])
inputdata['cycle-id'] = str(temp_map['cycle-id'])
inputdata['status'] = 'Not Completed'
inputdata['test-id'] = temp_map['test-id']
inputdata['subtype-id'] = 'hp.qc.run.MANUAL'
data = generate_xml_data(inputdata)
run_instance_post = run_instance_post + data
self.bulk_operation("runs", run_instance_post + "</Entities>", True, "POST")
return
Update Run Instance
def update_run_instance(self, test_set_id):
'''
Function : update_run_instance
Description : Update the test status in run instances
Parameters : No input parameter
'''
fields = "id,test-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/runs", params=payload)
obj = json.loads(response.text)
run_instance_put = "<Entities>"
for entity in obj["entities"]:
if len(entity["Fields"]) != 1:
temp_map = create_key_value(entity["Fields"])
self.parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['id'] = str(temp_map['id'])
intermediate_ = self.parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']
inputdata['testcycl-id'] = str(intermediate_)
inputdata['status'] = self.parser_temp_dic[int(temp_map['test-id'])]['final-status']
data = generate_xml_data(inputdata)
run_instance_put = run_instance_put + data
self.bulk_operation("runs", run_instance_put + "</Entities>", True, "PUT")
return
Upload Result File
Uploading file to any object in ALM
def upload_result_file(self, test_set_id, report_file):
'''
Function : upload_result_file
Description : Upload test result to ALM
'''
payload = open(report_file, 'rb')
headers = {}
headers['Content-Type'] = "application/octet-stream"
headers['slug'] = "test-results." + report_file[report_file.rfind(".") + 1:]
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/test-sets/" +
str(test_set_id) + "/attachments/",
headers=headers, data=payload)
if not (response.status_code == 200 or response.status_code == 201):
print "Attachment step failed!", response.text, response.url, response.status_code
return
Bulk Operation
This is a helper that allows us to POST an array of data.
def bulk_operation(self, str_api, data, isbulk, request_type):
'''
Function : Post Test Case / Test Instance
Description : Generic function to post multiple entities.
Parameters : 4 parameters
str_api - End point name
data - Actual data to post
isbulk - True or False
request_type - POST or PUT
'''
response = None
headers = {}
try:
if isbulk:
headers['Content-Type'] = "application/xml;type=collection"
if request_type == 'POST':
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
elif request_type == 'PUT':
response = self.alm_session.put(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
except Exception as err:
print(err)
if response.status_code in (200, 201):
return response.text
return response
You can refer to this code:
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
You need to send only three values: Test Set ID (Cycle ID), ALM Test ID, and Execution Status. The library automatically builds the JSON payload, creates a test run, and updates the result.
Here's my script
import urllib.request
import json
import time
from jose import jwk, jwt
from jose.utils import base64url_decode
import base64
region = '....'
userpool_id = '.....'
app_client_id = '...'
keys_url = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpool_id)
response = urllib.request.urlopen(keys_url)
keys = json.loads(response.read())['keys']
token = request.headers['Authorization']
print(token)
# get the kid from the headers prior to verification
headers = jwt.get_unverified_headers(request.headers['Authorization'])
kid = headers['kid']
print(kid)
# search for the kid in the downloaded public keys
key_index = -1
for i in range(len(keys)):
if kid == keys[i]['kid']:
key_index = i
break
if key_index == -1:
print('Public key not found in jwks.json')
return False
# construct the public key
public_key = jwk.construct(keys[key_index])
# get the last two sections of the token,
# message and signature (encoded in base64)
message, encoded_signature = str(token).rsplit('.', 1)
# decode the signature
print('>>encoded signature')
print(encoded_signature)
decoded_signature = base64.b64decode(encoded_signature)
if not public_key.verify(message, decoded_signature):
print('Signature verification failed')
return False
print('Signature successfully verified')
I am always ending up Signature verification failed even though jwt token is generated by a valid legitimate cognito user pool. I've looked at the documentation and it does not really specify the whole verification process.
I see you're using jose, and I'm using PyJWT, but this solution might help you. Most of the code toward the bottom comes from the "api-gateway-authorizer-python" blueprint. Note that this is very frail code that will just break if anything fails; I ended up not using Lambda authentication but rather selecting AWS_IAM authentication for my API Gateway with Identity Pools, so I never finished it.
This example requires that you install pyjwt and cryptography with pip into your working directory and upload everything as a .zip file.
I'd recommend that you watch this video if you want to consider the AWS_IAM authentication option: https://www.youtube.com/watch?v=VZqG7HjT2AQ
They also have a solution with a more elaborate Lambda authorizer implementation on GitHub at https://github.com/awslabs/aws-serverless-auth-reference-app (they show the link at the beginning of the video), but I don't know about their pip dependencies.
from __future__ import print_function
from jwt.algorithms import RSAAlgorithm
import re
import jwt
import json
import sys
import urllib.request
region = 'your-region'
userpoolId = 'your-user-pool-id'
appClientId = 'your-app-client-id'
keysUrl = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpoolId)
def lambda_handler(event, context):
bearerToken = event['authorizationToken']
methodArn = event['methodArn']
print("Client token: " + bearerToken)
print("Method ARN: " + methodArn)
response = urllib.request.urlopen(keysUrl)
keys = json.loads(response.read())['keys']
jwtToken = bearerToken.split(' ')[-1]
header = jwt.get_unverified_header(jwtToken)
kid = header['kid']
jwkValue = findJwkValue(keys, kid)
publicKey = RSAAlgorithm.from_jwk(json.dumps(jwkValue))
decoded = decodeJwtToken(jwtToken, publicKey)
print('Decoded token: ' + json.dumps(decoded))
principalId = decoded['cognito:username']
methodArn = event['methodArn'].split(':')
apiGatewayArnTmp = methodArn[5].split('/')
awsAccountId = methodArn[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = methodArn[3]
policy.stage = apiGatewayArnTmp[1]
#policy.denyAllMethods()
policy.allowAllMethods()
# Finally, build the policy
authResponse = policy.build()
# new! -- add additional key-value pairs associated with the authenticated principal
# these are made available by APIGW like so: $context.authorizer.<key>
# additional context is cached
context = {
'key': 'value', # $context.authorizer.key -> value
'number': 1,
'bool': True
}
# context['arr'] = ['foo'] <- this is invalid, APIGW will not accept it
# context['obj'] = {'foo':'bar'} <- also invalid
authResponse['context'] = context
return authResponse
def findJwkValue(keys, kid):
for key in keys:
if key['kid'] == kid:
return key
def decodeJwtToken(token, publicKey):
try:
decoded=jwt.decode(token, publicKey, algorithms=['RS256'], audience=appClientId)
return decoded
except Exception as e:
print(e)
raise
class HttpVerb:
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
PATCH = 'PATCH'
HEAD = 'HEAD'
DELETE = 'DELETE'
OPTIONS = 'OPTIONS'
ALL = '*'
class AuthPolicy(object):
# The AWS account id the policy will be generated for. This is used to create the method ARNs.
awsAccountId = ''
# The principal used for the policy, this should be a unique identifier for the end user.
principalId = ''
# The policy version used for the evaluation. This should always be '2012-10-17'
version = '2012-10-17'
# The regular expression used to validate resource paths for the policy
pathRegex = r'^[/.a-zA-Z0-9-\*]+$'
'''Internal lists of allowed and denied methods.
These are lists of objects and each object has 2 properties: A resource
ARN and a nullable conditions statement. The build method processes these
lists and generates the approriate statements for the final policy.
'''
allowMethods = []
denyMethods = []
# The API Gateway API id. By default this is set to '*'
restApiId = '*'
# The region where the API is deployed. By default this is set to '*'
region = '*'
# The name of the stage used in the policy. By default this is set to '*'
stage = '*'
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
'''Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null.'''
if verb != '*' and not hasattr(HttpVerb, verb):
raise NameError('Invalid HTTP verb ' + verb + '. Allowed verbs in HttpVerb class')
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError('Invalid resource path: ' + resource + '. Path should match ' + self.pathRegex)
if resource[:1] == '/':
resource = resource[1:]
resourceArn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(self.region, self.awsAccountId, self.restApiId, self.stage, verb, resource)
if effect.lower() == 'allow':
self.allowMethods.append({
'resourceArn': resourceArn,
'conditions': conditions
})
elif effect.lower() == 'deny':
self.denyMethods.append({
'resourceArn': resourceArn,
'conditions': conditions
})
def _getEmptyStatement(self, effect):
'''Returns an empty statement object prepopulated with the correct action and the
desired effect.'''
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _getStatementForEffect(self, effect, methods):
'''This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy.'''
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement['Resource'].append(curMethod['resourceArn'])
conditionalStatement['Condition'] = curMethod['conditions']
statements.append(conditionalStatement)
if statement['Resource']:
statements.append(statement)
return statements
def allowAllMethods(self):
'''Adds a '*' allow to the policy to authorize access to all methods of an API'''
self._addMethod('Allow', HttpVerb.ALL, '*', [])
def denyAllMethods(self):
'''Adds a '*' deny to the policy to deny access to all methods of an API'''
self._addMethod('Deny', HttpVerb.ALL, '*', [])
def allowMethod(self, verb, resource):
'''Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy'''
self._addMethod('Allow', verb, resource, [])
def denyMethod(self, verb, resource):
'''Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy'''
self._addMethod('Deny', verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
'''Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition'''
self._addMethod('Allow', verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
'''Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition'''
self._addMethod('Deny', verb, resource, conditions)
def build(self):
'''Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy.'''
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError('No statements defined for the policy')
policy = {
'principalId': self.principalId,
'policyDocument': {
'Version': self.version,
'Statement': []
}
}
policy['policyDocument']['Statement'].extend(self._getStatementForEffect('Allow', self.allowMethods))
policy['policyDocument']['Statement'].extend(self._getStatementForEffect('Deny', self.denyMethods))
return policy
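For local testing, a sketch of the TOKEN-authorizer event shape this handler expects (all values below are placeholders):
if __name__ == '__main__':
    sample_event = {
        'authorizationToken': 'Bearer <jwt-from-cognito>',
        # format: arn:aws:execute-api:<region>:<account-id>:<api-id>/<stage>/<verb>/<resource>
        'methodArn': 'arn:aws:execute-api:us-east-1:123456789012:abc123/dev/GET/items'
    }
    print(json.dumps(lambda_handler(sample_event, None)))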
The following class verifies Cognito tokens. You are required to install jose and pydantic.
The implementation is derived from this repo, which contains more details, additional functionality, tests, etc.
import json
import logging
import os
import time
import urllib.request
from typing import Dict, List
from jose import jwk, jwt
from jose.utils import base64url_decode
from pydantic import BaseModel
class JWK(BaseModel):
"""A JSON Web Key (JWK) model that represents a cryptographic key.
The JWK specification:
https://datatracker.ietf.org/doc/html/rfc7517
"""
alg: str
e: str
kid: str
kty: str
n: str
use: str
class CognitoAuthenticator:
def __init__(self, pool_region: str, pool_id: str, client_id: str) -> None:
self.pool_region = pool_region
self.pool_id = pool_id
self.client_id = client_id
self.issuer = f"https://cognito-idp.{self.pool_region}.amazonaws.com/{self.pool_id}"
self.jwks = self.__get_jwks()
def __get_jwks(self) -> List[JWK]:
"""Returns a list of JSON Web Keys (JWKs) from the issuer. A JWK is a
public key used to verify a JSON Web Token (JWT).
Returns:
List of keys
Raises:
Exception when JWKS endpoint does not contain any keys
"""
file = urllib.request.urlopen(f"{self.issuer}/.well-known/jwks.json")
res = json.loads(file.read().decode("utf-8"))
if not res.get("keys"):
raise Exception("The JWKS endpoint does not contain any keys")
jwks = [JWK(**key) for key in res["keys"]]
return jwks
def verify_token(
self,
token: str,
) -> bool:
"""Verify a JSON Web Token (JWT).
For more details refer to:
https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
Args:
token: The token to verify
Returns:
True if valid, False otherwise
"""
try:
self._is_jwt(token)
self._get_verified_header(token)
self._get_verified_claims(token)
except CognitoError:
return False
return True
def _is_jwt(self, token: str) -> bool:
"""Validate a JSON Web Token (JWT).
A JSON Web Token (JWT) includes three sections: Header, Payload and
Signature. They are base64url encoded and are separated by dot (.)
characters. If JWT token does not conform to this structure, it is
considered invalid.
Args:
token: The token to validate
Returns:
True if valid
Raises:
CognitoError when invalid token
"""
try:
jwt.get_unverified_header(token)
jwt.get_unverified_claims(token)
except jwt.JWTError:
logging.info("Invalid JWT")
raise InvalidJWTError
return True
def _get_verified_header(self, token: str) -> Dict:
"""Verifies the signature of a a JSON Web Token (JWT) and returns its
decoded header.
Args:
token: The token to decode header from
Returns:
A dict representation of the token header
Raises:
CognitoError when unable to verify signature
"""
# extract key ID (kid) from token
headers = jwt.get_unverified_header(token)
kid = headers["kid"]
# find JSON Web Key (JWK) that matches kid from token
key = None
for k in self.jwks:
if k.kid == kid:
# construct a key object from found key data
key = jwk.construct(k.dict())
break
if not key:
logging.info(f"Unable to find a signing key that matches '{kid}'")
raise InvalidKidError
# get message and signature (base64 encoded)
message, encoded_signature = str(token).rsplit(".", 1)
signature = base64url_decode(encoded_signature.encode("utf-8"))
if not key.verify(message.encode("utf8"), signature):
logging.info("Signature verification failed")
raise SignatureError
# signature successfully verified
return headers
def _get_verified_claims(self, token: str) -> Dict:
"""Verifies the claims of a JSON Web Token (JWT) and returns its claims.
Args:
token: The token to decode claims from
Returns:
A dict representation of the token claims
Raises:
CognitoError when unable to verify claims
"""
claims = jwt.get_unverified_claims(token)
# verify expiration time
if claims["exp"] < time.time():
logging.info("Expired token")
raise TokenExpiredError
# verify issuer
if claims["iss"] != self.issuer:
logging.info("Invalid issuer claim")
raise InvalidIssuerError
# verify audience
# note: claims["client_id"] for access token, claims["aud"] otherwise
if claims["client_id"] != self.client_id:
logging.info("Invalid audience claim")
raise InvalidAudienceError
# verify token use
if claims["token_use"] != "access":
logging.info("Invalid token use claim")
raise InvalidTokenUseError
# claims successfully verified
return claims
class CognitoError(Exception):
pass
class InvalidJWTError(CognitoError):
pass
class InvalidKidError(CognitoError):
pass
class SignatureError(CognitoError):
pass
class TokenExpiredError(CognitoError):
pass
class InvalidIssuerError(CognitoError):
pass
class InvalidAudienceError(CognitoError):
pass
class InvalidTokenUseError(CognitoError):
pass
if __name__ == "__main__":
auth = CognitoAuthenticator(
pool_region=os.environ["AWS_COGNITO_REGION"],
pool_id=os.environ["AWS_USER_POOL_ID"],
client_id=os.environ["AWS_USER_POOL_CLIENT_ID"],
)
# note: if you are not using access token, see line 161
access_token = "my_access_token"
print(f"Token verified: {auth.verify_token(access_token)}")