HP ALM results attachment and status update using Python

Challenge: Attach screenshots to tests in the Test Lab and update the status of each step as PASS/FAIL (currently, updating the pass status is enough).
I am expected to write a script in Python that attaches test results to the test cases present in the Test Lab, then, for each test step, sets the result to "As Expected" and passes the TC step by step.
That is, when performing this manually, we select the case, click Run, enter "As Expected" in the expected-output area, and pass that step, and we perform this for all test steps in the test case. This needs to be automated. I have a folder of screenshots (named similarly to the TC names), so the script should upload the screenshots and update the status.
What I have tried so far:
I was able to connect to ALM and, with a partial test case name, pull the full test case name from the Test Plan, but unfortunately I am still struggling to achieve the final goal.
My code so far:
import win32com.client

# Login credentials
qcServer = "https://almurl.saas.microfocus.com/qcbin/"
qcUser = "my_username"
qcPassword = "pwd"
qcDomain = "domain"
project = "Crew_Management"

# Do the actual login
td = win32com.client.Dispatch("TDApiOle80.TDConnection.1")
td.InitConnectionEx(qcServer)
td.Login(qcUser, qcPassword)
td.Connect(qcDomain, project)

if td.Connected:
    print("System: Logged in to " + project)
else:
    print("Connect failed to " + project)

mg = td.TreeManager  # Tree manager
name = ['TC001', 'TC002', 'TC003', 'TC004', 'TC005', 'TC006', 'TC007',
        'TC008', 'TC009', 'TC010', 'TC011', 'TC012', 'TC013', 'TC014']
folder = mg.NodeByPath('Subject\\Test Factory\\MPG\\MPG Regression Test_Yearly Request\\GUI')

for x in name:
    testList = folder.FindTests(x)  # find the tests matching the partial name
    print(testList[0].Name)
    print(testList[0].DesStepsNum)

td.Disconnect()
td.Logout()
Any help or guidance is much appreciated!

Assuming you have working experience in Python, here are all the different functions needed to complete your task using the ALM REST API.
Reference: https://admhelp.microfocus.com/alm/api_refs/REST_TECH_PREVIEW/ALM_REST_API_TP.html
Global Variables
import re
import json
import datetime
import time
import os
from xml.etree.ElementTree import Element, SubElement, tostring

import requests
from requests.auth import HTTPBasicAuth

ALM_USER_NAME = ""
ALM_PASSWORD = ""
ALM_DOMAIN = ""
ALM_PROJECT = ""
ALM_URL = ""  # e.g. "https://almurl.saas.microfocus.com/qcbin/"

AUTH_END_POINT = ALM_URL + "authentication-point/authenticate"
QC_SESSION_END_POINT = ALM_URL + "rest/site-session"
QC_LOGOUT_END_POINT = ALM_URL + "authentication-point/logout"
# The REST entity path needs the project name as well as the domain
ALM_MIDPOINT = "rest/domains/" + ALM_DOMAIN + "/projects/" + ALM_PROJECT
PATH_SEP = os.path.sep
Login Function
def alm_login(self):
    """
    Function    : alm_login
    Description : Authenticate the user
    Parameters  : uses the globals
                  ALM_USER_NAME - ALM user
                  ALM_PASSWORD  - ALM password
    """
    response = self.alm_session.post(AUTH_END_POINT,
                                     auth=HTTPBasicAuth(ALM_USER_NAME, ALM_PASSWORD))
    if response.status_code == 200:
        response = self.alm_session.post(QC_SESSION_END_POINT)
        if response.status_code in (200, 201):
            print("ALM Authentication successful")
        else:
            print("Error: ", response.status_code)
    else:
        print("Error: ", response.status_code)
    self.alm_session.headers.update({'Accept': 'application/json',
                                     'Content-Type': 'application/xml'})
    return
Logout Function
After a successful logout the session cookie should expire.
def alm_logout(self):
    '''
    Function    : alm_logout
    Description : Terminate the user session
    Parameters  : no parameters
    '''
    response = self.alm_session.post(QC_LOGOUT_END_POINT)
    print("Logout successful", response.headers.get('Expires'), response.status_code)
    return
Get Test Set Folder
If the test cases span multiple test suites, it is better to get the test set folder first and then find the necessary test suite.
def find_test_set_folder(self):
    '''
    Function    : find_test_set_folder
    Description : Resolve the test set path to its folder id
    Parameters  : 1 parameter
                  test_set_path - ALM test set path (instance attribute)
    '''
    json_str = json.loads(self.find_folder_id(self.test_set_path.split("\\"),
                                              "test-set-folders", 0, "id"))
    if 'entities' in json_str:
        return create_key_value(json_str['entities'][0]['Fields'])['id']
    else:
        return create_key_value(json_str['Fields'])['id']
Get Folder Id
This method will help you find the Test Suite folder id or the Test Plan folder id, creating any missing folders along the way.
def find_folder_id(self, arrfolder, str_api, parent_id, fields):
    '''
    Function    : find_folder_id
    Description : Walk a folder path, returning the id of the last folder
                  and creating any folder that does not exist yet
    Parameters  : 4 parameters
                  arrfolder - list of folder names
                  str_api   - REST collection name, e.g. "test-set-folders"
                  parent_id - id of the starting parent folder
                  fields    - comma-separated fields to fetch
    '''
    for foldername in arrfolder:
        payload = {"query": "{name['" + foldername + "'];parent-id[" + str(parent_id) + "]}",
                   "fields": fields}
        response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
        obj = json.loads(response.text)
        if obj["TotalResults"] >= 1:
            parent_id = get_field_value(obj['entities'][0]['Fields'], "id")
        else:
            # Folder does not exist yet, so create it
            inputdata = dict()
            inputdata['Type'] = str_api[0:len(str_api) - 1]  # singular entity type
            inputdata['name'] = foldername
            inputdata['parent-id'] = str(parent_id)
            data = generate_xml_data(inputdata)
            response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data)
            obj = json.loads(response.text)
            if response.status_code in (200, 201):
                parent_id = get_field_value(obj['Fields'], "id")
    return response.text
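The snippets in this answer call three helpers (create_key_value, get_field_value, generate_xml_data) that are never defined here. A minimal sketch of what they might look like, inferred from how they are called and from the ALM REST entity format:
def create_key_value(fields):
    # Flatten ALM's Fields list, e.g.
    # [{"Name": "id", "values": [{"value": "101"}]}, ...] -> {"id": "101"}
    result = {}
    for field in fields:
        values = field.get("values", [])
        result[field["Name"]] = values[0].get("value") if values else None
    return result

def get_field_value(fields, name):
    # Pull a single value out of the same Fields list
    return create_key_value(fields).get(name)

def generate_xml_data(inputdata):
    # Build an ALM REST <Entity> payload: the 'Type' key becomes the entity
    # type attribute and every other key becomes a Field with one Value
    entity = Element("Entity", {"Type": inputdata["Type"]})
    fields = SubElement(entity, "Fields")
    for key, value in inputdata.items():
        if key == "Type":
            continue
        field = SubElement(fields, "Field", {"Name": key})
        SubElement(field, "Value").text = str(value)
    return tostring(entity).decode("utf-8")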
Create Run Instance
Before updating the test status, we must create a run instance for each test.
def create_run_instance(self, test_set_id, test_map):
    '''
    Function    : create_run_instance
    Description : Create new run instances for every test instance in a test set
    Parameters  : 2 parameters
                  test_set_id - ALM test set id
                  test_map    - map of test data (not used in this snippet)
    '''
    str_api = "test-instances"
    fields = "id,test-id,test-config-id,cycle-id"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
               "page-size": 5000}
    response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
    obj = json.loads(response.text)

    run_instance_post = "<Entities>"
    for entity in obj["entities"]:
        run_name = re.sub('[-:]', '_',
                          'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime(
                              '%Y-%m-%d %H:%M:%S'))
        temp_map = create_key_value(entity["Fields"])
        _test_id = int(temp_map['test-id'])
        self.parser_temp_dic[_test_id]['testcycl-id'] = temp_map['id']
        self.parser_temp_dic[_test_id]['test-config-id'] = temp_map['test-config-id']
        self.parser_temp_dic[_test_id]['test-id'] = temp_map['test-id']
        self.parser_temp_dic[_test_id]['cycle-id'] = temp_map['cycle-id']
        status = "Passed"
        if 'Failed' in self.parser_temp_dic[_test_id]['status']:
            status = 'Failed'
        self.parser_temp_dic[_test_id]['final-status'] = status
        inputdata = dict()
        inputdata['Type'] = 'run'
        inputdata['name'] = run_name
        inputdata['owner'] = ALM_USER_NAME
        inputdata['test-instance'] = str(1)
        inputdata['testcycl-id'] = str(temp_map['id'])
        inputdata['cycle-id'] = str(temp_map['cycle-id'])
        inputdata['status'] = 'Not Completed'
        inputdata['test-id'] = temp_map['test-id']
        inputdata['subtype-id'] = 'hp.qc.run.MANUAL'
        data = generate_xml_data(inputdata)
        run_instance_post = run_instance_post + data
    self.bulk_operation("runs", run_instance_post + "</Entities>", True, "POST")
    return
Update Run Instance
def update_run_instance(self, test_set_id):
    '''
    Function    : update_run_instance
    Description : Update the test status in the run instances created above
    Parameters  : 1 parameter
                  test_set_id - ALM test set id
    '''
    fields = "id,test-id"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
               "page-size": 5000}
    response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/runs", params=payload)
    obj = json.loads(response.text)

    run_instance_put = "<Entities>"
    for entity in obj["entities"]:
        if len(entity["Fields"]) != 1:
            temp_map = create_key_value(entity["Fields"])
            self.parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
            inputdata = dict()
            inputdata['Type'] = 'run'
            inputdata['id'] = str(temp_map['id'])
            intermediate_ = self.parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']
            inputdata['testcycl-id'] = str(intermediate_)
            inputdata['status'] = self.parser_temp_dic[int(temp_map['test-id'])]['final-status']
            data = generate_xml_data(inputdata)
            run_instance_put = run_instance_put + data
    self.bulk_operation("runs", run_instance_put + "</Entities>", True, "PUT")
    return
Upload Result File
Uploading a file as an attachment to any entity in ALM:
def upload_result_file(self, test_set_id, report_file):
    '''
    Function    : upload_result_file
    Description : Upload a test result file to ALM as a test set attachment
    Parameters  : 2 parameters
                  test_set_id - ALM test set id
                  report_file - path of the file to attach
    '''
    payload = open(report_file, 'rb')
    headers = {}
    headers['Content-Type'] = "application/octet-stream"
    # The slug header carries the attachment file name, so keep the extension
    headers['slug'] = "test-results" + report_file[report_file.rfind("."):]
    response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/test-sets/" +
                                     str(test_set_id) + "/attachments/",
                                     headers=headers, data=payload)
    if not (response.status_code == 200 or response.status_code == 201):
        print("Attachment step failed!", response.text, response.url, response.status_code)
    return
Bulk Operation
This is a helper that allows us to POST or PUT an array of entities in one request.
def bulk_operation(self, str_api, data, isbulk, request_type):
    '''
    Function    : bulk_operation
    Description : Generic function to POST or PUT multiple entities
    Parameters  : 4 parameters
                  str_api      - end point name
                  data         - actual payload to send
                  isbulk       - True when data wraps several entities
                  request_type - "POST" or "PUT"
    '''
    response = None
    headers = {}
    try:
        if isbulk:
            headers['Content-Type'] = "application/xml;type=collection"
        if request_type == 'POST':
            response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
                                             headers=headers)
        elif request_type == 'PUT':
            response = self.alm_session.put(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
                                            headers=headers)
    except Exception as err:
        print(err)
    if response is not None and response.status_code in (200, 201):
        return response.text
    return response
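Putting these together for the original goal (create runs for a test set, flip them to Passed, and attach the screenshots), a driver could look roughly like the sketch below. This is an assumption-heavy sketch, not part of the original answer: it presumes the functions above are methods of a class holding a requests.Session as alm_session, the test set path as test_set_path, and a parser_temp_dic of per-test state; SCREENSHOT_DIR and both paths are placeholders to adapt.
from collections import defaultdict

class AlmUpdater:
    # alm_login, alm_logout, find_test_set_folder, find_folder_id,
    # create_run_instance, update_run_instance, upload_result_file and
    # bulk_operation from the sections above become methods of this class.

    def __init__(self, test_set_path):
        self.alm_session = requests.Session()
        self.test_set_path = test_set_path
        # Seed every test with a 'Passed' step status, since the asker only
        # needs to mark tests as passing for now.
        self.parser_temp_dic = defaultdict(lambda: {'status': ['Passed']})

SCREENSHOT_DIR = "C:\\results\\screenshots"    # placeholder folder

updater = AlmUpdater("Root\\MPG\\GUI")         # placeholder test set path
updater.alm_login()
folder_id = updater.find_test_set_folder()

# Resolve the test set id inside that folder; the query mirrors the ones above.
resp = updater.alm_session.get(
    ALM_URL + ALM_MIDPOINT + "/test-sets",
    params={"query": "{parent-id[" + str(folder_id) + "]}", "fields": "id,name"})
test_set_id = create_key_value(json.loads(resp.text)['entities'][0]['Fields'])['id']

updater.create_run_instance(test_set_id, None)   # creates 'Not Completed' runs
updater.update_run_instance(test_set_id)         # flips them to their final status
for shot in os.listdir(SCREENSHOT_DIR):          # one screenshot per test case
    updater.upload_result_file(test_set_id, os.path.join(SCREENSHOT_DIR, shot))
updater.alm_logout()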

You can refer to this code:
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
You need to send only three values: Test Set ID (Cycle ID), ALM Test ID, and execution status. The library automatically builds the JSON payload, creates a test run, and updates the result.
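A call might then look roughly like this; the import, function name, and parameter order are assumptions based on the description above, so check the linked almLib source for the real interface.
# Hypothetical usage of almLib, inferred from the description above --
# verify the actual function name and signature in the linked repository.
from almLib import update_test_execution_status  # assumed name

test_set_id = 1001   # Test Set ID (Cycle ID) from Test Lab
alm_test_id = 2002   # ALM Test ID
update_test_execution_status(test_set_id, alm_test_id, "Passed")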

Related

How do I set the python docusign_esign ApiClient to use a proxy?

I am using the following examples from the DocuSign site.
I have a set of Python scripts that works well on my PC, but I have to move the code to a server behind a proxy.
I could not find any example or setting to configure a proxy.
I tried setting it in the underlying urllib3 code, but it is being overwritten each time the API creates a class of the ApiClient().
How do I set the python docusign_esign ApiClient to use a proxy?
Below is the relevant portion of the code.
from docusign_esign import ApiClient
from docusign_esign import EnvelopesApi
from jwt_helper import get_jwt_token, get_private_key
# this one has all the connection parameters
from jwt_config import DS_JWT
import datetime
from datetime import timedelta
import os
import zipfile
import urllib3

proxy = urllib3.ProxyManager('http://<id>:<pwd>@<proxy_server>:3128/', maxsize=10)

# used by docusign to decide what you have access to
SCOPES = ["signature", "impersonation"]

# Call the envelope status change method to list the envelopes changed in the last 10 days
def worker(args):
    api_client = ApiClient()
    api_client.host = args['base_path']
    api_client.set_default_header("Authorization", "Bearer " + args['access_token'])
    envelope_api = EnvelopesApi(api_client)
    # The Envelopes::listStatusChanges method has many options.
    # The list status changes call requires at least a from_date OR
    # a set of envelopeIds. Here we filter using a from_date in
    # ISO 8601 date format.
    from_date = (datetime.datetime.utcnow() - timedelta(days=120)).isoformat()
    results = envelope_api.list_status_changes(args['account_id'], from_date=from_date)
    return results, envelope_api

# Call request_jwt_user_token method
def get_token(private_key, api_client):
    token_response = get_jwt_token(private_key, SCOPES, DS_JWT["authorization_server"],
                                   DS_JWT["ds_client_id"], DS_JWT["ds_impersonated_user_id"])
    access_token = token_response.access_token
    # Save API account ID
    user_info = api_client.get_user_info(access_token)
    accounts = user_info.get_accounts()
    api_account_id = accounts[0].account_id
    base_path = accounts[0].base_uri + "/restapi"
    return {"access_token": access_token, "api_account_id": api_account_id,
            "base_path": base_path}

# bucket to keep track of token info
def get_args(api_account_id, access_token, base_path):
    args = {
        "account_id": api_account_id,
        "base_path": base_path,
        "access_token": access_token
    }
    return args

# start the actual code here: create and then set up the object
api_client = ApiClient()
api_client.set_base_path(DS_JWT["authorization_server"])
api_client.set_oauth_host_name(DS_JWT["authorization_server"])
api_client.rest_client.pool_manager.proxy = proxy
api_client.rest_client.pool_manager.proxy.scheme = "http"

private_key = get_private_key(DS_JWT["private_key_file"]).encode("ascii").decode("utf-8")
jwt_values = get_token(private_key, api_client)
args = get_args(jwt_values["api_account_id"], jwt_values["access_token"], jwt_values["base_path"])
account_id = args["account_id"]

# return the envelope list and the api_client object created to get it
results, envelope_api = worker(args)
print("We found " + str(results.result_set_size) + " sets of files")

for envelope in results.envelopes:
    envelope_id = envelope.envelope_id
    print("Extracting " + envelope_id)
    # The SDK always stores the received file as a temp file; you cannot set the path.
    # Call the envelope get method
    temp_file = envelope_api.get_document(account_id=account_id, document_id="archive",
                                          envelope_id=envelope_id)
    if temp_file:
        print("File is here " + temp_file)
        with zipfile.ZipFile(temp_file, 'r') as zip_ref:
            zip_ref.extractall(extract_dir + envelope_id + "\\")  # extract_dir defined elsewhere
        print("Done extracting " + envelope_id + " deleting zip file")
        os.remove(temp_file)
        print("Deleted file here " + temp_file)
    else:
        print("Failed to get data for " + envelope_id)

Nested JSON Values cause "TypeError: Object of type 'int64' is not JSON serializable"

Would love some help here. Full context: this is my first "purposeful" Python script. Prior to this I've only dabbled a bit and am honestly still learning, so maybe I jumped in a bit too early here.
Long story short, I have been running all over fixing various type mismatches and general indentation issues (dear lord, Python isn't forgiving on this).
I think I'm about finished but have a few last issues, most of which come from the same section. This script is just meant to take a csv file that has 3 columns and use it to send requests based on the first column (either iOS or Android). The problem is when I'm creating the body to send...
Here's the code (a few tokens omitted for postability):
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from tqdm import tqdm
from datetime import *
import uuid
import warnings
from math import isnan
import time

## throttling based on AF's 80 requests per 2 minutes rule
def throttle():
    i = 0
    while i <= 3:
        print("PAUSED FOR THROTTLING!" + "\n" + str(3 - i) + " minutes remaining")
        time.sleep(60)
        i = i + 1
        print(i)
    return 0

## function for reformatting the dates
def date():
    d = datetime.utcnow()  # get time in UTC
    d = d.isoformat('T') + 'Z'
    t = d.split('.')
    t = t[0] + 'Z'
    return str(t)

## function for dealing with Android requests
def android_request(madv_id, mtime, muuid, android_app, token, endpoint):
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    params = {'api_token': token}
    subject_identities = {
        "identity_format": "raw",
        "identity_type": "android_advertising_id",
        "identity_value": madv_id
    }
    body = {
        'subject_request_id': muuid,
        'subject_request_type': 'erasure',
        'submitted_time': mtime,
        'subject_identities': dict(subject_identities),
        'property_id': android_app
    }
    body = json.dumps(body)
    res = requests.request('POST', endpoint, headers=headers,
                           data=body, params=params)
    print("android " + res.text)

## function for dealing with iOS requests
def ios_request(midfa, mtime, muuid, ios_app, token, endpoint):
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    params = {'api_token': token}
    subject_identities = {
        'identity_format': 'raw',
        'identity_type': 'ios_advertising_id',
        'identity_value': midfa,
    }
    body = {
        'subject_request_id': muuid,
        'subject_request_type': 'erasure',
        'submitted_time': mtime,
        'subject_identities': list(subject_identities),
        'property_id': ios_app,
    }
    body = json.dumps(body)
    res = requests.request('POST', endpoint, headers=headers, data=body, params=params)
    print("ios " + res.text)

## main run function. Determines whether it is an iOS or Android request and sends if not a LAT-user
def run(output, mdf, is_test):
    ## assigning variables to the columns I need from file
    print('Sending requests! Stand by...')
    platform = mdf.platform
    device = mdf.device_id
    if is_test == "y":
        ios = 'id000000000'
        android = 'com.tacos.okay'
        token = 'OMMITTED_FOR_STACKOVERFLOW_Q'
        endpoint = 'https://hq1.appsflyer.com/gdpr/stub'
    else:
        ios = 'id000000000'
        android = 'com.tacos.best'
        token = 'OMMITTED_FOR_STACKOVERFLOW_Q'
        endpoint = 'https://hq1.appsflyer.com/gdpr/opengdpr_requests'
    for position in tqdm(range(len(device))):
        if position % 80 == 0 and position != 0:
            throttle()
        else:
            req_id = str(uuid.uuid4())
            timestamp = str(date())
            if platform[position] == 'android' and device[position] != '':
                android_request(device[position], timestamp, req_id, android, token, endpoint)
                mdf['subject_request_id'][position] = req_id
            if platform[position] == 'ios' and device[position] != '':
                ios_request(device[position], timestamp, req_id, ios, token, endpoint)
                mdf['subject_request_id'][position] = req_id
            if 'LAT' in platform[position]:
                mdf['subject_request_id'][position] = 'null'
                mdf['error status'][position] = 'Limit Ad Tracking Users Unsupported. Device ID Required'
    mdf.to_csv(output, sep=',', index=False, header=True)
    print('\nDONE. Please see ' + output
          + ' for the subject_request_id and/or error messages\n')

## takes the CSV given by the user and makes a copy of it for us to use
def read(mname):
    orig_csv = pd.read_csv(mname)
    mdf = orig_csv.copy()
    return mdf

## just used to create the renamed file with _LOG.csv
def rename(mname):
    msuffix = '_LOG.csv'
    i = mname.split('.')
    i = i[0] + msuffix
    return i

## adds relevant columns to the log file
def logs_csv(out, df):
    mdf = df
    mdf['subject_request_id'] = ''
    mdf['error status'] = ''
    mdf['device_id'].fillna('')
    mdf.to_csv(out, sep=',', index=None, header=True)
    return mdf

## solely for reading in the file name from the user; creates a string out of the filename
def readin_name():
    mprefix = input('FILE NAME: ')
    msuffix = '.csv'
    mname = str(mprefix + msuffix)
    print('\n' + 'Reading in file: ' + mname)
    return mname

def start():
    print('\nWelcome to GDPR STREAMLINE')
    testing = input('Is this a test? (y/n) : ')
    # return a CSV
    name = readin_name()
    import_csv = read(name)
    output_name = rename(name)
    output_file = logs_csv(output_name, import_csv)
    run(output_name, output_file, testing)

## to disable all warnings in console logs
warnings.filterwarnings('ignore')
start()
And here's the error stacktrace:
Reading in file: test.csv
Sending requests! Stand by...
0%| | 0/384 [00:00<?, ?it/s]
Traceback (most recent call last):
File "a_GDPR_delete.py", line 199, in <module>
start()
File "a_GDPR_delete.py", line 191, in start
run( output_name, output_file, testing)
File "a_GDPR_delete.py", line 114, in run
android_request(device[position], timestamp, req_id, android, token, endpoint)
File "a_GDPR_delete.py", line 57, in android_request
body = json.dumps(body)
File "/Users/joseph/anaconda3/lib/python3.6/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'int64' is not JSON serializable
TL;DR:
Getting a TypeError when calling json.dumps on a body with another nested JSON object. I've confirmed that the nested JSON is the problem, because if I remove the "subject_identities" section this compiles and works... but the API I'm using NEEDS those values, so it doesn't actually do anything without that section.
Here's the relevant code again (in the version I first used, which WAS working previously):
def android(madv_id, mtime, muuid):
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    params = {
        "api_token": "OMMITTED_FOR_STACKOVERFLOW_Q"
    }
    body = {
        "subject_request_id": muuid,
        "subject_request_type": "erasure",
        "submitted_time": mtime,
        "subject_identities": [
            {"identity_type": "android_advertising_id",
             "identity_value": madv_id,
             "identity_format": "raw"}
        ],
        "property_id": "com.tacos.best"
    }
    body = json.dumps(body)
    res = requests.request("POST",
                           "https://hq1.appsflyer.com/gdpr/opengdpr_requests",
                           headers=headers, data=body, params=params)
I get the feeling I'm close to having this working. I had a much simpler version early on that worked, but I rewrote it to be more dynamic and use fewer hard-coded values (so that I can eventually apply it to any app I'm working with, not only the two it was made for).
Please be nice, I'm entirely new to Python and also just rusty on coding in general (thus trying to do projects like this one).
You can check for numpy dtypes like so:
if hasattr(obj, 'dtype'):
    obj = obj.item()
This will convert it to the closest equivalent Python data type.
EDIT:
Apparently np.nan is JSON serializable, so I've removed that catch from my answer.
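For the json.dumps call itself, a common way to apply this check automatically is the default hook, so any numpy scalar nested anywhere in the body gets converted on the fly. A small sketch of that approach:
import json
import numpy as np

def to_builtin(obj):
    # json.dumps calls this only for objects it cannot serialize itself;
    # numpy scalars expose .item(), which returns the closest Python type.
    if hasattr(obj, 'dtype'):
        return obj.item()
    raise TypeError("Object of type %s is not JSON serializable" % type(obj).__name__)

body = {"count": np.int64(42), "nested": {"ratio": np.float64(0.5)}}
print(json.dumps(body, default=to_builtin))  # {"count": 42, "nested": {"ratio": 0.5}}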
Thanks to everyone for helping so quickly here. Apparently I was deceived by the error message, as the fix from @juanpa.arrivillaga did the job with one adjustment.
The corrected code was in these parts:
android_request(str(device[position]), timestamp, req_id, android, token, endpoint)
and here:
ios_request(str(device[position]), timestamp, req_id, ios, token, endpoint)
I had to cast to string, apparently, even though these values are not originally integers and tend to look like this instead: ab12ab12-12ab-34cd-56ef-1234abcd5678.

I want to save the results of running Python to a file [duplicate]

This question already has answers here:
How to redirect 'print' output to a file?
(15 answers)
Closed 4 years ago.
import requests, json, pprint
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

pagesize = 1000
api_url_base = "https://test/sepm/api/v1/"
authentication_url = "https://test/sepm/api/v1/identity/authenticate"
json_format = True
payload = {
    "username": "test",
    "password": "test",
    "domain": ""}
headers = {"Content-Type": "application/json"}
r = requests.post(authentication_url, verify=False, headers=headers,
                  data=json.dumps(payload))
api_token = (r.json()["token"])
headers = {'Content-Type': 'application/json',
           'Authorization': 'Bearer {0}'.format(api_token)}

def get_info(url, params):
    api_url = url
    params = params
    response = requests.get(api_url, headers=headers, verify=False,
                            params=params)
    if response.status_code == 200:
        return json.loads(response.content.decode('utf-8'))
    else:
        return response.status_code

def aggregate(endpoint_info, numberOfElements):
    itr = 0
    while itr <= (numberOfElements - 1):
        computerName = endpoints_info['content'][itr]['computerName']
        ipAddresses = endpoints_info['content'][itr]['ipAddresses'][0]
        logonUserName = endpoints_info['content'][itr]['logonUserName']
        lastUpdateTime = endpoints_info['content'][itr]['creationTime']
        agentVersion = endpoints_info['content'][itr]['agentVersion']
        print(computerName, ipAddresses, logonUserName, lastUpdateTime, agentVersion)
        itr = itr + 1

groups_url = '{0}groups'.format(api_url_base)
fingerprint_url = '{0}policy-objects/fingerprints'.format(api_url_base)
endpoints_url = '{0}computers?'.format(api_url_base)
total_pages = get_info(endpoints_url, {'pageSize': pagesize})['totalPages']

itr = 1
while itr <= total_pages:
    params = {'pageSize': pagesize, 'pageIndex': itr}
    endpoints_info = get_info(endpoints_url, params)
    numberOfElements = endpoints_info['numberOfElements']
    itr = itr + 1
    if endpoints_info is not 200:
        aggregate(endpoints_info, numberOfElements)
    else:
        print('[!] Request Failed, {0}')
This code uses the Symantec REST API. When you run it, you get results in the list format shown below.
Command line output:
P09PC 123.63.40.37 test-9 1520236609428 14.0.3897.1101
P10PC 123.63.40.31 test-10 1520230270130 14.0.3775.1002
P11PC 123.63.40.27 test-11 1520229680645 14.0.3775.1002
P12PC 123.63.40.26 test-12 1520229515250 14.0.3775.1002
I modified this source because I want to save the results to a file, but unfortunately the effort has failed for several days. Please tell me how to save the output to a file.
Do you mean that instead of printing on the terminal you want to write the output to a file?
The process is simple: just open a file in the desired mode (append or write, depending on your use case) and write to it.
Consider reading this tutorial for a grasp of the concept.
After initializing the file object (as described in the tutorial) you just need to write to the file instead of using print statements. The code will look something like this:
file = open('output.txt', 'w')
file.write(computerName + ' ' + ipAddresses + ' ' + logonUserName + ' ' + lastUpdateTime + ' ' + agentVersion)
file.close()
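In this script the write belongs inside the aggregate loop, and a context manager saves the explicit close(). A sketch of the loop with file output (output.txt is an assumed name, and creationTime is cast to str because the sample output shows it as a number):
def aggregate(endpoints_info, numberOfElements, outfile='output.txt'):
    # 'a' (append) so each page of results adds to the same file
    with open(outfile, 'a') as f:
        for itr in range(numberOfElements):
            entry = endpoints_info['content'][itr]
            line = ' '.join([entry['computerName'],
                             entry['ipAddresses'][0],
                             entry['logonUserName'],
                             str(entry['creationTime']),
                             entry['agentVersion']])
            print(line)           # keep the console output
            f.write(line + '\n')  # and persist it as well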

's3.Bucket' object has no attribute 'put': AttributeError

I am trying to download a file from a URL and upload the file to an S3 bucket.
My code is as follows:
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import xml.etree.ElementTree as etree
from datetime import datetime as dt
import os
import urllib
import requests
import boto3
from botocore.client import Config
from urllib.parse import urlparse

def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    print('event.session.application.applicationId='
          + event['session']['application']['applicationId'])
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])
    if event['request']['type'] == 'LaunchRequest':
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == 'IntentRequest':
        return on_intent(event['request'], event['session'])
    elif event['request']['type'] == 'SessionEndedRequest':
        return on_session_ended(event['request'], event['session'])

def on_session_started(session_started_request, session):
    """ Called when the session starts """
    print('on_session_started requestId='
          + session_started_request['requestId'] + ', sessionId='
          + session['sessionId'])

def on_launch(launch_request, session):
    """ Called when the user launches the skill without specifying what they
    want
    """
    print('on_launch requestId=' + launch_request['requestId']
          + ', sessionId=' + session['sessionId'])
    # Dispatch to your skill's launch
    return get_welcome_response()

def on_intent(intent_request, session):
    """ Called when the user specifies an intent for this skill """
    print('on_intent requestId=' + intent_request['requestId']
          + ', sessionId=' + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    # Dispatch to your skill's intent handlers
    if intent_name == 'DownloadFiles':
        return get_file(intent, session)
    elif intent_name == 'AMAZON.HelpIntent':
        return get_welcome_response()
    else:
        raise ValueError('Invalid intent')

def on_session_ended(session_ended_request, session):
    """ Called when the user ends the session. Is not called when the skill
    returns should_end_session=true """
    print('on_session_ended requestId='
          + session_ended_request['requestId'] + ', sessionId='
          + session['sessionId'])
    # add cleanup logic here

# --------------- Functions that control the skill's behavior ------------------

def get_welcome_response():
    """ If we wanted to initialize the session to have some attributes we
    could add those here """
    session_attributes = {}
    card_title = 'Welcome'
    speech_output = \
        "Welcome to file download Application. Please ask me to download files by saying, Ask downloader for download"
    # If the user either does not reply to the welcome message or says something
    # that is not understood, they will be prompted again with this text.
    reprompt_text = \
        "Please ask me to download files by saying, Ask downloader for download"
    should_end_session = False
    return build_response(session_attributes,
                          build_speechlet_response(card_title,
                                                   speech_output, reprompt_text,
                                                   should_end_session))

def get_file(intent, session):
    """ Grabs the files from the path that have to be downloaded """
    card_title = intent['name']
    session_attributes = {}
    should_end_session = True
    username = '*'
    password = '*'
    ACCESS_KEY_ID = '*'
    ACCESS_SECRET_KEY = '*+9'
    BUCKET_NAME = 'lambda-file-upload'
    url = 'https://drive.google.com/drive/xyz'
    filename = os.path.basename(urlparse(url).path)
    # urllib.urlretrieve(url, "code.zip")
    r = requests.get(url, auth=(username, password))
    if r.status_code == 200:
        with open("/tmp/" + filename, 'wb') as out:
            for bits in r.iter_content():
                out.write(bits)
    data = open("/tmp/" + filename, 'rb')
    # S3 Connect
    s3 = boto3.resource(
        's3',
        aws_access_key_id=ACCESS_KEY_ID,
        aws_secret_access_key=ACCESS_SECRET_KEY,
        config=Config(signature_version='s3v4')
    )
    # Uploaded File
    s3.Bucket(BUCKET_NAME).put(Key=filename, Body=data, ACL='public-read')
    speech_output = "The file" + filename + "has been downloaded"
    reprompt_text = ""
    return build_response(session_attributes,
                          build_speechlet_response(card_title,
                                                   speech_output, reprompt_text,
                                                   should_end_session))

# --------------- Helpers that build all of the responses ----------------------

def build_speechlet_response(title, output, reprompt_text, should_end_session):
    return {
        'outputSpeech': {'type': 'PlainText', 'text': output},
        'card': {'type': 'Simple',
                 'title': 'SessionSpeechlet - ' + title,
                 'content': 'SessionSpeechlet - ' + output},
        'reprompt': {'outputSpeech': {'type': 'PlainText',
                                      'text': reprompt_text}},
        'shouldEndSession': should_end_session,
    }

def build_response(session_attributes, speechlet_response):
    return {'version': '1.0', 'sessionAttributes': session_attributes,
            'response': speechlet_response}
I am getting the following error:
's3.Bucket' object has no attribute 'put': AttributeError
I am trying to create an Alexa skill which will download a file from a URL, hence the Lambda function. I am new to AWS Lambda and S3 and would really appreciate some help.
As per the official docs here, instead of
s3.Bucket(BUCKET_NAME).put(Key=filename, Body=data, ACL='public-read')
you should use the put_object() function:
s3.Bucket(BUCKET_NAME).put_object(Key=filename, Body=data, ACL='public-read')
The Bucket sub-resource of boto3 doesn't have an API method named put; that's why you are getting this error. The error is self-explanatory in that matter:
's3.Bucket' object has no attribute 'put'.
Use the provided API method put_object instead. That is, simply replace the line
s3.Bucket(BUCKET_NAME).put(Key=filename, Body=data, ACL='public-read')
with
s3.Bucket(BUCKET_NAME).put_object(Key=filename, Body=data, ACL='public-read')
For more information about the method syntax and parameters, visit the Boto3 documentation for S3.
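For reference, a minimal self-contained sketch of the corrected upload, with the bucket name and file path as placeholders; upload_fileobj is shown as an alternative managed-transfer call that streams large files in parts:
import boto3

s3 = boto3.resource('s3')
bucket = s3.Bucket('lambda-file-upload')  # placeholder bucket name

with open('/tmp/code.zip', 'rb') as data:
    # put_object is the low-level, single-request upload
    bucket.put_object(Key='code.zip', Body=data, ACL='public-read')

with open('/tmp/code.zip', 'rb') as data:
    # upload_fileobj uses multipart transfers under the hood for big files
    bucket.upload_fileobj(data, 'code.zip', ExtraArgs={'ACL': 'public-read'})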
Had a similar issue due to a different reason:
# This will give an error while getting objects:
s3 = boto3.client('s3', region_name='us-east-1')
bucket = 'mybucketname'

# This works while getting objects:
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucketname')

# Now get the objects:
for object in bucket.objects.filter(Prefix="foldername/"):
    key = object.key
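The client API can do the same listing with a different call shape; a short sketch of the equivalent using list_objects_v2 (bucket and prefix are placeholders):
import boto3

s3 = boto3.client('s3', region_name='us-east-1')
# The client returns plain dicts instead of resource objects
resp = s3.list_objects_v2(Bucket='mybucketname', Prefix='foldername/')
for obj in resp.get('Contents', []):
    print(obj['Key'])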

FLASK: Serving file to browser behind API proxy

When the user enters http://example2.com:5500/?param=x, the code below generates a data.csv file and serves it to the browser. It works perfectly like this.
However, I have deployed it behind an API proxy, so the user makes a call to http://example1.com/?param=x, which is internally transformed into http://example2.com:5500/?param=x.
As a result, instead of serving data.csv to the browser as before, the browser now displays all of the data.csv content. The view-source feature shows exactly what data.csv should contain, without any HTML headers, just the data.csv content, but it is not being served as an attachment. Any ideas?
from flask import make_response

@app.route('/', methods=['GET'])
def get_file():
    alldata = []
    while len(new_data) > 0:
        new_data = api.timeline(max_id=oldest)
        alldata.extend(new_data)
        oldest = alldata[-1].id - 1
    outdata = ""
    for data in alldata:
        outdata += ",".join(data) + "\n"
    response = make_response(outdata)
    response.headers["Content-Disposition"] = "attachment; filename=data.csv"
    return response

if __name__ == '__main__':
    app.run(host=app.config['HOST'], port=app.config['PORT'])
EDIT: Included the mapping code that transforms a request to example1.com into one to example2.com (secret_url).
# This is example1.com
@app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
    projectId = databaseFunctions.getTitleProjectId(projectTitle)
    projectInfo = databaseFunctions.getProjectInfo(projectId)
    redirectionQueryString = re.sub('apikey=[^&]+&?', '', request.query_string).rstrip('&')
    redirectionUrl = projectInfo['secretUrl'].rstrip('/')
    if urlSuffix is not None:
        redirectionUrl += '/' + urlSuffix.rstrip('/')
    redirectionUrl += '/?' + redirectionQueryString
    redirectionHeaders = request.headers
    print request.args.to_dict(flat=False)
    try:
        r = requests.get(redirectionUrl, data=request.args.to_dict(flat=False),
                         headers=redirectionHeaders)
    except Exception, e:
        return '/error=Error: bad secret url: ' + projectInfo.get('secretUrl')
    return r.text
Your homegrown proxy is not returning the response headers back to the application. Try this:
@app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
    # ...
    return r.text, r.status_code, r.headers
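One caveat to this fix: headers such as Content-Encoding, Content-Length, Transfer-Encoding, and Connection should not be copied verbatim, because requests has already decoded the body that Flask will re-send. A sketch that forwards only the safe headers, which still includes the Content-Disposition that triggers the download:
# Hop-by-hop and body-encoding headers must be dropped when proxying,
# since the body is re-encoded by Flask on the way out.
EXCLUDED_HEADERS = {'content-encoding', 'content-length',
                    'transfer-encoding', 'connection'}

@app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
    # ... build redirectionUrl and fetch r as in the question ...
    headers = {name: value for name, value in r.headers.items()
               if name.lower() not in EXCLUDED_HEADERS}
    return r.text, r.status_code, headers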
