Calling 2 functions within another function - python

I'm trying to call the extract function and the extract_url function within a function. I get name error: name 'endpoint' and name 'agg_key' is not defined. I'm doing this so I can call a script from another script so I don't need to run the command line. How would I go about doing this?
Function I'm trying to call:
def scrape_all_products(URL):
extract(endpoint, agg_key, page_range=None)
extract_url(args)
Functions I'm calling:
def extract(endpoint, agg_key, page_range=None):
r_list = list(range(page_range[0], page_range[1]+1)) if page_range else []
page = 1
agg_data = []
while True:
page_endpoint = endpoint + f'?page={str(page)}'
response = requests.get(page_endpoint, timeout=(
int(os.environ.get('REQUEST_TIMEOUT', 0)) or 10))
response.raise_for_status()
if response.url != page_endpoint: # to handle potential redirects
p_endpoint = urlparse(response.url) # parsed URL
endpoint = p_endpoint.scheme + '://' + p_endpoint.netloc + p_endpoint.path
if not response.headers['Content-Type'] == 'application/json; charset=utf-8':
raise Exception('Incorrect response content type')
data = response.json()
page_has_products = agg_key in data and len(
data[agg_key]) > 0
page_in_range = page in r_list or page_range is None
# break loop if empty or want first page
if not page_has_products or not page_in_range:
break
agg_data += data[agg_key]
page += 1
return agg_data
Other function:
def extract_url(args):
p = format_url(args.url, scheme='https', return_type='parse_result')
formatted_url = p.geturl()
agg_key = 'products'
if args.collections:
agg_key = 'collections'
fp = os.path.join(
args.dest_path, f'{p.netloc}.{agg_key}.{args.output_type}')
if args.file_path:
fp = os.path.join(
args.dest_path, f'{args.file_path}.{args.output_type}')
endpoint = f'{formatted_url}/{agg_key}.json'
ret = {
'endpoint_attempted': endpoint,
'collected_at': str(datetime.now()),
'success': False,
'error': ''
}
try:
data = extract(endpoint, agg_key, args.page_range)
except requests.exceptions.HTTPError as err:
ret['error'] = str(err)
except json.decoder.JSONDecodeError as err:
ret['error'] = str(err)
except Exception as err:
ret['error'] = str(err)
else:
ret['success'] = True
ret[agg_key] = data
if ret['success']:
ret['file_path'] = str(fp)
save_to_file(fp, data, args.output_type)
return ret

The scrape_all_products function only knows about variables created inside of that function and variables passed to it (which in this case is URL). endpoint and agg_key were both created inside of a different function. You have to pass those variables to scrape_all_products the same way you are passing URL. So do:
def scrape_all_products(URL, endpoint, agg_key, args):
And then you would have to appropriately modify anywhere scrape_all_products is called.

Related

Getting Import/Library issue in my robotframework

Getting Import/Library issues in my robot framework, I've Customlib file where all my custom functions reside while trying to import the Customlib getting an error
[enter image description here][1]
[enter image description here][2]
[1]: https://i.stack.imgur.com/poPzQ.png
[2]: https://i.stack.imgur.com/qkbxK.png
CustomLib Code:
robot is complaining Setup failed: No keyword with name Customlib.get config test data
import os
# Declaring empty list for test data and config file
testData = {}
configTestData = {}
class CustomLib:
ROBOT_LIBRARY_SCOPE = 'Test Case'
# Function for getting data from confi file and test data file altogether
#staticmethod
def get_global_config_data_and_test_data(testdata_filename):
configpath = os.path.dirname(os.path.abspath(__file__))
print(configpath)
configpath1 = configpath.replace("Utils", "")
configpath = configpath1.replace(configpath1, "config.properties")
# configpath=configpath.replace("Utils", "config.properties")
try:
file = open(configpath)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
testData[a] = b
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
finally:
file.close()
return CustomLib.get_testData_From_PropertiesFile(CustomLib.OS_path_fromat_separator(testdata_filename))
# Function for reading test data from property file
#staticmethod
def get_testData_From_PropertiesFile(propfile):
try:
file = open(propfile)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
testData[a] = b
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
finally:
file.close()
return testData
# FUnction for generating dynamic path which has text in XPATH
def generate_dynamic_xpath(self, locatorvalue, replacement):
after_replacement = locatorvalue.replace('#', replacement)
return after_replacement
# Function for creating report name
def create_report_name(self, testContent, date):
reportname = testContent.replace('date', date)
return reportname
# Function for reading config.properties file
#staticmethod
def get_config_testdata():
configpath = os.path.dirname(os.path.abspath(__file__))
print(configpath)
configpath1 = configpath.replace("Utils", "")
configpath = configpath1.replace(configpath1, "config.properties")
# configpath=configpath.replace("Utils", "config.properties")
print(configpath)
try:
file = open(configpath)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
configTestData[a] = b
except Exception as e:
if hasattr(e, 'Exception occured while reading properties file'):
print(e.message)
else:
print(e)
finally:
file.close()
return configTestData
# Function to format the path for different OS
#staticmethod
def OS_path_fromat_separator(pathformat):
config_data = {}
config_data = CustomLib.get_config_testdata()
if (config_data['OS'] == 'Windows'):
OSPath = pathformat.replace('$', '//')
return OSPath
else:
OSPath = pathformat.replace('$', '/')
return OSPath
# Function for generating dynamic CSS on the basis of text
def generate_dynamic_CSS(self, locatorvalue, replacement):
after_replacement = locatorvalue.replace('#', replacement)
return after_replacement
def main():
CustomLib.get_global_config_data()
# CustomLib.get_testData_From_PropertiesFile()
if __name__ == '__main__':
main()
I'm using relative path to import custom library. Below is the reference, how to use import using relative path. So this should work.
Library ..${/}foldername${/}customlibrary.py
And you can also use full path and import the library

HP ALM results attachment and status update using python

Challenge : Attach screenshots to Tests in TestLab , update status as PASS/FAIL steps wise (currently updating pass status is enough)
I am expected to write a script in python , to attach test results to the testcases present in test lab, then for each test step Expected result to be set as "As Expected" and pass the TC step by step.
Ie while performing manually, we select the case , click run and then enter "As expected" in expected output area and pass that step, and perfrom this for all teststeps on the test case. This need to be automated. I hav a folder which has Screenshots(similar to TC name), so script shoudl upload the screenshots and update the status.
What I have tried so far :
I was able to connect to alm , with partial testcase name, I was able to pull full testcase name from testplan, but unfortunately i am still struggling to achieve the final goal.
My code so far :
import win32com
from win32com.client import Dispatch
import codecs
import re
import json
# Login Credentials
qcServer = "https://almurl.saas.microfocus.com/qcbin/"
qcUser = "my_username"
qcPassword = "pwd"
qcDomain = "domain"
testList = []
testdict = {}
project = "Crew_Management"
# Do the actual login
td = win32com.client.Dispatch("TDApiOle80.TDConnection.1")
td.InitConnectionEx(qcServer)
td.Login(qcUser,qcPassword)
td.Connect(qcDomain,project)
if td.Connected == True:
print ("System: Logged in to " +project)
else:
print ("Connect failed to " +project)
mg = td.TreeManager # Tree manager
name = ['TC001','TC002','TC003','TC003','TC004','TC005','TC006','TC007','TC008','TC009','TC010','TC011','TC012','TC013','TC014']
folder = mg.NodeByPath('Subject\\Test Factory\\MPG\\MPG Regression Test_Yearly Request\\GUI')
for x in name:
testList = folder.FindTests(x)
#print(type(testList))
print(testList[0].Name)
print(testList[0].DesStepsNum)
td.Disconnect()
td.Logout()
Any help or guidance is much appreciated !
Assuming that you have working experience in Python. Here I am writing all the different functions needed to complete your task.
Reference: https://admhelp.microfocus.com/alm/api_refs/REST_TECH_PREVIEW/ALM_REST_API_TP.html
Global Variable
import re
import json
import datetime
import time
import sys
import os, fnmatch
from os import listdir
from os.path import isfile, join
from xml.etree.ElementTree import Element, SubElement, tostring, parse
import glob
from requests.auth import HTTPBasicAuth
import requests
ALM_USER_NAME = ""
ALM_PASSWORD = ""
ALM_DOMAIN = ""
ALM_URL = ""
AUTH_END_POINT = ALM_URL + "authentication-point/authenticate"
QC_SESSION_END_POINT = ALM_URL + "rest/site-session"
QC_LOGOUT_END_POINT = ALM_URL + "authentication-point/logout"
ALM_MIDPOINT = "rest/domains/" + ALM_DOMAIN + "/projects/"
PATH_SEP = os.path.sep
Login Function
def alm_login(self):
"""
Function : alm_login
Description : Authenticate user
Parameters : global parameter
alm_username - ALM User
alm_password - ALM Password
"""
response = self.alm_session.post(AUTH_END_POINT,
auth=HTTPBasicAuth(ALM_USER_NAME, ALM_PASSWORD))
if response.status_code == 200:
response = self.alm_session.post(QC_SESSION_END_POINT)
if response.status_code == 200 | response.status_code == 201:
print "ALM Authentication successful"
else:
print "Error: ", response.staus_code
else:
print "Error: ", response.staus_code
self.alm_session.headers.update({'Accept':'application/json',
'Content-Type': 'application/xml'})
return
Logout Function
After the logout method is successful the cookie should expire
def alm_logout(self):
'''
Function : alm_logout
Description : terminate user session
Parameters : No Parameters
'''
response = self.alm_session.post(QC_LOGOUT_END_POINT)
print "Logout successful", response.headers.get('Expires'), response.status_code
return
Get Test Set Folder
If the test cases span across multiple test suites then it is better to get the test set folder first and find the necessary test suite.
def find_test_set_folder(self):
'''
Function : find_test_set_folder
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
json_str = json.loads(self.find_folder_id(self.test_set_path.split("\\"), "test-set-folders"
, 0, "id"))
if 'entities' in json_str:
return create_key_value(json_str['entities'][0]['Fields'])['id']
else:
return create_key_value(json_str['Fields'])['id']
Get Folder Id
This method will help you find the Test Suite Folder ID or Test Plan Folder Id.
def find_folder_id(self, arrfolder, str_api, parent_id, fields):
'''
Function : find_folder_id
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
for foldername in arrfolder:
payload = {"query": "{name['" + foldername + "'];parent-id[" + str(parent_id) + "]}",
"fields": fields}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
if obj["TotalResults"] >= 1:
parent_id = get_field_value(obj['entities'][0]['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
else:
# print("Folder " + foldername + " does not exists")
inputdata = dict()
inputdata['Type'] = str_api[0:len(str_api) - 1]
inputdata['name'] = foldername
inputdata['parent-id'] = str(parent_id)
data = generate_xml_data(inputdata)
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data)
obj = json.loads(response.text)
if response.status_code == 200 | response.status_code == 201:
parent_id = get_field_value(obj['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
return response.text
Create Run Instance
Before updating the testing status, we must create a run instance for the test.
def create_run_instance(self, test_set_id, test_map):
'''
Function : create_run_instance
Description : Create new run instances
Parameters : Test Set Id
'''
str_api = "test-instances"
fields = "id,test-id,test-config-id,cycle-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
run_instance_post = "<Entities>"
for entity in obj["entities"]:
run_name = re.sub('[-:]', '_',
'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime(
'%Y-%m-%d %H:%M:%S'))
temp_map = create_key_value(entity["Fields"])
_test_id = int(temp_map['test-id'])
self.parser_temp_dic[_test_id]['testcycl-id'] = temp_map['id']
self.parser_temp_dic[_test_id]['test-config-id'] = temp_map['test-config-id']
self.parser_temp_dic[_test_id]['test-id'] = temp_map['test-id']
self.parser_temp_dic[_test_id]['cycle-id'] = temp_map['cycle-id']
# parser_temp_dic[int(temp_map['test-id'])]['status'].sort()
status = "Passed"
if 'Failed' in self.parser_temp_dic[int(temp_map['test-id'])]['status']:
status = 'Failed'
self.parser_temp_dic[int(temp_map['test-id'])]['final-status'] = status
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['name'] = run_name
inputdata['owner'] = ALM_USER_NAME
inputdata['test-instance'] = str(1)
inputdata['testcycl-id'] = str(temp_map['id'])
inputdata['cycle-id'] = str(temp_map['cycle-id'])
inputdata['status'] = 'Not Completed'
inputdata['test-id'] = temp_map['test-id']
inputdata['subtype-id'] = 'hp.qc.run.MANUAL'
data = generate_xml_data(inputdata)
run_instance_post = run_instance_post + data
self.bulk_operation("runs", run_instance_post + "</Entities>", True, "POST")
return
Update Run Instance
def update_run_instance(self, test_set_id):
'''
Function : update_run_instance
Description : Update the test status in run instances
Parameters : No input parameter
'''
fields = "id,test-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/runs", params=payload)
obj = json.loads(response.text)
run_instance_put = "<Entities>"
for entity in obj["entities"]:
if len(entity["Fields"]) != 1:
temp_map = create_key_value(entity["Fields"])
self.parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['id'] = str(temp_map['id'])
intermediate_ = self.parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']
inputdata['testcycl-id'] = str(intermediate_)
inputdata['status'] = self.parser_temp_dic[int(temp_map['test-id'])]['final-status']
data = generate_xml_data(inputdata)
run_instance_put = run_instance_put + data
self.bulk_operation("runs", run_instance_put + "</Entities>", True, "PUT")
return
Upload Result File
Uploading file to any object in ALM
def upload_result_file(self, test_set_id, report_file):
'''
Function : upload_result_file
Description : Upload test result to ALM
'''
payload = open(report_file, 'rb')
headers = {}
headers['Content-Type'] = "application/octet-stream"
headers['slug'] = "test-results" + report_file[report_file.rfind(".")+1: ]
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/test-sets/" +
str(test_set_id) + "/attachments/",
headers=headers, data=payload)
if not (response.status_code == 200 or response.status_code == 201):
print "Attachment step failed!", response.text, response.url, response.status_code
return
Bulk Operation
This is a helper that allows us to POST an array of data.
def bulk_operation(self, str_api, data, isbulk, request_type):
'''
Function : Post Test Case / Test Instance
Description : Generic function to post multiple entities.
Parameters : 3 parameters
str_api - End point name
data - Actual data to post
isbulk - True or False
'''
response = None
headers = {}
try:
if isbulk:
headers['Content-Type'] = "application/xml;type = collection"
if request_type == 'POST':
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
elif request_type == 'PUT':
response = self.alm_session.put(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
except Exception as err:
print err
if response.status_code == 200 | response.status_code == 201:
return response.text
return response
You can refer this code
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
You need to send only three values - Test Set ID (Cycle ID), ALM Test ID & Execution Status. The library automatically builds the json payload and creates a test run and update result.

Adding A Sentiment Analysis Loop When Collecting Twitter data

I am currently trying to add a sentiment analysis loop to a python script that collects tweets. When I run the script without the loop, it can generate the tweets just fine; however, whenever I add the for loop in ( starting at "for tweets in tweets returned", the tweets no longer generates and the csv I created does not appear as well. I was wondering if this had to do with where I have placed the for loop within the script or if there is some error with the loop itself. Any help would be greatly appreciated, thanks!
sentiments=[]
sentiment_means=[]
# Create URL Structure
class RequestWithMethod(urllib.request.Request):
def __init__(self, base_url, method, headers={}):
self._method = method
urllib.request.Request.__init__(self, base_url, headers)
def get_method(self):
if self._method:
return self._method
else:
return urllib.request.Request.get_method(self)
#Create Endpoint & Add Credentials
def create_rules_endpoint(query):
new_url = base_url + query
base64string = ('%s:%s' % (UN, PWD)).replace('\n', '')
base = base64.b64encode(base64string.encode('ascii'))
final_final_url = urllib.request.Request(new_url)
final_final_url.add_header('Authorization', 'Basic %s' % base.decode('ascii'))
return final_final_url
# Take in the Endpoint and Make the Request
def make_request(search_endpoint):
try:
response = urllib.request.urlopen(search_endpoint)
response_data = response.read()
handle_response(response_data)
except urllib.request.HTTPError as error:
print("ERROR: %s" % error)
# Handle the Returned Data
def handle_response(data):
tweets_returned = json.loads(data.decode('utf-8'))
print(tweets_returned)
**for tweet in tweets_returned['results']:
counter=1
compound_list=[]
positive_list = []
negative_list = []
neutral_list = []
geo_list = []
compound = analyzer.polarity_scores(tweet["text"])["compound"]
pos = analyzer.polarity_scores(tweet["text"])["pos"]
neu = analyzer.polarity_scores(tweet["text"])["neu"]
neg = analyzer.polarity_scores(tweet["text"])["neg"]
compound_list.append(compound)
positive_list.append(pos)
negative_list.append(neg)
neutral_list.append(neu)
sentiments.append({"Location": tweet["geo"],
"Date": tweet["created_at"],
"Tweet": tweet["text"],
"Compound": compound,
"Positive": pos,
"Neutral": neu,
"Negative": neg,
"Tweets_Ago": counter
})
counter+=1
sentiment_means.append({
"Compound_Mean": np.mean(compound_list),
"Positive": np.mean(positive_list),
"Neutral": np.mean(negative_list),
"Negative": np.mean(neutral_list),
"Count": len(compound_list)
})**
# Create the Endpoint Variable w/ Sample Query Keyword
search_endpoint = create_rules_endpoint('Wilson%20Rackets%20has%3Ageo%20lang%3Aen')
# Make the Request by Passing in Search Endpoint
make_request(search_endpoint)
# Convert all_sentiments to DataFrame
all_sentiments_pd = pd.DataFrame.from_dict(sentiments)
all_sentiments_pd.to_csv("sentiments_array_pd.csv")
display(all_sentiments_pd)
#print(all_sentiments_pd.dtypes)
# Convert sentiment_means to DataFrame
sentiment_means_pd = pd.DataFrame.from_dict(sentiment_means)
display(sentiment_means_pd)

why the process not run in python

i have a bot(query, key) function to post data, dicts(query, answer) to wrap the return result, and query_pipe(query_list) to process list of query request. But when i put that in multiprocessing.Process, i found that bot(query, key) return nothing. Here's my code.
def bot(query, key):
data = {
'key' : key,
'info' : query,
'userid' : 'wechat-robot',
}
try:
apiUrl = url
page = requests.post(apiUrl, data=data)
if page.json()['code'] == '100000':
answer = page.json()['text']
return dicts(query, answer)
else:
return dicts(query, 'failed')
except Exception as e:
return '500 Error'
def dicts(query, answer):
return {'query': query, 'answer': answer}
def query_pipe(query_list):
keys_pool = []
with open('keys.txt', 'r') as f:
lines = f.readlines()
for line in lines:
keys_pool.append(line.strip('\n'))
idx = 0
print(bot(query_list[0], keys_pool[0]))
p = Process(target=query_pipe, args=(query_data,))
p.start()
p.join()
But when i run the query_pipe(query_list) which not using multiprocess.Process, query_pipe(query_list) would print the correct output. I feel so confused, so anyone could give me a hint would be highly appreciated.

urrlib2 inside a loop

I'm trying to use urllib2 inside a loop with a try/except but when one iterate enter in the except, all the next iterations enter in the except too:
for machine_id in machines:
machine = Machine.objects.get(id=machine_id)
r2 = urllib2.Request('http://localhost:9191/run')
r2.add_header('Accept', 'application/json')
r2.add_header('Content-Type', 'application/json')
data = json.dumps({"client":"ssh", "tgt":machine.name, "fun": "state.sls", "arg":["update2", "nacl"]})
r2.add_data(data)
try:
resp2 = urllib2.urlopen(r2)
json_response = json.load(resp2)['return'][0]
json_response_m7 = json_response[machine.name]
try:
json_response_m7 = json_response_m7['return']
for key, value in json_response_m7.items():
if(value['result'] == False):
print(key)
print("\n")
print(value['changes']['stderr'])
data_return['key'].append(key)
data_return['error'].append(value['changes']['stderr'])
data_return['machine'].append(machine.name)
#data_return = {"key":key, "error": value['changes']['stderr']}
except:
print("except!!!!!!")
print(json_response_m7['stderr'])
data_return['key'].append('stderr:')
data_return['error'].append(json_response_m7['stderr'])
data_return['machine'].append(machine.name)
except (IOError, httplib.HTTPException):
print("Errooor")
data_return['key'].append('stderr: ')
data_return['error'].append('This machine is not added to the roster file')
data_return['machine'].append(machine.name)
the problem is with the first try/except. Can anyone help me please?
Thanks!

Categories

Resources