Python: notify main code when a variable changes in another .py file

I can't wrap my head around how to accomplish the following scenario:
In my main.py and request.py I have a reference to a config.py which contains some configuration variables.
In this sample, an offline variable that is either True or False.
What I would like to do is:
If, e.g., in my request.py I set config.offline = True, then I would like to do something in my main.py.
But main.py also references request.py, so I cannot call any functions in main.py from request.py (that would be a circular import).
Any ideas on how I can do this?
I have over 1000 lines of code, so I cannot show everything, but I have tried to show the most important parts:
main.py:
import config as cfg
import request as req
def doStuffWhenOfflineVarChanges(newState):
    print(newState)
config.py:
offline = True
request.py:
import requests
from datetime import datetime
import config as cfg

def logEntrance(barCode, noOfGuests, dt=datetime.now()):
    date = dt.strftime("%Y-%m-%d")
    time = dt.strftime("%H:%M:%S")
    headers = {'Content-type': 'application/json', 'Authorization': cfg.auth}
    url = 'https://xxxxx.xxxxxxxx.xxx/' + cfg.customerId + '/api.ashx'
    params = {"inputtype": "logentrances"}
    pl = [{"Barcode": barCode, "PoeID": cfg.poeId, "UserID": cfg.userId, "EntranceDate": date, "EntranceTime": time, "NoOfGuests": str(noOfGuests), "OfflineMode": cfg.offline}]
    #print(url)
    print(pl)
    try:
        r = requests.post(url, json=pl, params=params, headers=headers)
        print(r.status_code)
    except:
        cfg.offline = True
    return r

You need a callback function to handle the change in your config.py file!
# config.py
offline = False

def doStuffWhenOfflineVarChanges(newState):
    print(newState)

# request.py
import requests
from datetime import datetime

import config as cfg

class OfflineState:
    def __init__(self, callback):
        self._callback = callback
        self._offline = cfg.offline

    @property
    def offline(self):
        return self._offline

    @offline.setter
    def offline(self, value):
        self._offline = value
        self._callback(value)

offline_state = OfflineState(cfg.doStuffWhenOfflineVarChanges)

def logEntrance(barCode, noOfGuests, dt=datetime.now()):
    date = dt.strftime("%Y-%m-%d")
    time = dt.strftime("%H:%M:%S")
    headers = {'Content-type': 'application/json', 'Authorization': cfg.auth}
    url = 'https://xxxxx.xxxxxxxx.xxx/' + cfg.customerId + '/api.ashx'
    params = {"inputtype": "logentrances"}
    pl = [{"Barcode": barCode, "PoeID": cfg.poeId, "UserID": cfg.userId, "EntranceDate": date, "EntranceTime": time, "NoOfGuests": str(noOfGuests), "OfflineMode": offline_state.offline}]
    try:
        r = requests.post(url, json=pl, params=params, headers=headers)
    except:
        offline_state.offline = True
    return r
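For illustration, here is a minimal, hypothetical sketch of how main.py could plug its own handler into the property above (it assumes request.py exposes offline_state exactly as shown):
# main.py
import request as req

def doStuffWhenOfflineVarChanges(newState):
    print("offline changed to:", newState)

# swap in main's handler for the default one coming from config.py
req.offline_state._callback = doStuffWhenOfflineVarChanges

# any assignment through the property now notifies main.py
req.offline_state.offline = True   # prints: offline changed to: True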

Related

How to output zipkin tracing results to a file in python?

I am using py_zipkin in my code, and I can see the tracing result in the Zipkin UI. But I don't know how to output the tracing results to a file in a specified format, like a log file.
Here is an example of my code:
import requests
from py_zipkin.zipkin import zipkin_span

def fun1(self, param):
    with zipkin_span(
        service_name = 'my_code',
        span_name = 'fun1',
        transport_handler = http_transport,
        port = 8080,
        sample_rate = 100,
    ) as zipkin_context:
        run_some_func(param)
        zipkin_context.update_binary_annotations(param)

def http_transport(encoded_span):
    zipkin_url = 'http://127.0.0.1:9411/api/spans'
    requests.post(
        zipkin_url,
        data = encoded_span,
        headers = {'Content-Type': 'application/x-thrift'}
    )
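Since transport_handler is just a callable that receives the encoded span bytes, a minimal sketch of a file-based handler might look like the following (Encoding.V2_JSON and the encoding argument are assumptions about a reasonably recent py_zipkin, and run_some_func/param are placeholders carried over from the snippet above):
from py_zipkin.encoding import Encoding
from py_zipkin.zipkin import zipkin_span

def file_transport(encoded_span):
    # append each encoded span to a local log file instead of POSTing it
    with open('zipkin_traces.log', 'ab') as f:
        f.write(encoded_span)
        f.write(b'\n')

def fun1(param):
    with zipkin_span(
        service_name='my_code',
        span_name='fun1',
        transport_handler=file_transport,  # write to a file instead of HTTP
        encoding=Encoding.V2_JSON,         # emit human-readable JSON spans
        sample_rate=100,
    ) as zipkin_context:
        run_some_func(param)
        zipkin_context.update_binary_annotations(param)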

HP ALM results attachment and status update using python

Challenge: attach screenshots to tests in the Test Lab, and update the status as PASS/FAIL step-wise (currently updating the pass status is enough).
I am expected to write a script in Python to attach test results to the test cases present in the Test Lab, and then, for each test step, set the expected result to "As Expected" and pass the TC step by step.
I.e., while performing this manually, we select the case, click Run, enter "As expected" in the expected output area, pass that step, and perform this for all test steps of the test case. This needs to be automated. I have a folder which has screenshots (named similarly to the TC names), so the script should upload the screenshots and update the status.
What I have tried so far :
I was able to connect to ALM; with a partial test case name I was able to pull the full test case name from the Test Plan, but unfortunately I am still struggling to achieve the final goal.
My code so far :
import win32com
from win32com.client import Dispatch
import codecs
import re
import json

# Login Credentials
qcServer = "https://almurl.saas.microfocus.com/qcbin/"
qcUser = "my_username"
qcPassword = "pwd"
qcDomain = "domain"
testList = []
testdict = {}
project = "Crew_Management"

# Do the actual login
td = win32com.client.Dispatch("TDApiOle80.TDConnection.1")
td.InitConnectionEx(qcServer)
td.Login(qcUser,qcPassword)
td.Connect(qcDomain,project)
if td.Connected == True:
    print ("System: Logged in to " +project)
else:
    print ("Connect failed to " +project)

mg = td.TreeManager # Tree manager
name = ['TC001','TC002','TC003','TC003','TC004','TC005','TC006','TC007','TC008','TC009','TC010','TC011','TC012','TC013','TC014']
folder = mg.NodeByPath('Subject\\Test Factory\\MPG\\MPG Regression Test_Yearly Request\\GUI')
for x in name:
    testList = folder.FindTests(x)
    #print(type(testList))
    print(testList[0].Name)
    print(testList[0].DesStepsNum)

td.Disconnect()
td.Logout()
Any help or guidance is much appreciated !
Assuming that you have working experience in Python, here are all the different functions needed to complete your task.
Reference: https://admhelp.microfocus.com/alm/api_refs/REST_TECH_PREVIEW/ALM_REST_API_TP.html
Global Variables
import re
import json
import datetime
import time
import sys
import os, fnmatch
from os import listdir
from os.path import isfile, join
from xml.etree.ElementTree import Element, SubElement, tostring, parse
import glob
from requests.auth import HTTPBasicAuth
import requests
ALM_USER_NAME = ""
ALM_PASSWORD = ""
ALM_DOMAIN = ""
ALM_URL = ""
AUTH_END_POINT = ALM_URL + "authentication-point/authenticate"
QC_SESSION_END_POINT = ALM_URL + "rest/site-session"
QC_LOGOUT_END_POINT = ALM_URL + "authentication-point/logout"
ALM_MIDPOINT = "rest/domains/" + ALM_DOMAIN + "/projects/"
PATH_SEP = os.path.sep
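The functions below are written as methods (they rely on self.alm_session, self.test_set_path and self.parser_temp_dic); a minimal, hypothetical class skeleton that supplies those attributes might look like this:
import collections
import requests

class ALMClient:
    # hypothetical container for the methods shown below
    def __init__(self, test_set_path):
        # requests.Session keeps the LWSSO/QCSession cookies between calls
        self.alm_session = requests.Session()
        self.test_set_path = test_set_path
        # test-id -> {'status': [...], 'testcycl-id': ..., 'final-status': ...}
        self.parser_temp_dic = collections.defaultdict(dict)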
Login Function
def alm_login(self):
    """
    Function    : alm_login
    Description : Authenticate user
    Parameters  : global parameters
                  alm_username - ALM User
                  alm_password - ALM Password
    """
    response = self.alm_session.post(AUTH_END_POINT,
                                     auth=HTTPBasicAuth(ALM_USER_NAME, ALM_PASSWORD))
    if response.status_code == 200:
        response = self.alm_session.post(QC_SESSION_END_POINT)
        if response.status_code == 200 or response.status_code == 201:
            print("ALM Authentication successful")
        else:
            print("Error: ", response.status_code)
    else:
        print("Error: ", response.status_code)
    self.alm_session.headers.update({'Accept': 'application/json',
                                     'Content-Type': 'application/xml'})
    return
Logout Function
After the logout method is successful the cookie should expire
def alm_logout(self):
    '''
    Function    : alm_logout
    Description : terminate user session
    Parameters  : No Parameters
    '''
    response = self.alm_session.post(QC_LOGOUT_END_POINT)
    print("Logout successful", response.headers.get('Expires'), response.status_code)
    return
Get Test Set Folder
If the test cases span across multiple test suites then it is better to get the test set folder first and find the necessary test suite.
def find_test_set_folder(self):
    '''
    Function    : find_test_set_folder
    Description : Find the id of the test set folder at the given path
    Parameters  : 1 parameter
                  test_set_path - ALM test set path
    '''
    json_str = json.loads(self.find_folder_id(self.test_set_path.split("\\"), "test-set-folders",
                                              0, "id"))
    if 'entities' in json_str:
        return create_key_value(json_str['entities'][0]['Fields'])['id']
    else:
        return create_key_value(json_str['Fields'])['id']
Get Folder Id
This method will help you find the Test Suite Folder ID or Test Plan Folder Id.
def find_folder_id(self, arrfolder, str_api, parent_id, fields):
    '''
    Function    : find_folder_id
    Description : Walk the folder path and return the folder id, creating
                  missing folders along the way
    Parameters  : arrfolder - list of folder names
                  str_api   - end point name
                  parent_id - id of the starting parent folder
                  fields    - fields to fetch
    '''
    for foldername in arrfolder:
        payload = {"query": "{name['" + foldername + "'];parent-id[" + str(parent_id) + "]}",
                   "fields": fields}
        response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
        obj = json.loads(response.text)
        if obj["TotalResults"] >= 1:
            parent_id = get_field_value(obj['entities'][0]['Fields'], "id")
            # print("folder id of " + foldername + " is " + str(parent_id))
        else:
            # print("Folder " + foldername + " does not exist")
            inputdata = dict()
            inputdata['Type'] = str_api[0:len(str_api) - 1]
            inputdata['name'] = foldername
            inputdata['parent-id'] = str(parent_id)
            data = generate_xml_data(inputdata)
            response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data)
            obj = json.loads(response.text)
            if response.status_code == 200 or response.status_code == 201:
                parent_id = get_field_value(obj['Fields'], "id")
                # print("folder id of " + foldername + " is " + str(parent_id))
    return response.text
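The helpers create_key_value, get_field_value and generate_xml_data are used throughout but not shown; a sketch of what they are assumed to do is below (it assumes the ALM REST API returns each entity's Fields as a list of {"Name": ..., "values": [{"value": ...}]} objects and accepts <Entity> XML on POST/PUT):
from xml.etree.ElementTree import Element, SubElement, tostring

def create_key_value(fields):
    # flatten ALM's field list into a plain {name: value} dict
    return {f["Name"]: (f["values"][0].get("value") if f["values"] else None)
            for f in fields}

def get_field_value(fields, name):
    # return a single field value from ALM's field list
    for f in fields:
        if f["Name"] == name and f["values"]:
            return f["values"][0].get("value")
    return None

def generate_xml_data(inputdata):
    # build the <Entity> XML body ALM expects; 'Type' names the entity type
    data = dict(inputdata)
    entity = Element('Entity', {'Type': data.pop('Type')})
    fields = SubElement(entity, 'Fields')
    for name, value in data.items():
        field = SubElement(fields, 'Field', {'Name': name})
        SubElement(field, 'Value').text = str(value)
    return tostring(entity, encoding='unicode')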
Create Run Instance
Before updating the testing status, we must create a run instance for the test.
def create_run_instance(self, test_set_id, test_map):
    '''
    Function    : create_run_instance
    Description : Create new run instances
    Parameters  : Test Set Id
    '''
    str_api = "test-instances"
    fields = "id,test-id,test-config-id,cycle-id"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
               "page-size": 5000}
    response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
    obj = json.loads(response.text)

    run_instance_post = "<Entities>"
    for entity in obj["entities"]:
        run_name = re.sub('[-:]', '_',
                          'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime(
                              '%Y-%m-%d %H:%M:%S'))
        temp_map = create_key_value(entity["Fields"])
        _test_id = int(temp_map['test-id'])
        self.parser_temp_dic[_test_id]['testcycl-id'] = temp_map['id']
        self.parser_temp_dic[_test_id]['test-config-id'] = temp_map['test-config-id']
        self.parser_temp_dic[_test_id]['test-id'] = temp_map['test-id']
        self.parser_temp_dic[_test_id]['cycle-id'] = temp_map['cycle-id']
        # parser_temp_dic[int(temp_map['test-id'])]['status'].sort()
        status = "Passed"
        if 'Failed' in self.parser_temp_dic[int(temp_map['test-id'])]['status']:
            status = 'Failed'
        self.parser_temp_dic[int(temp_map['test-id'])]['final-status'] = status

        inputdata = dict()
        inputdata['Type'] = 'run'
        inputdata['name'] = run_name
        inputdata['owner'] = ALM_USER_NAME
        inputdata['test-instance'] = str(1)
        inputdata['testcycl-id'] = str(temp_map['id'])
        inputdata['cycle-id'] = str(temp_map['cycle-id'])
        inputdata['status'] = 'Not Completed'
        inputdata['test-id'] = temp_map['test-id']
        inputdata['subtype-id'] = 'hp.qc.run.MANUAL'
        data = generate_xml_data(inputdata)
        run_instance_post = run_instance_post + data
    self.bulk_operation("runs", run_instance_post + "</Entities>", True, "POST")
    return
Update Run Instance
def update_run_instance(self, test_set_id):
    '''
    Function    : update_run_instance
    Description : Update the test status in run instances
    Parameters  : No input parameter
    '''
    fields = "id,test-id"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
               "page-size": 5000}
    response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/runs", params=payload)
    obj = json.loads(response.text)

    run_instance_put = "<Entities>"
    for entity in obj["entities"]:
        if len(entity["Fields"]) != 1:
            temp_map = create_key_value(entity["Fields"])
            self.parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
            inputdata = dict()
            inputdata['Type'] = 'run'
            inputdata['id'] = str(temp_map['id'])
            intermediate_ = self.parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']
            inputdata['testcycl-id'] = str(intermediate_)
            inputdata['status'] = self.parser_temp_dic[int(temp_map['test-id'])]['final-status']
            data = generate_xml_data(inputdata)
            run_instance_put = run_instance_put + data
    self.bulk_operation("runs", run_instance_put + "</Entities>", True, "PUT")
    return
Upload Result File
Uploading a file to any object in ALM:
def upload_result_file(self, test_set_id, report_file):
    '''
    Function    : upload_result_file
    Description : Upload test result to ALM
    '''
    payload = open(report_file, 'rb')
    headers = {}
    headers['Content-Type'] = "application/octet-stream"
    headers['slug'] = "test-results" + report_file[report_file.rfind(".")+1: ]
    response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/test-sets/" +
                                     str(test_set_id) + "/attachments/",
                                     headers=headers, data=payload)
    if not (response.status_code == 200 or response.status_code == 201):
        print("Attachment step failed!", response.text, response.url, response.status_code)
    return
Bulk Operation
This is a helper that allows us to POST an array of data.
def bulk_operation(self, str_api, data, isbulk, request_type):
    '''
    Function    : Post Test Case / Test Instance
    Description : Generic function to post multiple entities.
    Parameters  : 4 parameters
                  str_api - End point name
                  data - Actual data to post
                  isbulk - True or False
                  request_type - POST or PUT
    '''
    response = None
    headers = {}
    try:
        if isbulk:
            headers['Content-Type'] = "application/xml;type = collection"
        if request_type == 'POST':
            response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
                                             headers=headers)
        elif request_type == 'PUT':
            response = self.alm_session.put(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
                                            headers=headers)
    except Exception as err:
        print(err)
    if response is not None and (response.status_code == 200 or response.status_code == 201):
        return response.text
    return response
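For orientation, a hypothetical driver tying these methods together (it uses the ALMClient skeleton sketched earlier; the test set id itself would still have to be looked up, so it is only a placeholder here):
client = ALMClient("Root\\MyTestSets\\Regression")
client.alm_login()
folder_id = client.find_test_set_folder()        # id of the test set folder
test_set_id = "1001"                             # placeholder: the actual test set (cycle) id
# client.parser_temp_dic[...]['status'] is expected to hold the step results beforehand
client.create_run_instance(test_set_id, {})      # test_map is unused in the snippet above
client.update_run_instance(test_set_id)
client.upload_result_file(test_set_id, "TC001_screenshot.png")
client.alm_logout()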
You can refer to this code:
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
You need to send only three values - Test Set ID (Cycle ID), ALM Test ID and Execution Status. The library automatically builds the JSON payload, creates a test run and updates the result.

Nested JSON Values cause "TypeError: Object of type 'int64' is not JSON serializable"

Would love some help here. Full context: this is my first "purposeful" Python script. Prior to this I've only dabbled a bit and am honestly still learning, so maybe I jumped in a bit too early here.
Long story short, I've been running all over fixing various type mismatches or just general indentation issues (dear lord, Python isn't forgiving on this).
I think I'm about finished but have a few last issues. Most of them seem to come from the same section too. This script is just meant to take a CSV file that has 3 columns and use it to send requests based on the first column (either iOS or Android). The problem is when I'm creating the body to send...
Here's the code (a few tokens omitted for postability):
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from tqdm import tqdm
from datetime import *
import uuid
import warnings
from math import isnan
import time

## throttling based on AF's 80 request per 2 minute rule
def throttle():
    i = 0
    while i <= 3:
        print ("PAUSED FOR THROTTLING!" + "\n" + str(3-i) + " minutes remaining")
        time.sleep(60)
        i = i + 1
        print (i)
    return 0

## function for reformating the dates
def date():
    d = datetime.utcnow()  # <-- get time in UTC
    d = d.isoformat('T') + 'Z'
    t = d.split('.')
    t = t[0] + 'Z'
    return str(t)

## function for dealing with Android requests
def android_request(madv_id, mtime, muuid, android_app, token, endpoint):
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    params = {'api_token': token }
    subject_identities = {
        "identity_format": "raw",
        "identity_type": "android_advertising_id",
        "identity_value": madv_id
    }
    body = {
        'subject_request_id': muuid,
        'subject_request_type': 'erasure',
        'submitted_time': mtime,
        'subject_identities': dict(subject_identities),
        'property_id': android_app
    }
    body = json.dumps(body)
    res = requests.request('POST', endpoint, headers=headers,
                           data=body, params=params)
    print("android " + res.text)

## function for dealing with iOS requests
def ios_request(midfa, mtime, muuid, ios_app, token, endpoint):
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    params = {'api_token': token}
    subject_identities = {
        'identity_format': 'raw',
        'identity_type': 'ios_advertising_id',
        'identity_value': midfa,
    }
    body = {
        'subject_request_id': muuid,
        'subject_request_type': 'erasure',
        'submitted_time': mtime,
        'subject_identities': list(subject_identities),
        'property_id': ios_app,
    }
    body = json.dumps(body)
    res = requests.request('POST', endpoint, headers=headers, data=body, params=params)
    print("ios " + res.text)

## main run function. Determines whether it is iOS or Android request and sends if not LAT-user
def run(output, mdf, is_test):
    # assigning variables to the columns I need from file
    print ('Sending requests! Stand by...')
    platform = mdf.platform
    device = mdf.device_id
    if is_test == "y":
        ios = 'id000000000'
        android = 'com.tacos.okay'
        token = 'OMMITTED_FOR_STACKOVERFLOW_Q'
        endpoint = 'https://hq1.appsflyer.com/gdpr/stub'
    else:
        ios = 'id000000000'
        android = 'com.tacos.best'
        token = 'OMMITTED_FOR_STACKOVERFLOW_Q'
        endpoint = 'https://hq1.appsflyer.com/gdpr/opengdpr_requests'
    for position in tqdm(range(len(device))):
        if position % 80 == 0 and position != 0:
            throttle()
        else:
            req_id = str(uuid.uuid4())
            timestamp = str(date())
            if platform[position] == 'android' and device[position] != '':
                android_request(device[position], timestamp, req_id, android, token, endpoint)
                mdf['subject_request_id'][position] = req_id
            if platform[position] == 'ios' and device[position] != '':
                ios_request(device[position], timestamp, req_id, ios, token, endpoint)
                mdf['subject_request_id'][position] = req_id
            if 'LAT' in platform[position]:
                mdf['subject_request_id'][position] = 'null'
                mdf['error status'][position] = 'Limit Ad Tracking Users Unsupported. Device ID Required'
    mdf.to_csv(output, sep=',', index=False, header=True)
    # mdf.close()
    print ('\nDONE. Please see ' + output
           + ' for the subject_request_id and/or error messages\n')

## takes the CSV given by the user and makes a copy of it for us to use
def read(mname):
    orig_csv = pd.read_csv(mname)
    mdf = orig_csv.copy()
    # Check that both dataframes are actually the same
    # print(pd.DataFrame.equals(orig_csv, mdf))
    return mdf

## just used to create the renamed file with _LOGS.csv
def rename(mname):
    msuffix = '_LOG.csv'
    i = mname.split('.')
    i = i[0] + msuffix
    return i

## adds relevant columns to the log file
def logs_csv(out, df):
    mdf = df
    mdf['subject_request_id'] = ''
    mdf['error status'] = ''
    mdf['device_id'].fillna('')
    mdf.to_csv(out, sep=',', index=None, header=True)
    return mdf

## solely for reading in the file name from the user. creates string out of filename
def readin_name():
    mprefix = input('FILE NAME: ')
    msuffix = '.csv'
    mname = str(mprefix + msuffix)
    print ('\n' + 'Reading in file: ' + mname)
    return mname

def start():
    print ('\nWelcome to GDPR STREAMLINE')
    # blue = OpenFile()
    testing = input('Is this a test? (y/n) : ')
    # return a CSV
    name = readin_name()
    import_csv = read(name)
    output_name = rename(name)
    output_file = logs_csv(output_name, import_csv)
    run(output_name, output_file, testing)
    # print ("FILE PATH:" + blue)

## to disable all warnings in console logs
warnings.filterwarnings('ignore')
start()
And here's the error stacktrace:
Reading in file: test.csv
Sending requests! Stand by...
0%| | 0/384 [00:00<?, ?it/s]
Traceback (most recent call last):
File "a_GDPR_delete.py", line 199, in <module>
start()
File "a_GDPR_delete.py", line 191, in start
run( output_name, output_file, testing)
File "a_GDPR_delete.py", line 114, in run
android_request(device[position], timestamp, req_id, android, token, endpoint)
File "a_GDPR_delete.py", line 57, in android_request
body = json.dumps(body)
File "/Users/joseph/anaconda3/lib/python3.6/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/Users/joseph/anaconda3/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'int64' is not JSON serializable
TL;DR:
Getting a TypeError when calling json.dumps on a body with another nested JSON object. I've confirmed that the nested part is the problem, because if I remove the "subject_identities" section this compiles and works... but the API I'm using NEEDS those values, so this doesn't actually do anything without that section.
Here's the relevant code again (and in the version I first used that WAS working previously):
def android(madv_id, mtime, muuid):
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    params = {
        "api_token": "OMMITTED_FOR_STACKOVERFLOW_Q"
    }
    body = {
        "subject_request_id": muuid,  # muuid,
        "subject_request_type": "erasure",
        "submitted_time": mtime,
        "subject_identities": [
            { "identity_type": "android_advertising_id",
              "identity_value": madv_id,
              "identity_format": "raw" }
        ],
        "property_id": "com.tacos.best"
    }
    body = json.dumps(body)
    res = requests.request("POST",
                           "https://hq1.appsflyer.com/gdpr/opengdpr_requests",
                           headers=headers, data=body, params=params)
I get the feeling I'm close to this working. I had a much simpler version early on that worked, but I rewrote this to be more dynamic and use fewer hard-coded values (so that I can eventually apply this to any app I'm working with, and not only the two it was made for).
Please be nice, I'm entirely new to python and also just rusty on coding in general (thus trying to do projects like this one)
You can check for numpy dtypes like so:
if hasattr(obj, 'dtype'):
    obj = obj.item()
This will convert it to the closest equivalent data type
EDIT:
Apparently np.nan is JSON serializable so I've removed that catch from my answer
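As a sketch of how that check can be hooked into json.dumps itself (via its default argument, which is only called for objects the encoder cannot handle), assuming the payload may contain numpy scalars anywhere in the nesting:
import json
import numpy as np

def to_builtin(obj):
    # json.dumps calls this only for objects it cannot serialize itself
    if hasattr(obj, 'dtype'):
        return obj.item()   # numpy scalar -> native Python int/float/bool
    raise TypeError("Object of type %s is not JSON serializable" % type(obj).__name__)

body = {'property_id': np.int64(42),
        'subject_identities': [{'identity_value': np.int64(7)}]}
print(json.dumps(body, default=to_builtin))   # {"property_id": 42, ...}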
Thanks to everyone for helping so quickly here. Apparently I was deceived by the error message, as the fix from @juanpa.arrivillaga did the job with one adjustment.
Corrected code was on these parts:
android_request(str(device[position]), timestamp, req_id, android, token, endpoint)
and here:
ios_request(str(device[position]), timestamp, req_id, ios, token, endpoint)
I had to cast to string, apparently, even though these values are not originally integers and tend to look like this instead: ab12ab12-12ab-34cd-56ef-1234abcd5678
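For context, a tiny hypothetical illustration of why the cast helps: values pulled out of a pandas column are numpy scalars, and str() turns them back into plain, JSON-serializable Python strings.
import json
import pandas as pd

mdf = pd.DataFrame({'device_id': [1234567890, 9876543210]})
val = mdf.device_id[0]
print(type(val))                                  # <class 'numpy.int64'>
# json.dumps({'identity_value': val})             # raises the TypeError above
print(json.dumps({'identity_value': str(val)}))   # {"identity_value": "1234567890"}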

I want to save the results of running Python to a file [duplicate]

This question already has answers here:
How to redirect 'print' output to a file?
(15 answers)
Closed 4 years ago.
import requests, json, pprint
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

pagesize = 1000
api_url_base = "https://test/sepm/api/v1/"
authentication_url = "https://test/sepm/api/v1/identity/authenticate"
json_format = True

payload = {
    "username" : "test",
    "password" : "test",
    "domain" : ""}
headers = {"Content-Type":"application/json"}
r = requests.post(authentication_url, verify=False, headers=headers,
                  data=json.dumps(payload))
api_token = (r.json()["token"])
headers = {'Content-Type': 'application/json',
           'Authorization': 'Bearer {0}'.format(api_token)}

def get_info(url, params):
    api_url = url
    params = params
    response = requests.get(api_url, headers=headers, verify=False,
                            params=params)
    if response.status_code == 200:
        return json.loads(response.content.decode('utf-8'))
    else:
        return response.status_code

def aggregate(endpoints_info, numberOfElements):
    itr = 0
    while itr <= (numberOfElements-1):
        computerName = endpoints_info['content'][itr]['computerName']
        ipAddresses = endpoints_info['content'][itr]['ipAddresses'][0]
        logonUserName = endpoints_info['content'][itr]['logonUserName']
        lastUpdateTime = endpoints_info['content'][itr]['creationTime']
        agentVersion = endpoints_info['content'][itr]['agentVersion']
        print(computerName, ipAddresses, logonUserName, lastUpdateTime, agentVersion)
        itr = itr + 1

groups_url = '{0}groups'.format(api_url_base)
fingerprint_url = '{0}policy-objects/fingerprints'.format(api_url_base)
endpoints_url = '{0}computers?'.format(api_url_base)
total_pages = get_info(endpoints_url, {'pageSize': pagesize})['totalPages']

itr = 1
while itr <= total_pages:
    params = {'pageSize': pagesize, 'pageIndex': itr}
    endpoints_info = get_info(endpoints_url, params)
    numberOfElements = endpoints_info['numberOfElements']
    itr = itr + 1
    if endpoints_info != 200:
        aggregate(endpoints_info, numberOfElements)
    else:
        print('[!] Request Failed, {0}')
This is the code that uses the Symantec REST API.
When you run this, you get results in the list format shown below.
Command line output:
P09PC 123.63.40.37 test-9 1520236609428 14.0.3897.1101
P10PC 123.63.40.31 test-10 1520230270130 14.0.3775.1002
P11PC 123.63.40.27 test-11 1520229680645 14.0.3775.1002
P12PC 123.63.40.26 test-12 1520229515250 14.0.3775.1002
I modified this source and I want to save the results to a file.
Unfortunately, my efforts over the last several days have failed.
Can you tell me how to save the output to a file?
Do you mean that instead of printing to the terminal you want to write the output to a file?
The process is simple: just open a file in the desired mode (append mode or write mode depending on your use case) and write to it there.
Consider reading this tutorial for a grasp of the concept.
After initializing the file object (as described in the tutorial) you just need to write to the file instead of using print statements. The code will look something like this:
file = open('output.txt', 'w')
file.write(str(computerName) + ' ' + str(ipAddresses) + ' ' + str(logonUserName) + ' ' + str(lastUpdateTime) + ' ' + str(agentVersion) + '\n')
file.close()
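Applied to the script above, a minimal sketch (assuming the same variable names and JSON structure) would open the file in append mode once per page and write one line per endpoint inside aggregate():
def aggregate(endpoints_info, numberOfElements):
    # 'a' (append) so results from every page land in the same file
    with open('output.txt', 'a') as out:
        for itr in range(numberOfElements):
            entry = endpoints_info['content'][itr]
            out.write('{0} {1} {2} {3} {4}\n'.format(
                entry['computerName'], entry['ipAddresses'][0],
                entry['logonUserName'], entry['creationTime'],
                entry['agentVersion']))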

Oauth1.0 API issue with Python

I'm trying to get magiccardmarket.eu API authentication to work in Python, but no matter whether I'm using rauth or requests_oauthlib, I get 403.
My code is:
#!/usr/bin/python
import logging
import rauth
import requests_oauthlib
logging.basicConfig(level=logging.DEBUG)
mkm_app_token = 'B7VI9Qg2xh855WtR'
mkm_app_secret = '<cut>'
mkm_access_token = 'LQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo'
mkm_token_secret = '<cut>'
url = 'https://sandbox.mkmapi.eu/ws/v1.1/account'
# session = rauth.OAuth1Session(
#     consumer_key=mkm_app_token,
#     consumer_secret=mkm_app_secret,
#     access_token=mkm_access_token,
#     access_token_secret=mkm_token_secret,
# )
session = requests_oauthlib.OAuth1Session(
    mkm_app_token,
    client_secret=mkm_app_secret,
    resource_owner_key=mkm_access_token,
    resource_owner_secret=mkm_token_secret,
)
r = session.get(url)
print(r)
When I look at debugging info, everything seems fine (of course besides 403 response):
DEBUG:requests_oauthlib.oauth1_auth:Signing request <PreparedRequest [GET]> using client <Client nonce=None, signature_method=HMAC-SHA1, realm=None, encoding=utf-8, timestamp=None, resource_owner_secret=****, decoding=utf-8, verifier=None, signature_type=AUTH_HEADER, rsa_key=None, resource_owner_key=LQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo, client_secret=****, callback_uri=None, client_key=B7VI9Qg2xh855WtR>
DEBUG:requests_oauthlib.oauth1_auth:Including body in call to sign: False
DEBUG:oauthlib.oauth1.rfc5849:Collected params: [(u'oauth_nonce', u'87129670621454425921416648590'), (u'oauth_timestamp', u'1416648590'), (u'oauth_consumer_key', u'B7VI9Qg2xh855WtR'), (u'oauth_signature_method', u'HMAC-SHA1'), (u'oauth_version', u'1.0'), (u'oauth_token', u'LQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo')]
DEBUG:oauthlib.oauth1.rfc5849:Normalized params: oauth_consumer_key=B7VI9Qg2xh855WtR&oauth_nonce=87129670621454425921416648590&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1416648590&oauth_token=LQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo&oauth_version=1.0
DEBUG:oauthlib.oauth1.rfc5849:Normalized URI: https://sandbox.mkmapi.eu/ws/v1.1/account
DEBUG:oauthlib.oauth1.rfc5849:Base signing string: GET&https%3A%2F%2Fsandbox.mkmapi.eu%2Fws%2Fv1.1%2Faccount&oauth_consumer_key%3DB7VI9Qg2xh855WtR%26oauth_nonce%3D87129670621454425921416648590%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1416648590%26oauth_token%3DLQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo%26oauth_version%3D1.0
DEBUG:oauthlib.oauth1.rfc5849:Signature: 291LTesHZR6W4bjZ1NqSW5hEgoM=
DEBUG:oauthlib.oauth1.rfc5849:Encoding URI, headers and body to utf-8.
DEBUG:requests_oauthlib.oauth1_auth:Updated url: https://sandbox.mkmapi.eu/ws/v1.1/account
DEBUG:requests_oauthlib.oauth1_auth:Updated headers: {'Accept': '*/*', 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate', 'Authorization': 'OAuth oauth_nonce="87129670621454425921416648590", oauth_timestamp="1416648590", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", oauth_consumer_key="B7VI9Qg2xh855WtR", oauth_token="LQj2rUwOFUJsmuJvCTlny1UzGZSXzHjo", oauth_signature="291LTesHZR6W4bjZ1NqSW5hEgoM%3D"', 'User-Agent': 'python-requests/2.4.3 CPython/2.7.8 Darwin/14.0.0'}
DEBUG:requests_oauthlib.oauth1_auth:Updated body: None
INFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): sandbox.mkmapi.eu
DEBUG:requests.packages.urllib3.connectionpool:"GET /ws/v1.1/account HTTP/1.1" 403 None
This is not an issue with the authentication details, which are provided on the account profile page when you request dedicated application API access, since those details work fine with the PHP example provided by the site: https://www.mkmapi.eu/ws/documentation/API:Auth_libcurl
When I go through site's documentation, nothing seems out of ordinary: https://www.mkmapi.eu/ws/documentation/API:Auth_Overview
I honestly don't know where to go from here...
I realized that the code above with requests_oauthlib didn't build the header the way it is laid out in the documentation, so I ended up reinventing the wheel and building the header myself, following the steps outlined in the documentation: https://www.mkmapi.eu/ws/documentation/API:Auth_OAuthHeader
The following script is not very beautiful, but it does its job.
# NOTE: this is Python 2 code (urllib.quote_plus and str.encode('base64') were removed in Python 3)
import requests
from urllib import quote_plus as rawurlencode
import time
import string
import random
import operator
from hashlib import sha1
from hmac import new as hmac

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))

# personal Info - taken from https://www.mkmapi.eu/ws/documentation/API:Auth_Overview
mkmAppToken = 'bfaD9xOU0SXBhtBP'
mkmAppSecret = 'pChvrpp6AEOEwxBIIUBOvWcRG3X9xL4Y'
mkmAccessToken = 'lBY1xptUJ7ZJSK01x4fNwzw8kAe5b10Q'
mkmAccessSecret = 'hc1wJAOX02pGGJK2uAv1ZOiwS7I9Tpoe'

# URL to access on mkm
# note that this deviates from the example in the header documentation (https://www.mkmapi.eu/ws/documentation/API:Auth_OAuthHeader) which uses
#accessUrl = 'https://www.mkmapi.eu/ws/v1.1/account'
accessUrl = 'https://www.mkmapi.eu/ws/v1.1/output.json/account'

# Method for access
MyMethod = "GET"

baseString = MyMethod + "&" + rawurlencode(accessUrl) + "&"

# create a random string
# the documentation in https://www.mkmapi.eu/ws/documentation/API:Auth_OAuthHeader uses
#nonce = 53eb1f44909d6
nonce = id_generator(8)

# what time is it?
# the documentation in https://www.mkmapi.eu/ws/documentation/API:Auth_OAuthHeader uses
#now = 1407917892
now = str(int(time.time()))

MyOauthmethod = "HMAC-SHA1"
MyOauthver = "1.0"

# define Parameters and values, order doesn't matter
paramDict = {"oauth_consumer_key": mkmAppToken, "oauth_token": mkmAccessToken, "oauth_nonce": nonce, "oauth_timestamp": now, "oauth_signature_method": MyOauthmethod, "oauth_version": MyOauthver}

# sorting of parameters is done here
sorted_paramDict = sorted(paramDict.items(), key=operator.itemgetter(0))

# collect the full parameters string
paramStr = ''
for kv in sorted_paramDict:
    paramStr = paramStr + kv[0] + "=" + kv[1] + "&"
# and get rid of the trailing ampersand
paramStr = paramStr[:-1]

# concatenate request and oauth parameters
baseString = baseString + rawurlencode(paramStr)

# concatenate both keys
signingKey = rawurlencode(mkmAppSecret) + "&" + rawurlencode(mkmAccessSecret)

# and create a hashed signature with the key and the baseString
Signature = hmac(signingKey, baseString, sha1).digest().encode('base64')[:-1]

# construct the header from the parameters and the URL and the signature
MyHeader = 'OAuth ' + 'realm="' + accessUrl + '", '
for kv in sorted_paramDict:
    MyHeader += kv[0] + '="' + kv[1] + '",'
MyHeader += 'oauth_signature="' + Signature + '"'
headers = {'Authorization': MyHeader}

# and now requests can do its magic (pun intended)
r = requests.get(accessUrl, headers=headers)
outjson = r.json()
You need to provide the realm as an argument to the OAuth1Session, like so:
session = requests_oauthlib.OAuth1Session(
    mkm_app_token,
    client_secret=mkm_app_secret,
    resource_owner_key=mkm_access_token,
    resource_owner_secret=mkm_token_secret,
    realm=url
)
Other things I have run into in the past include the fact that the mkm api doesn't (or at least didn't) accept URI-escaped parameters, so you may need to unescape them.
For anyone who's reading in 2020: there's no need to reinvent the wheel, just pass the OAuth1 auth object and the parameters to requests. Here's an example with metaproducts/find:
import requests
from requests_oauthlib import OAuth1
import json
import passwords

card_name = 'Tarmogoyf'
output = 'output.json'

base_url = 'https://api.cardmarket.com/ws/v2.0/' + output + '/'
url = base_url + 'metaproducts/find'
params = {'search': card_name}

headeroauth = OAuth1(
    realm = url,
    client_key = passwords.mkm_app_token,
    client_secret = passwords.mkm_app_secret,
    resource_owner_key = passwords.mkm_access_token,
    resource_owner_secret = passwords.mkm_token_secret,
)

response = requests.get(
    url,
    params,
    auth = headeroauth
)

if (response.ok == True):
    json_response = response.json()
    print(json.dumps(json_response, indent=4, sort_keys=True))
else:
    print(str(response.status_code) + " " + response.reason)
    exit()
The /output.json/ part of the URL string makes the output JSON instead of XML.
