After running successfully i suddenly see below error for python script. Not much experienced in python. The script fetch's information over API. Python 2.7.12
/usr/local/lib/python2.7/dist-packages/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown.
warnings.warn(warning, RequestsDependencyWarning)
Traceback (most recent call last):
File "fetch-drives-ncpa.py", line 31, in <module>
data = r.json()
NameError: name 'r' is not defined
Below is the script.
# importing the requests library
import requests
import json
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# defining a params dict for the parameters to be sent to the API
PARAMS = {'token':'dddsxsdsdsd','units':'l'}
openfiledrives = open("device_drivelist.txt", 'w')
openfiledrives.truncate(0)
openfile = open('device_list.txt')
for devicename in openfile.readlines():
devicename = devicename.strip()
# api-endpoint
URL = "https://"+devicename+":5666/api/"
try:
r = requests.get(url = URL, params = PARAMS, verify=False,timeout=30)
r.raise_for_status()
except requests.exceptions.HTTPError as errh:
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
print ("OOps: Something Else",err)
# extracting data in json format
data = r.json()
Machine = data['root']['system']['node']
# print the keys and values
for i in data['root']['disk']['logical']:
Drive = data['root']['disk']['logical'][i]['device_name']
FreeSpace = data['root']['disk']['logical'][i]['free']
TotalSpace = data['root']['disk']['logical'][i]['total_size']
FSType=data['root']['disk']['logical'][i]['opts']
#print Machine.lower(),Drive[0],FreeSpace[0],TotalSpace[0]
#openfiledrives.write('{0}\t{1}\t{2:.0f}\t{3:.0f}\n'.format(Machine.lower(),Drive[0],FreeSpace[0],TotalSpace[0]))
if FSType != 'ro,cdrom':
openfiledrives.write('{0}\t{1}\t{2:.0f}\n'.format(Machine.lower(),Drive[0],FreeSpace[0]))
openfile.close()
openfiledrives.close()
If requests.get raises an exception, no value is ever assigned to r. But you still try to call r.json() following that exception.
Related
I am using a thread pool to send requests in parallel (generate a list of urls, fetch in parallel, request all of the urls concurrently), parsing some data out of the response to a couple of dicts, cross-mapping keys across dicts and then writing it back to pandas.DataFrame
def fetch_point(point_url):
try:
r = requests.get(point_url, headers=self.headers)
r.raise_for_status()
except requests.exceptions.HTTPError as errh:
logging.error(f'HTTP Error: {errh}')
except requests.exceptions.ConnectionError as errc:
logging.error(f'Connection Error: {errc}')
except requests.exceptions.Timeout as errt:
logging.error(f'Timeout Error: {errt}')
except requests.exceptions.RequestException as e:
logging.error(e)
raise SystemExit(e)
result = r.json().get('data')
# print(result)
building_url = result['building']
return point_url, building_url
def fetch_building(building_url):
try:
r = requests.get(building_url, headers=self.headers)
r.raise_for_status()
except requests.exceptions.HTTPError as errh:
logging.error(f'HTTP Error: {errh}')
except requests.exceptions.ConnectionError as errc:
logging.error(f'Connection Error: {errc}')
except requests.exceptions.Timeout as errt:
logging.error(f'Timeout Error: {errt}')
except requests.exceptions.RequestException as e:
logging.error(e)
raise SystemExit(e)
result = r.json().get('data')
building_name = result['name']
return building_url, building_name
pool = ThreadPoolExecutor()
point_urls = df.loc[~df['Point Url'].isnull(), 'Point Url'].to_list()
building_urls = {}
for point_url, building_url in pool.map(fetch_point, point_urls):
building_urls[point_url] = building_url
building_urls_list = building_urls.values()
building_names = {}
for building_url, building_name in pool.map(fetch_building, building_urls_list):
building_names[building_url] = building_name
point_building_map = {k: building_names[building_urls[k]] for k in building_urls}
for key in point_building_map.keys():
df.loc[df['Point Url'] == key, 'Building'] = point_building_map[key]
I am wondering if there is a more optimized approach I could consider. Should I go for asyncio/aiohttp instead of ThreadPoolExecutor?
The code below produces these errors:
'Response' object has no attribute 'read'
Please help me understand what I did wrong. I made sure un and pwd are correct
user = "un"
password = 'pwd'
datanodes = ["https://server040:25000/"]
for i, datanode in enumerate(datanodes):
print("Checking {0}: {1}".format(i, datanode))
try:
print "trying"
response = requests.get(datanode + "queries?json",auth=(user,
password), verify='certs.pem')
print response
data = json.loads(response.read())
print data
if data["num_waiting_queries"] > 0:
print(data["num_waiting_queries"])
for in_flight_query in data["in_flight_queries"]:
if in_flight_query["waiting"] is True and
in_flight_query['state'] == "FINISHED":
cancel_url = datanode + "cancel_query?query_id=
{}".format(in_flight_query['query_id'])
print(cancel_url)
except IOError as ioe:
print ioe
except Exception as e:
print(e)
I have tried both json.load(reponse) and json.loads(response.read())
I was able to get around the issues by adding HTTPDigestAuth and changing data = json.loads(response.read()) to data = response.json()
I am a decently new python coder and i wish to create a twitter bot in which everytime it retweets, it favourites the tweet as well. I am not exactly sure how to do that but when the bot searches, it sends out an error message of 'list index out of range'.
import tweepy, time, traceback
from tweepy.auth import OAuthHandler
from tweepy.streaming import StreamListener, Stream
ckey = ''
csecret = ''
atoken = ''
asecret = ''
auths = OAuthHandler(ckey, csecret)
auths.set_access_token(atoken, asecret)
api = tweepy.API(auths)
class listener(StreamListener):
def on_data(self, raw_data):
try:
tweet_text = raw_data.lower().split('"text":')[1].split('","source":"')[0].replace(",", "")
screen_name = raw_data.lower().split('"screen_name":"')[1].split('","location"')[0].replace(",", "")
tweet_cid = raw_data.split('"id:')[1].split('"id_str":')[0].replace(",", "")
#there is ment to be 4 spaces at tweet_text
accs = [''] # banned accounts screen name goes in here
words = ['hate' , 'derp' , 'racist' , 'evil' , 'keemstar' , 'mario' , 'kirby'] #banned words goes in here
if not any(acc in screen_name.lower() for acc in accs):
if not any(word in tweet_text.lower() for word in words):
fav(tweet_cid)
follow(screen_name)
retweet(tweet_cid)
tweet(myinput)
#call what u want to do here
#fav(tweet_cid)
#retweet(tweet_cid)
return True
except Exception as e:
print (str(e)) # prints the error message, if you dont want it to comment it out.
pass
def on_error(self, status_code):
try:
print( "error" + status_code)
except Exception as e:
print(str(e))
pass
def retweet(tweet_cid):
try:
api.retweet(tweet_cid)
time.sleep(random.randit(range(50,900)))
except Exception as e:
print(str(e))
pass
def follow(screen_name):
try:
api.create_friendship(screen_name)
time.sleep(random.randit(range(50,900)))
except Exception as e:
print(str(e))
pass
def fav(tweet_cid):
try:
api.create_favourite(tweet_cid)
time.sleep(random.randit(range(600,1100)))
except Exception as e:
print(str(e))
pass
def unfav(tweet_cid):
try:
api.destroy_tweet(tweet_cid)
time.sleep(random.randit(range(8000,9000)))
except Exception as e:
print(str(e))
pass
def tweet(myinput):
try:
api.update_status(myinput)
time.sleep(random.randit(range(1000,4000)))
except Exception as e:
print(str(e))
pass
# tags below
track_words = [""] #deleted all tags so easier to read
follow_acc = [] # all username converted to user ids
try:
twt = Stream(auths, listener())
twt.filter(track=track_words, follow = follow_acc)
except Exception as e:
print (str(e))
pass
Is this what you are asking for? It gives the stack trace of the exception.
import traceback
try:
s='hi'
s=s+1
except Exception as e:
print(traceback.format_exc())
Output:
Traceback (most recent call last):
File "<stdin>", line 3, in <module>
TypeError: cannot concatenate 'str' and 'int' objects
Hope this helps! :)
I always get a Type Error when I run the following python code (abc.py) as follows:
./abc.py activatelink alphabeta
Type Error: ['alphabeta']
My code:
#!/usr/bin/python
import urllib2
from urllib2 import URLError
from urllib2 import HTTPError
import requests
import urllib
import json
import time
import os
import sys
import hashlib
def activate_user(link):
print invoke_rest('GET', link)
def invoke_rest(request_type, rest_url, payload, headers):
try:
api_url = rest_url
if request_type == 'GET':
r = requests.get(api_url)
to_ret = {'code':r.status_code, 'reply':r.text}
return to_ret
elif request_type == 'POST':
r = requests.post(api_url, data=payload, headers=headers)
to_ret = {'code':r.status_code, 'reply':r.text}
return to_ret
else:
return "Invalid request type ", request_type
except Exception, e:
return "Exception:", e, " in getting the API call"
def help():
print ('Usage: %s { activate | help }', os.path.basename(sys.argv[0])
if __name__ == '__main__':
actions = {'activatelink': activate_user, 'help': help}
try:
action = str(sys.argv[1])
except IndexError:
print "IndexError: ", sys.argv[1]
action = 'help'
args = sys.argv[2:]
try:
actions[action](*args)
except (KeyError):
print "Key Error:", args
help()
except (TypeError):
print "Type Error:", args
help()
Am I doing anything wrong? I added some other functions other than activatelink, which work fine, can anyone point out whats wrong in here?
Your invoke_rest() function takes four arguments:
def invoke_rest(request_type, rest_url, payload, headers):
but you pass in just the two:
print invoke_rest('GET', link)
That raises a TypeError exception:
>>> def invoke_rest(request_type, rest_url, payload, headers):
... pass
...
>>> invoke_rest('GET', 'alphabeta')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: invoke_rest() takes exactly 4 arguments (2 given)
Perhaps you wanted those two extra arguments (payload and headers) to be optional. If so, make them keyword arguments and set their default value to None:
def invoke_rest(request_type, rest_url, payload=None, headers=None):
which is fine by the requests library.
I'm using python 3.3.0 in Windows 7.
I have two files: dork.txt and fuzz.py
dork.txt contains following:
/about.php?id=1
/en/company/news/full.php?Id=232
/music.php?title=11
fuzz.py contains following:
srcurl = "ANY-WEBSITE"
drkfuz = open("dorks.txt", "r").readlines()
print("\n[+] Number of dork names to be fuzzed:",len(drkfuz))
for dorks in drkfuz:
dorks = dorks.rstrip("\n")
srcurl = "http://"+srcurl+dorks
requrl = urllib.request.Request(srcurl)
#httpreq = urllib.request.urlopen(requrl)
# Starting the request
try:
httpreq = urllib.request.urlopen(requrl)
except urllib.error.HTTPError as e:
print ("[!] Error code: ", e.code)
print("")
#sys.exit(1)
except urllib.error.URLError as e:
print ("[!] Reason: ", e.reason)
print("")
#sys.exit(1)
#if e.code != 404:
if httpreq.getcode() == 200:
print("\n*****srcurl********\n",srcurl)
return srcurl
So, when I enter the correct website name which has /about.php?id=1, it works fine.
But when I provide the website which has /en/company/news/full.php?Id=232, it first
prints Error code: 404 and then gives me the following error: UnboundLocalError: local
variable 'e' referenced before assignment or UnboundLocalError: local variable 'httpreq' referenced before assignment
I can understand that if the website doesn't have the page which contains /about.php?id=1, it gives Error code: 404 but why it's not going back in the for loop to check the remaining dorks in the text file??? Why it stops here and throws an error?
I want to make a script to find out valid page from just a website address like: www.xyz.com
When the line urllib.request.urlopen(requrl) expression throws an exception, the variable httpreq is never set. You could set it to None before the try statement, then test if it is still None afterwards:
httpreq = None
try:
httpreq = urllib.request.urlopen(requrl)
# ...
if httpreq is not None and httpreq.getcode() == 200:
srcurl = "ANY-WEBSITE"
drkfuz = open("dorks.txt", "r").readlines()
print("\n[+] Number of dork names to be fuzzed:",len(drkfuz))
for dorks in drkfuz:
dorks = dorks.rstrip("\n")
srcurl = "http://"+srcurl+dorks
try:
requrl = urllib.request.Request(srcurl)
if requrl != None and len(requrl) > 0:
try:
httpreq = urllib.request.urlopen(requrl)
if httpreq.getcode() == 200:
print("\n*****srcurl********\n",srcurl)
return srcurl
except:
# Handle exception
pass
except:
# Handle your exception
print "Exception"
Untested code, but it will work logically.