The code below produces these errors:
'Response' object has no attribute 'read'
Please help me understand what I did wrong. I made sure un and pwd are correct
user = "un"
password = 'pwd'
datanodes = ["https://server040:25000/"]
for i, datanode in enumerate(datanodes):
print("Checking {0}: {1}".format(i, datanode))
try:
print "trying"
response = requests.get(datanode + "queries?json",auth=(user,
password), verify='certs.pem')
print response
data = json.loads(response.read())
print data
if data["num_waiting_queries"] > 0:
print(data["num_waiting_queries"])
for in_flight_query in data["in_flight_queries"]:
if in_flight_query["waiting"] is True and
in_flight_query['state'] == "FINISHED":
cancel_url = datanode + "cancel_query?query_id=
{}".format(in_flight_query['query_id'])
print(cancel_url)
except IOError as ioe:
print ioe
except Exception as e:
print(e)
I have tried both json.load(reponse) and json.loads(response.read())
I was able to get around the issues by adding HTTPDigestAuth and changing data = json.loads(response.read()) to data = response.json()
Related
I am trying to work on a website and the code works fine, but sometimes the response text has a specific string error happened. If that string appears, I need to send request to that item again
Here's my try but I still got some results with that string error happened
for item in mylist:
while True:
response = requests.get(f'myurl/{item}', headers=headers)
res_text = response.text
if 'SUCESSFUL EXECUTION' in res_text:
scraped_item = (item, 'PAY IT')
else:
json_data=json.loads(res_text)
scraped_item = (item, json_data['errorMsg'])
print(scraped_item)
results.append(scraped_item)
if not 'error happened' in res_text:break
I could solve it using while True trick
for item in mylist:
while True:
response = requests.get(f'myurl/{item}', headers=headers)
res_text = response.text
if not 'error happened' in res_text:
break
if 'SUCESSFUL EXECUTION' in res_text:
scraped_item = (item, 'PAY IT')
else:
json_data=json.loads(res_text)
scraped_item = (item, json_data['errorMsg'])
print(scraped_item)
results.append(scraped_item)
time.sleep(1)
I have an error handling module that I'm using in my main script to make requests to an API. I want to return "response" and "data" to use in the main script. It works up until trying to print "response". Apologies for inconsistencies, I am obviously still learning. And I won't learn without making a few mistakes first. I appreciate constructive criticism.
my_module
import requests
import json
def errorHandler(url):
try:
response = requests.get(url, timeout=5)
status = response.status_code
data = response.json()
except requests.exceptions.Timeout:
print "Timeout error.\n"
except requests.exceptions.ConnectionError:
print "Connection error.\n"
except ValueError:
print "ValueError: No JSON object could be decoded.\n"
else:
if response.status_code == 200:
print "Status: 200 OK \n"
elif response.status_code == 400:
print "Status: " + str(status) + " error. Bad request."
print "Correlation ID: " + str(data['correlationId']) + "\n"
else:
print "Status: " + str(status) + " error.\n"
return response
return data
my_script
errorHandler("https://api.weather.gov/alerts/active")
print "Content type is " + response.headers['content-type'] +".\n" #expect geo+json
# I need the data from the module to do this, but not for each get request
nwsId = data['features'][0]['properties']['id']
error
Traceback (most recent call last):
File "my_script.py", line 20, in <module>
print errorHandler.response
AttributeError: 'function' object has no attribute 'response'
If you want to return multiple values, you return them as a tuple in a single statement:
return response, data
Then in the caller, you assign them to variables with a tuple assignment:
response, data = errorHandler("https://api.weather.gov/alerts/active")
print "Content type is " + response.headers['content-type'] +".\n"
nwsId = data['features'][0]['properties']['id']
However, your function will not work correctly if any of the exceptions occur. If there's an exception, it won't set the variables response or data, so when it tries to return them it will get an error.
I'm playing around the Twitter API and am in the process of developing a script to pull all Tweets with a certain hashtag down to a local mongoDB. I have it working fine when I'm downloading tweets from users, but when downloading tweets from a hashtag I get:
return loads(fp.read(),
AttributeError: 'int' object has no attribute 'read'
Can anyone offer their infinite wisdom into how I could get this script to work?
To run, save it as a .py file, cd to the folder and run:
python twitter.py
Code:
__author__ = 'Tom Cusack'
import pymongo
import oauth2 as oauth
import urllib2, json
import sys, argparse, time
def oauth_header(url, consumer, token):
params = {'oauth_version': '1.0',
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
}
req = oauth.Request(method = 'GET',url = url, parameters = params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(),consumer, token)
return req.to_header()['Authorization'].encode('utf-8')
def main():
### Twitter Settings
numtweets = '32000'
verbose = 'store_true'
retweet = 'store_false'
CONSUMER_KEY = 'M7Xu9Wte0eIZvqhb4G9HnIn3G'
CONSUMER_SECRET = 'c8hB4Qwps2aODQUx7UsyzQuCRifEp3PKu6hPQll8wnJGIhbKgZ'
ACCESS_TOKEN = '3213221313-APuXuNjVMbRbZpu6sVbETbgqkponGsZJVT53QmG'
ACCESS_SECRET = 'BJHrqWC9ed3pA5oDstSMCYcUcz2pYF3DmJ7jcuDe7yxvi'
base_url = url = 'https://api.twitter.com/1.1/search/tweets.json?include_entities=true&count=200&q=#mongodb&include_rts=%s' % (retweet)
oauth_consumer = oauth.Consumer(key = CONSUMER_KEY, secret = CONSUMER_SECRET)
oauth_token = oauth.Token(key = ACCESS_TOKEN, secret = ACCESS_SECRET)
### Mongodb Settings
uri = 'mongodb://127.0.0.1:27017/SARKY'
if uri != None:
try:
conn = pymongo.MongoClient(uri)
print 'Pulling Tweets..'
except:
print 'Error: Unable to connect to DB. Check uri variable.'
return
uri_parts = pymongo.uri_parser.parse_uri(uri)
db = conn[uri_parts['database']]
db['twitter-harvest'].ensure_index('id_str')
### Helper Variables for Harvest
max_id = -1
tweet_count = 0
stream = 0
### Begin Harvesting
while True:
auth = oauth_header(url, oauth_consumer, oauth_token)
headers = {"Authorization": auth}
request = urllib2.Request(url, headers = headers)
try:
stream = urllib2.urlopen(request)
except urllib2.HTTPError, err:
if err.code == 404:
print 'Error: Unknown user. Check --user arg'
return
if err.code == 401:
print 'Error: Unauthorized. Check Twitter credentials'
return
tweet_list = json.load(stream)
if len(tweet_list) == 0:
print 'No tweets to harvest!'
return
if 'errors' in tweet_list:
print 'Hit rate limit, code: %s, message: %s' % (tweets['errors']['code'], tweets['errors']['message'])
return
if max_id == -1:
tweets = tweet_list
else:
tweets = tweet_list[1:]
if len(tweets) == 0:
print 'Finished Harvest!'
return
for tweet in tweets:
max_id = id_str = tweet['id_str']
try:
if tweet_count == numtweets:
print 'Finished Harvest- hit numtweets!'
return
if uri != None:
db[user].update({'id_str':id_str},tweet,upsert = True)
else:
print tweet['text']
tweet_count+=1
if verbose == True and uri != None:
print tweet['text']
except Exception, err:
print 'Unexpected error encountered: %s' %(err)
return
url = base_url + '&max_id=' + max_id
if __name__ == '__main__':
try:
main()
except SystemExit as e:
if e.code == 0:
pass
You initially set stream = 0. When your try...except block catches a HTTP response with a code that isn't 404 or 401, stream is still equal to 0, but your except block doesn't break out of the function.
I'd look more closely at what this response says.
I'm using python 3.3.0 in Windows 7.
I have two files: dork.txt and fuzz.py
dork.txt contains following:
/about.php?id=1
/en/company/news/full.php?Id=232
/music.php?title=11
fuzz.py contains following:
srcurl = "ANY-WEBSITE"
drkfuz = open("dorks.txt", "r").readlines()
print("\n[+] Number of dork names to be fuzzed:",len(drkfuz))
for dorks in drkfuz:
dorks = dorks.rstrip("\n")
srcurl = "http://"+srcurl+dorks
requrl = urllib.request.Request(srcurl)
#httpreq = urllib.request.urlopen(requrl)
# Starting the request
try:
httpreq = urllib.request.urlopen(requrl)
except urllib.error.HTTPError as e:
print ("[!] Error code: ", e.code)
print("")
#sys.exit(1)
except urllib.error.URLError as e:
print ("[!] Reason: ", e.reason)
print("")
#sys.exit(1)
#if e.code != 404:
if httpreq.getcode() == 200:
print("\n*****srcurl********\n",srcurl)
return srcurl
So, when I enter the correct website name which has /about.php?id=1, it works fine.
But when I provide the website which has /en/company/news/full.php?Id=232, it first
prints Error code: 404 and then gives me the following error: UnboundLocalError: local
variable 'e' referenced before assignment or UnboundLocalError: local variable 'httpreq' referenced before assignment
I can understand that if the website doesn't have the page which contains /about.php?id=1, it gives Error code: 404 but why it's not going back in the for loop to check the remaining dorks in the text file??? Why it stops here and throws an error?
I want to make a script to find out valid page from just a website address like: www.xyz.com
When the line urllib.request.urlopen(requrl) expression throws an exception, the variable httpreq is never set. You could set it to None before the try statement, then test if it is still None afterwards:
httpreq = None
try:
httpreq = urllib.request.urlopen(requrl)
# ...
if httpreq is not None and httpreq.getcode() == 200:
srcurl = "ANY-WEBSITE"
drkfuz = open("dorks.txt", "r").readlines()
print("\n[+] Number of dork names to be fuzzed:",len(drkfuz))
for dorks in drkfuz:
dorks = dorks.rstrip("\n")
srcurl = "http://"+srcurl+dorks
try:
requrl = urllib.request.Request(srcurl)
if requrl != None and len(requrl) > 0:
try:
httpreq = urllib.request.urlopen(requrl)
if httpreq.getcode() == 200:
print("\n*****srcurl********\n",srcurl)
return srcurl
except:
# Handle exception
pass
except:
# Handle your exception
print "Exception"
Untested code, but it will work logically.
i'm vey new to python and i wrote this code
def geturl(url):
url = urllib.quote(url,'/:')
log( __name__ ,"Getting url:[%s]" % url)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
content = response.read()
return_url = response.geturl()
response.close()
except Exception, e:
log( __name__ ,"Failed to get url because %s" % str(e))
content = None
return_url = None
return(content, return_url)
def myFunc():
searchurl="http://www.google.com"
socket.setdefaulttimeout(3)
content, response_url = geturl(searchurl)
if content is None:
log( __name__ , "Content is None!!")
content = content.replace("string1", "string2")
when i run it, i get this error: NoneType object is not callable.
i can't understand why... i learned that NoneType is the type of None, but i check if "content" variable is None and it's not.
thx for your help
You're not giving the erroneous line, however I suspect the following to be wrong:
if content is None:
log( __name__ , "Content is None!!")
content = content.replace("string1", "string2")
When content is indeed None, a log is emitted but the function keeps executing, until the call to replace raises an exception.