Python handling socket.error: [Errno 104] Connection reset by peer

When using Python 2.7 with urllib2 to retrieve data from an API, I get the error [Errno 104] Connection reset by peer. What's causing the error, and how should it be handled so that the script does not crash?
ticker.py
import httplib
import urllib2


def urlopen(url):
    response = None
    request = urllib2.Request(url=url)
    try:
        response = urllib2.urlopen(request).read()
    except urllib2.HTTPError as err:
        print "HTTPError: {} ({})".format(url, err.code)
    except urllib2.URLError as err:
        print "URLError: {} ({})".format(url, err.reason)
    except httplib.BadStatusLine as err:
        print "BadStatusLine: {}".format(url)
    return response


def get_rate(from_currency="EUR", to_currency="USD"):
    url = "https://finance.yahoo.com/d/quotes.csv?f=sl1&s=%s%s=X" % (
        from_currency, to_currency)
    data = urlopen(url)
    if "%s%s" % (from_currency, to_currency) in data:
        return float(data.strip().split(",")[1])
    return None


counter = 0
while True:
    counter = counter + 1
    if counter == 0 or counter % 10:
        rateEurUsd = float(get_rate('EUR', 'USD'))
        # does more stuff here
Traceback
Traceback (most recent call last):
File "/var/www/testApp/python/ticker.py", line 71, in <module>
rateEurUsd = float(get_rate('EUR', 'USD'))
File "/var/www/testApp/python/ticker.py", line 29, in get_exchange_rate
data = urlopen(url)
File "/var/www/testApp/python/ticker.py", line 16, in urlopen
response = urllib2.urlopen(request).read()
File "/usr/lib/python2.7/urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib/python2.7/urllib2.py", line 406, in open
response = meth(req, response)
File "/usr/lib/python2.7/urllib2.py", line 519, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python2.7/urllib2.py", line 438, in error
result = self._call_chain(*args)
File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 625, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "/usr/lib/python2.7/urllib2.py", line 406, in open
response = meth(req, response)
File "/usr/lib/python2.7/urllib2.py", line 519, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python2.7/urllib2.py", line 438, in error
result = self._call_chain(*args)
File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 625, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "/usr/lib/python2.7/urllib2.py", line 400, in open
response = self._open(req, data)
File "/usr/lib/python2.7/urllib2.py", line 418, in _open
'_open', req)
File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 1207, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/usr/lib/python2.7/urllib2.py", line 1180, in do_open
r = h.getresponse(buffering=True)
File "/usr/lib/python2.7/httplib.py", line 1030, in getresponse
response.begin()
File "/usr/lib/python2.7/httplib.py", line 407, in begin
version, status, reason = self._read_status()
File "/usr/lib/python2.7/httplib.py", line 365, in _read_status
line = self.fp.readline()
File "/usr/lib/python2.7/socket.py", line 447, in readline
data = self._sock.recv(self._rbufsize)
socket.error: [Errno 104] Connection reset by peer
error: Forever detected script exited with code: 1

"Connection reset by peer" is the TCP/IP equivalent of slamming the phone back on the hook. It's more polite than merely not replying, leaving one hanging. But it's not the FIN-ACK expected of the truly polite TCP/IP converseur. (From other SO answer)
So you can't do anything about it, it is the issue of the server.
But you could use try .. except block to handle that exception:
from socket import error as SocketError
import errno

try:
    response = urllib2.urlopen(request).read()
except SocketError as e:
    if e.errno != errno.ECONNRESET:
        raise  # Not error we are looking for
    pass  # Handle error here.
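Folding that into the urlopen() helper from the question, a minimal sketch with a couple of retries could look like this (the retry count and the delay are arbitrary choices, not from the original answer):

import errno
import socket
import time
import urllib2


def urlopen_with_retry(url, retries=3, delay=2):
    """Fetch url, retrying a few times when the peer resets the connection."""
    for attempt in range(retries):
        try:
            return urllib2.urlopen(urllib2.Request(url=url)).read()
        except socket.error as e:
            if e.errno != errno.ECONNRESET:
                raise  # some other socket problem, re-raise it
            print "Connection reset by peer, retry {} for {}".format(attempt + 1, url)
            time.sleep(delay)
    return None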

You can try adding some time.sleep calls to your code.
It looks like the server limits the number of requests per time unit (hour, day, second) as a security measure. You will need to guess what that limit is (maybe with another script that counts requests?) and adjust your script so it stays below it.
To keep your code from crashing, also catch this error with a try..except block around the urllib2 calls, for example as sketched below.
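For instance, a paced version of the polling loop from the question; the 5-second pause is a guess and would need tuning to whatever limit the server actually enforces:

import socket
import time

counter = 0
while True:
    counter = counter + 1
    if counter == 0 or counter % 10:
        try:
            # get_rate() as defined in the question; float(None) raises TypeError
            rateEurUsd = float(get_rate('EUR', 'USD'))
            # does more stuff here
        except (socket.error, TypeError):
            pass  # connection was reset or no rate was returned; skip this round
    time.sleep(5)  # guessed pause to keep the request rate below the server's limit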

In Python 3 there is a way to catch the error directly in the except clause as ConnectionResetError, which makes it easier to isolate exactly the right error.
This example also catches the timeout.
from urllib.request import urlopen
from socket import timeout

url = "http://......"
try:
    string = urlopen(url, timeout=5).read()
except ConnectionResetError:
    print("==> ConnectionResetError")
except timeout:
    print("==> Timeout")
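If the same code also has to run on Python 2 (where ConnectionResetError does not exist), one hedged option is to fall back to the errno check from the first answer. A small sketch under that assumption (the URL is a placeholder):

import errno
import socket

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

try:
    data = urlopen("http://example.com/", timeout=5).read()  # placeholder URL
except socket.error as e:
    # On Python 3, ConnectionResetError is a subclass of OSError (== socket.error),
    # so this errno check works on both versions.
    if e.errno != errno.ECONNRESET:
        raise
    data = None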

There are two things you can try.

1. You are requesting too frequently. Try sleeping after each request:

time.sleep(1)

2. The server detects that the requesting client is Python and rejects it. Add a User-Agent header to work around this:

headers = {
    "Content-Type": "application/json;charset=UTF-8",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)"
}
try:
    res = requests.post("url", json=req, headers=headers)
except Exception as e:
    print(e)

The second solution saved me.
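The snippet above uses the requests library; the equivalent User-Agent workaround with the urllib2 code from the original question might look roughly like this (the header value is just an example string):

import urllib2

url = "https://finance.yahoo.com/d/quotes.csv?f=sl1&s=EURUSD=X"
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)",
}

request = urllib2.Request(url, headers=headers)
try:
    response = urllib2.urlopen(request).read()
except urllib2.URLError as err:
    print "URLError: {}".format(err.reason)
    response = None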

Related

Keep receiving "HTTP Error 429: Too Many Requests" with any delay

A simple web scraping script I wrote a few weeks back keeps coming up with the error:
HTTP Error 429: Too Many Requests
The code is designed to take input from an Excel file and find and download PDFs online.
I'm not too familiar with requests, but I've slowed down the rate of requests to see how many it can handle, and the problem seems unrelated to the rate: the code gets through roughly the same number of inputs (around 30) whether the delays I set are 5 seconds or 20 seconds. Here is the error message that keeps coming up:
Traceback (most recent call last):
File "D:\Python\New folder\Web Scraper.py", line 17, in <module>
for url in search(searchquery, stop=1, pause=2):
File "D:\Python\lib\site-packages\google-2.0.2-py3.7.egg\googlesearch\__init__.py", line 288, in search
html = get_page(url, user_agent)
File "D:\Python\lib\site-packages\google-2.0.2-py3.7.egg\googlesearch\__init__.py", line 154, in get_page
response = urlopen(request)
File "D:\Python\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "D:\Python\lib\urllib\request.py", line 531, in open
response = meth(req, response)
File "D:\Python\lib\urllib\request.py", line 641, in http_response
'http', request, response, code, msg, hdrs)
File "D:\Python\lib\urllib\request.py", line 563, in error
result = self._call_chain(*args)
File "D:\Python\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "D:\Python\lib\urllib\request.py", line 755, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "D:\Python\lib\urllib\request.py", line 531, in open
response = meth(req, response)
File "D:\Python\lib\urllib\request.py", line 641, in http_response
'http', request, response, code, msg, hdrs)
File "D:\Python\lib\urllib\request.py", line 569, in error
return self._call_chain(*args)
File "D:\Python\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "D:\Python\lib\urllib\request.py", line 649, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 429: Too Many Requests
And here is the code that I wrote:
import xlrd, requests
from googlesearch import search
from time import sleep

xlloc = ("D:/VesselBase.xlsx")
# Excel location
ws = xlrd.open_workbook(xlloc)
sheet = ws.sheet_by_index(0)
# Sheet name/index
sheet.cell_value(0, 0)
for i in range(sheet.nrows):
    vesselname = sheet.cell_value(i, 1)
    vesselimo = sheet.cell_value(i, 0)
    # Which column/row to choose, 2nd column for vessels. 0=A/1.
    searchquery = 'Vessel specification information "%s" OR "%s" filetype:pdf' % (vesselname, vesselimo)
    print('Searching "%s"' % searchquery)
    for url in search(searchquery, stop=1, pause=20):
        print('Searched for %s' % vesselname)
        print('Found %s' % url)
        open('D:/Newfolder/%s.pdf' % vesselname, 'wb').write(requests.get(url).content)
        # Where to save
        print('Saved %s' % vesselname)

HTTP ERROR in Python

I seem to be getting this error with urllib.request, and it gives me a URL error that I can't seem to fix.
Traceback (most recent call last):
File "C:\Users\Jarvis\Documents\Python Scripts\MultiCheck by Koala.py", line 133, in <module>
Migration()
File "C:\Users\Jarvis\Documents\Python Scripts\MultiCheck by Koala.py", line 116, in Migration
rawdata_uuid = urllib.request.urlopen(url)
File "C:\Python34\lib\urllib\request.py", line 161, in urlopen
return opener.open(url, data, timeout)
File "C:\Python34\lib\urllib\request.py", line 469, in open
response = meth(req, response)
File "C:\Python34\lib\urllib\request.py", line 579, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python34\lib\urllib\request.py", line 507, in error
return self._call_chain(*args)
File "C:\Python34\lib\urllib\request.py", line 441, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 587, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 429: 42
The code I'm using here is for a migration checker for a game:
def Migration():
    url = "https://api.mojang.com/users/profiles/minecraft/" + einfos
    rawdata = urllib.request.urlopen(url)
    newrawdata = rawdata.read()
    jsondata = json.loads(newrawdata.decode('utf-8'))
    results = jsondata['id']
    url = "https://sessionserver.mojang.com/session/minecraft/profile/" + results
    rawdata_uuid = urllib.request.urlopen(url)
    newrawdata_uuid = rawdata_uuid.read()
    jsondata_uuid = json.loads(newrawdata_uuid.decode('utf-8'))
    try:
        results = jsondata_uuid['legacy']
        print("Unmigrated")
    except:
        print("Migrated")
Error 429 means Too Many Requests: you seem to have hit a rate limit. The additional number given is the number of seconds you have to wait before the limitation is dropped. So try again in 42 seconds, or later.
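A hedged sketch of acting on that hint, assuming the wait time really is encoded in the error message as described (the regex parsing and the 60-second fallback are my own additions, not part of the answer):

import re
import time
import urllib.error
import urllib.request


def fetch(url, max_tries=3):
    """Fetch url, backing off when the server answers 429 Too Many Requests."""
    for _ in range(max_tries):
        try:
            return urllib.request.urlopen(url).read()
        except urllib.error.HTTPError as err:
            if err.code != 429:
                raise
            # e.g. "HTTP Error 429: 42" -> wait 42 seconds; fall back to 60 s
            match = re.search(r"\d+", str(err.reason))
            time.sleep(int(match.group()) if match else 60)
    return None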

python gets stuck loading page with mechanize

While using mechanize to open and process a lot of pages (1000+) on a website, I have hit a strange problem. Every now and then it gets stuck trying to load a page, without timing out. The problem doesn't seem to be page specific: if I run the script again and try to open the same page it works like a charm, so it seems to happen at random.
I'm using this function to open pages:
def openMechanize(br, url):
    while True:
        try:
            print time.localtime()
            print "opening: " + url
            resp = br.open(url, timeout = 2.5)
            print "done\n"
            return resp
        except Exception, errormsg:
            print repr(errormsg)
            print "failed to load page, retrying"
            time.sleep(0.5)
When it gets stuck it produces the first prints (the current time and the "opening: ..." line) but never gets to "done". I have let it run for hours but nothing happens.
When I interrupt the script with ctrl+c while it is stuck, I get the following output:
File "test.py", line 143, in openMechanize
resp = br.open(url, timeout = 2.5)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_mechanize.py", line 203, in open
return self._mech_open(url, data, timeout=timeout)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_mechanize.py", line 230, in _mech_open
response = UserAgentBase.open(self, request, data)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_opener.py", line 193, in open
response = urlopen(self, req, data)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_urllib2_fork.py", line 344, in _open
'_open', req)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_urllib2_fork.py", line 332, in _call_chain
result = func(*args)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_urllib2_fork.py", line 1142, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/usr/local/lib/python2.7/dist-packages/mechanize/_urllib2_fork.py", line 1116, in do_open
r = h.getresponse()
File "/usr/lib/python2.7/httplib.py", line 1045, in getresponse
response.begin()
File "/usr/lib/python2.7/httplib.py", line 409, in begin
version, status, reason = self._read_status()
File "/usr/lib/python2.7/httplib.py", line 365, in _read_status
line = self.fp.readline(_MAXLINE + 1)
File "/usr/lib/python2.7/socket.py", line 476, in readline
data = self._sock.recv(self._rbufsize)
KeyboardInterrupt
Upon inspecting socket.py, where it gets stuck, I see the following:
self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
while True:
    try:
        data = self._sock.recv(self._rbufsize)
    except error, e:
        if e.args[0] == EINTR:
            continue
        raise
It looks like it gets stuck in an endless loop because recv for some reason keeps failing.
Has anyone experienced this error and found some sort of fix?
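This question has no fix in the thread itself, but one workaround that is often suggested for silent hangs like this is to set a global socket timeout as a safety net, so that a recv that never returns eventually raises socket.timeout and the retry loop in openMechanize() gets another chance. A rough, untested sketch (the 30-second value and the URL are arbitrary):

import socket
import mechanize

# Any blocking socket operation that takes longer than this now raises
# socket.timeout instead of hanging forever. 30 s is an arbitrary choice.
socket.setdefaulttimeout(30)

br = mechanize.Browser()
# openMechanize() is the retry wrapper from the question; the URL is a placeholder
resp = openMechanize(br, "http://example.com/some/page")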

urllib2 retrieve an arbitrary file based on URL and save it into a named file

I am writing a Python script that uses the urllib2 module as an equivalent to the command line utility wget. The only functionality I need is to retrieve an arbitrary file from a URL and save it into a named file. I also only need to worry about two command line arguments: the URL from which the file is to be downloaded and the name of the file into which the contents are to be saved.
Example:
python Prog7.py www.python.org pythonHomePage.html
This is my code:
import urllib
import urllib2
#import requests

url = 'http://www.python.org/pythonHomePage.html'

print "downloading with urllib"
urllib.urlretrieve(url, "code.txt")

print "downloading with urllib2"
f = urllib2.urlopen(url)
data = f.read()
with open("code2.txt", "wb") as code:
    code.write(data)
urllib seems to work but urllib2 does not.
Errors received:
File "Problem7.py", line 11, in <module>
f = urllib2.urlopen(url)
File "/usr/lib64/python2.6/urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib64/python2.6/urllib2.py", line 397, in open
response = meth(req, response)
File "/usr/lib64/python2.6/urllib2.py", line 510, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib64/python2.6/urllib2.py", line 429, in error
result = self._call_chain(*args)
File "/usr/lib64/python2.6/urllib2.py", line 369, in _call_chain
result = func(*args)
File "/usr/lib64/python2.6/urllib2.py", line 616, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "/usr/lib64/python2.6/urllib2.py", line 397, in open
response = meth(req, response)
File "/usr/lib64/python2.6/urllib2.py", line 510, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib64/python2.6/urllib2.py", line 435, in error
return self._call_chain(*args)
File "/usr/lib64/python2.6/urllib2.py", line 369, in _call_chain
result = func(*args)
File "/usr/lib64/python2.6/urllib2.py", line 518, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
urllib2.HTTPError: HTTP Error 404: NOT FOUND
The URL simply doesn't exist; https://www.python.org/pythonHomePage.html is indeed a 404 Not Found page.
The difference between urllib and urllib2, then, is that the latter automatically raises an exception when a 404 page is returned, while urllib.urlretrieve() just saves the error page for you:
>>> import urllib
>>> urllib.urlopen('https://www.python.org/pythonHomePage.html').getcode()
404
>>> import urllib2
>>> urllib2.urlopen('https://www.python.org/pythonHomePage.html')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 127, in urlopen
return _opener.open(url, data, timeout)
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 410, in open
response = meth(req, response)
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 523, in http_response
'http', request, response, code, msg, hdrs)
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 448, in error
return self._call_chain(*args)
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 382, in _call_chain
result = func(*args)
File "/Users/mj/Development/Library/buildout.python/parts/opt/lib/python2.7/urllib2.py", line 531, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
urllib2.HTTPError: HTTP Error 404: NOT FOUND
If you wanted to save the error page, you can catch the urllib2.HTTPError exception:
try:
    f = urllib2.urlopen(url)
    data = f.read()
except urllib2.HTTPError as err:
    data = err.read()
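Putting that together with the two command-line arguments the assignment asks for, a minimal wget-like sketch might look as follows (the argument handling and messages are my additions, not part of the original answer, and the URL is assumed to include its http:// scheme):

import sys
import urllib2


def main():
    # usage: python Prog7.py <url> <output-file>
    url, filename = sys.argv[1], sys.argv[2]
    try:
        data = urllib2.urlopen(url).read()
    except urllib2.HTTPError as err:
        print "HTTP Error {}: saving the error page instead".format(err.code)
        data = err.read()
    with open(filename, "wb") as out:
        out.write(data)


if __name__ == "__main__":
    main()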
This is due to the different behavior of urllib and urllib2.
Since the web page returns a 404 error (web page not found), urllib2 raises an HTTPError exception, while urllib simply downloads the html of the returned page regardless of the error.
If you want to save that html to the text file, you can read it from the error:
import urllib2

try:
    data = urllib2.urlopen('http://www.python.org/pythonHomePage.html').read()
except urllib2.HTTPError as e:
    print e.code
    print e.msg
    print e.headers
    body = e.fp.read()  # read the error body once; a second read() would return an empty string
    print body
    with open("code2.txt", "wb") as code:
        code.write(body)
req will be a Request object, fp will be a file-like object with the
HTTP error body, code will be the three-digit code of the error, msg
will be the user-visible explanation of the code and hdrs will be a
mapping object with the headers of the error.
More data about HTTP error: urllib2 documentation

After importing urllib2_file library my code is not working for proxy handling

Without importing urllib2_file my code works fine:
import urllib2
import urllib
import random
import mimetypes
import string
import urllib2_file
proxy = urllib2.ProxyHandler({'http': '10.200.1.26'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
u = urllib2.urlopen("http://127.0.0.1:3333/command/core/create-importing-job",data=urllib.urlencode({"test":""}))
print u.read()
After importing the urllib2_file library it complains:
Traceback (most recent call last):
File "C:/hari/latest refine code/trialrefine.py", line 11, in <module>
u = urllib2.urlopen("http://127.0.0.1:3333/command/core/create-importing-job",data=urllib.urlencode({"test":""}))
File "C:\Python27\lib\urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 391, in open
response = self._open(req, data)
File "C:\Python27\lib\urllib2.py", line 409, in _open
'_open', req)
File "C:\Python27\lib\urllib2.py", line 369, in _call_chain
result = func(*args)
File "C:\Python27\urllib2_file.py", line 207, in http_open
return self.do_open(httplib.HTTP, req)
File "C:\Python27\urllib2_file.py", line 298, in do_open
return self.parent.error('http', req, fp, code, msg, hdrs)
File "C:\Python27\lib\urllib2.py", line 435, in error
return self._call_chain(*args)
File "C:\Python27\lib\urllib2.py", line 369, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 518, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
HTTPError: HTTP Error 404: Not Found
You are getting a 404 error, which means the URL was wrong or the server was down. Note that urllib2_file overwrites the default HTTP handler of urllib2:
urllib2._old_HTTPHandler = urllib2.HTTPHandler
urllib2.HTTPHandler = newHTTPHandler
One thing you could do is explicitly pass urllib2._old_HTTPHandler to the opener, as sketched below. Other than that, you really should step through urllib2_file with a debugger to understand what's going wrong.
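A rough sketch of that first suggestion, assuming urllib2_file really does stash the original handler in urllib2._old_HTTPHandler as shown above (untested):

import urllib
import urllib2
import urllib2_file  # importing this replaces urllib2.HTTPHandler

proxy = urllib2.ProxyHandler({'http': '10.200.1.26'})
# Build the opener with the original HTTP handler that urllib2_file saved away,
# so this plain urlencoded POST goes through the stock implementation again.
opener = urllib2.build_opener(urllib2._old_HTTPHandler, proxy)
urllib2.install_opener(opener)

u = urllib2.urlopen("http://127.0.0.1:3333/command/core/create-importing-job",
                    data=urllib.urlencode({"test": ""}))
print u.read()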
