I am trying to use proxy IP addresses in Selenium for web scraping. I am running Python 2.7.3 on Mac OS X 10.7.5 and I have the following Python code:
import urllib2
from selenium import webdriver
fileproxylist = open('proxylist.txt', 'r')
proxyList = fileproxylist.readlines()
indexproxy = 0
totalproxy = len(proxyList)
def get_source_html_proxy(url, proxip):
    proxyip = urllib2.ProxyHandler({'http': proxip})
    opener = urllib2.build_opener(proxyip)
    urllib2.install_opener(opener)
    req = urllib2.Request(url)
    sock = urllib2.urlopen(req)
    data = sock.read()
    return data
browser = webdriver.Chrome()
browser.get(get_source_html_proxy(MyUrl,proxyList[0]))
where MyUrl is the URL of the page I want to scrape and proxyList[0] is the IP address I want to scrape through instead of my local machine's IP address. When I run this code, I get the following error:
Traceback (most recent call last):
File "Scrape.py", line 89, in <module>
browser.get(get_source_html_proxy(MyUrl,proxyList[0]))
File "Scrape.py", line 83, in get_source_html_proxy
sock=urllib2.urlopen(req)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 400, in open
response = self._open(req, data)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 418, in _open
'_open', req)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 378, in _call_chain
result = func(*args)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1207, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1177, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 8] nodename nor servname provided, or not known>
I'm not sure what the issue is here. Can someone help me figure out what's going on? Thanks!
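One thing worth noting: webdriver's get() expects a URL, not an HTML string, and readlines() keeps the trailing newline on each proxy entry, which can produce exactly this kind of name-resolution error. A minimal sketch of routing Chrome itself through the proxy instead (assuming an older Selenium release that accepts the chrome_options keyword, and that the entries in proxylist.txt are plain host:port strings):
from selenium import webdriver

proxy_address = proxyList[0].strip()  # strip the trailing '\n' left by readlines()

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--proxy-server=http://%s' % proxy_address)
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get(MyUrl)  # pass the URL itself; Chrome fetches it through the proxy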
Related
I am trying to write a function that downloads a file from the internet. If I go to the web address directly in a browser, I do get the images or can download the files. But when I run my code, it just hangs and then I get a timeout error. Is there any particular reason why that might be happening?
#fpp = "http://www.blog.pythonlibrary.org/wp-content/uploads/2012/06/wxDbViewer.zip"
fpp = "http://www.gunnerkrigg.com//comics/00000001.jpg"
download_file(fpp)
This is my function:
import urllib2
def download_file(url_path):
    response = urllib2.urlopen(url_path)
    data = response.read()
Is there any particular reason why it might work from the browser but not in the code?
This is the error I get:
Traceback (most recent call last):
File "/Users/dk/testing/myfile.py", line 42, in <module>
download_file(fpp)
File "/Users/dk/Documents/testing/code_project.py", line 154, in download_file
response = urllib2.urlopen(url_path)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 154, in urlopen
return opener.open(url, data, timeout)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 431, in open
response = self._open(req, data)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 449, in _open
'_open', req)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 409, in _call_chain
result = func(*args)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1227, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1197, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 60] Operation timed out>
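One possible cause, offered only as a guess: some servers stall or drop connections that don't look like they come from a browser. A minimal sketch of the same function with a browser-style User-Agent header and an explicit timeout (both supported by urllib2):
import urllib2

def download_file(url_path, timeout=30):
    # Send a browser-like User-Agent and give up after `timeout` seconds
    # instead of hanging indefinitely.
    req = urllib2.Request(url_path, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib2.urlopen(req, timeout=timeout)
    return response.read()

data = download_file("http://www.gunnerkrigg.com//comics/00000001.jpg")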
I'm using html2text in Python to get the raw text (tags included) of an HTML page from any URL, but I'm getting an error.
My code -
import html2text
import urllib2
proxy = urllib2.ProxyHandler({'http': 'http://<proxy>:<pass>#<ip>:<port>'})
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
urllib2.install_opener(opener)
html = urllib2.urlopen("http://www.ndtv.com/india-news/this-stunt-for-a-facebook-like-got-the-hyderabad-youth-arrested-740851").read()
print html2text.html2text(html)
The error -
Traceback (most recent call last):
File "t.py", line 8, in <module>
html = urllib2.urlopen("http://www.ndtv.com/india-news/this-stunt-for-a-facebook-like-got-the-hyderabad-youth-arrested-740851").read()
File "/usr/lib/python2.7/urllib2.py", line 127, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib/python2.7/urllib2.py", line 404, in open
response = self._open(req, data)
File "/usr/lib/python2.7/urllib2.py", line 422, in _open
'_open', req)
File "/usr/lib/python2.7/urllib2.py", line 382, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 1214, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/usr/lib/python2.7/urllib2.py", line 1184, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 110] Connection timed out>
Can anyone explain what I'm doing wrong?
If you don't require SSL, this script in Python 2.7.x should work:
import urllib
url = "http://stackoverflow.com"
f = urllib.urlopen(url)
print f.read()
In Python 3.x, use urllib.request instead of urllib, because urllib2 is Python 2 only; in Python 3 it was merged into urllib. Note that the http:// prefix is required in the URL.
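For reference, a rough Python 3 equivalent of the same fetch, using urllib.request:
from urllib.request import urlopen

url = "http://stackoverflow.com"
with urlopen(url) as f:
    print(f.read().decode("utf-8"))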
EDIT: In 2020, you should use the third-party requests module instead; it can be installed with pip.
import requests
print(requests.get("http://stackoverflow.com").text)
I'm trying to do something simple: fetch the page at "http://search.jd.com/Search?keyword=%E5%A5%87%E7%9F%B3&enc=utf-8".
So my Python code is:
# -*- coding: utf-8 -*-
import sys, codecs
import urllib, urllib2
url = "http://search.jd.com/Search?keyword=%E5%A5%87%E7%9F%B3&enc=utf-8"
print url
page=urllib2.urlopen(url).read()
print page
However, I get:
Traceback (most recent call last):
File "tmp.py", line 15, in <module>
page=urllib2.urlopen(url).read()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 127, in urlopen
return _opener.open(url, data, timeout)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 404, in open
response = self._open(req, data)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 422, in _open
'_open', req)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 382, in _call_chain
result = func(*args)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1214, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 1184, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 8] nodename nor servname provided, or not known>
Can anyone tell me what's going on?
Many thanks!
Sounds like it might be a network issue. Check that you have a consistent internet connection (e.g. by pinging an appropriate server continuously while you run the tests). I just ran the code you posted and it worked perfectly fine for me.
Your code works fine for me too.
But the error can occur when the URL contains characters like "+=#"; in that case you need to percent-encode it first, while leaving the URL structure itself (scheme, slashes, query separators) intact:
s = "http://search.jd.com/Search?keyword=%E5%A5%87%E7%9F%B3&enc=utf-8"
my_url = urllib2.quote(s.encode("utf8"), safe=":/?&=%")  # keep the URL structure unescaped
page = urllib2.urlopen(my_url).read()
print page
Alternatively, you could use requests:
import requests

response = requests.get(url)
print response.content
or
print response.text
It's a network issue; make sure you are on a proper internet connection.
When I try to crawl Twitter using this code:
import urllib2
s = "https://mobile.twitter.com/bing/"
html = urllib2.urlopen(s).read()
print html
... I get the following error:
Traceback (most recent call last):
File "C:\Users\arpit\Downloads\Desktop\Wiki Code\final Crawler_wiki.py", line 14, in <module>
html = urllib2.urlopen(s).read()
File "C:\Python27\lib\urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 400, in open
response = self._open(req, data)
File "C:\Python27\lib\urllib2.py", line 418, in _open
'_open', req)
File "C:\Python27\lib\urllib2.py", line 378, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 1215, in https_open
return self.do_open(httplib.HTTPSConnection, req)
File "C:\Python27\lib\urllib2.py", line 1177, in do_open
raise URLError(err)
URLError: <urlopen error [Errno 10061] No connection could be made because the target machine actively refused it>
If I replace mobile.twitter.com with twitter.com then it works, but I want it to work with mobile.twitter.com.
The Twitter site is probably looking for a User-Agent header, which you don't set when you make the request through the urllib API.
You will likely need to use something like mechanize to fake your user agent.
But I highly suggest you use the Twitter API, which provides a lot of easy and pleasant ways to play with the data.
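If the missing header really is the problem, you don't necessarily need mechanize; a minimal sketch of setting the User-Agent directly with urllib2:
import urllib2

s = "https://mobile.twitter.com/bing/"
req = urllib2.Request(s, headers={'User-Agent': 'Mozilla/5.0'})  # pretend to be a browser
html = urllib2.urlopen(req).read()
print html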
I am not able to open a URL for read() using urllib or urllib2, even after adding a ProxyHandler (in the case of urllib2) and setting proxies in urllib.
My network connects to the internet through a proxy; the proxy settings (taken from my browser) are:
HTTP Proxy: someproxy.com Port: 1080
I have tried urllib:
import urllib
myproxies = {'http':'http://someproxy.com:1080'}
data = urllib.urlopen('http://www.google.com', proxies = myproxies).read()
but I am receiving this error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\urllib.py", line 84, in urlopen
return opener.open(url)
File "C:\Python27\lib\urllib.py", line 200, in open
return self.open_unknown_proxy(proxy, fullurl, data)
File "C:\Python27\lib\urllib.py", line 219, in open_unknown_proxy
raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
IOError: [Errno socket error] [Errno 11001] getaddrinfo failed
and for urllib2:
import urllib2
proxy = urllib2.ProxyHandler({'http':'http://someproxy.com:1080'})
opener1 = urllib2.build_opener(proxy)
urllib2.install_opener(opener1)
urllib2.urlopen('http://www.google.com')
I am getting this error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 394, in open
response = self._open(req, data)
File "C:\Python27\lib\urllib2.py", line 412, in _open
'_open', req)
File "C:\Python27\lib\urllib2.py", line 372, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 1199, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "C:\Python27\lib\urllib2.py", line 1174, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 11001] getaddrinfo failed>
any help will be greatly appreciated.
MRick
I think you want the following for urllib:
...
proxies = {'http':'http://someproxy.com:1080/'}
data = urllib.urlopen('http://www.google.com', proxies=proxies).read()
...
or this for urllib2:
...
proxy = urllib2.ProxyHandler({'http':'http://someproxy.com:1080'})
...
Note that the proxy URL needs to include the protocol part (http://) as well as the host and port.
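The "getaddrinfo failed" error means the proxy hostname itself could not be resolved, so it is also worth confirming that DNS resolution works for the proxy host. A quick check, as a sketch (someproxy.com stands in for your real proxy host):
import socket

try:
    print socket.gethostbyname('someproxy.com')  # should print the proxy's IP address
except socket.gaierror as err:
    print 'proxy hostname does not resolve:', err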