How can I choose the location where the downloaded file is stored? My code:
import urllib.request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent': user_agent}
request = urllib.request.Request("http://download.thinkbroadband.com/5MB.zip", None, headers)
response = urllib.request.urlopen(request)
data = response.read()
You're almost there. So you've got data:
with open(where_you_want_to_store_the_data, "wb") as ofile:
    ofile.write(data)
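If the file is large, you can also stream the response straight to disk instead of read()ing it all into memory first. A minimal sketch along the same lines (the destination path is just an example):
import shutil
import urllib.request

# same request as above, repeated so this snippet stands alone
request = urllib.request.Request(
    "http://download.thinkbroadband.com/5MB.zip",
    headers={'User-Agent': 'Mozilla/5.0'},
)
with urllib.request.urlopen(request) as response, \
        open("/tmp/5MB.zip", "wb") as ofile:  # example destination path
    shutil.copyfileobj(response, ofile)  # copies in chunks, never the whole file at once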
For a cleaner way you can use urlretrieve:
from urllib.request import urlretrieve
urlretrieve(url, "/path/to/something.txt")
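Note that urlretrieve has no headers parameter, so if the server insists on a browser User-Agent you can install a global opener first; urlretrieve goes through it under the hood. A sketch, assuming the same download URL as above:
import urllib.request

# urlretrieve uses the globally installed opener, so set the User-Agent there
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)

urllib.request.urlretrieve("http://download.thinkbroadband.com/5MB.zip",
                           "/path/to/5MB.zip")  # example destination path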
I used urllib.request.Request for the URL of a memidex.com page, but the urllib.request.urlopen(url) line then fails to open the URL.
url = urllib.request.Request("http://www.memidex.com/" + term)
my_request = urllib.request.urlopen(url)
info = BeautifulSoup(my_request, "html.parser")
I've tried using the same code for a different website and it worked for that one so I have no idea why it's not working for memidex.com.
You need to add headers to your URL request in order to overcome the error. (BTW, 'HTTP Error 403: Forbidden' was your error, right?) Hope the code below helps you.
import urllib.request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
url = "http://www.memidex.com/"
headers = {'User-Agent': user_agent}
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
data = response.read()
print(data)
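If you want to confirm which status code the server is actually sending, you can catch urllib.error.HTTPError; a minimal sketch:
import urllib.request
import urllib.error

try:
    urllib.request.urlopen("http://www.memidex.com/")
except urllib.error.HTTPError as e:
    print(e.code, e.reason)  # e.g. 403 Forbidden when no browser User-Agent is sent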
I tried every 'User-Agent' in here, and I still get urllib.error.HTTPError: HTTP Error 400: Bad Request. I also tried this, but I get urllib.error.URLError: File Not Found. I have no idea what to do; my current code is:
from bs4 import BeautifulSoup
import urllib.request, json, ast

with open("urller.json") as f:
    cc = json.load(f)  # the file I get links from; you can try this link instead
    # cc = ../games/index.php?g_id=23521&game=0RBITALIS

for x in ast.literal_eval(cc):  # cc is a str(list) so I have to convert
    if x.startswith("../"):
        r = urllib.request.Request("http://www.game-debate.com{}".format(x[2::]), headers={'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
        # x[2::] because I removed the '../' parts from the urls
        rr = urllib.request.urlopen(r).read()
        soup = BeautifulSoup(rr)
        for y in soup.find_all("ul", attrs={'class': ['devDefSysReqList']}):
            print(y.text)
Edit: If you try only one link it probably won't show any error; I get the error every time at the 6th link.
A quick fix is to replace the space with +:
url = "http://www.game-debate.com"
r = urllib.request.Request(url + x[2:].replace(" ", "+"), headers={'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
A better option may be to let urllib quote the params:
from bs4 import BeautifulSoup
import urllib.request, json, ast
from urllib.parse import quote, urljoin

with open("urller.json") as f:
    cc = json.load(f)  # the file I get links from; you can try this link instead

url = "http://www.game-debate.com"

for x in ast.literal_eval(cc):  # cc is a str(list) so I have to convert
    if x.startswith("../"):
        r = urllib.request.Request(urljoin(url, quote(x.lstrip("."))), headers={
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
        rr = urllib.request.urlopen(r).read()
        soup = BeautifulSoup(rr, "html.parser")
        print(rr.decode("utf-8"))
        for y in soup.find_all("ul", attrs={'class': ['devDefSysReqList']}):
            print(y.text)
Spaces in a URL are not valid and need to be percent-encoded as %20 or replaced with +.
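For example, a quick check shows the two encodings side by side:
from urllib.parse import quote, quote_plus

print(quote("0RBITALIS demo"))       # -> 0RBITALIS%20demo
print(quote_plus("0RBITALIS demo"))  # -> 0RBITALIS+demo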
I'm using Python 3.5.0 to grab some census data. My script does retrieve the data from the URL and saves it, but the saved file can't be imported into SQL because it somehow dropped the {CR}{LF}. How can I make the saved file importable into SQL?
try:
    url = 'https://www.census.gov/popest/data/counties/asrh/2014/files/CC-EST2014-ALLDATA.csv'
    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
    req = urllib.request.Request(url, headers=headers)
    resp = urllib.request.urlopen(req)
    respData = resp.read()
    saveFile = open('Vintage2014.csv', 'w')
    saveFile.write(str(respData))
    saveFile.close()
except Exception as e:
    print(str(e))
Note that the file you are trying to download contains only LF, not CRLF. The real problem is that str(respData) gives you the repr of a bytes object (one long line starting with b'...'), which is why your line endings disappear. You could use the following approach to decode the bytes to a proper string instead. Because the file is opened in text mode, Python will also translate LF to CRLF on Windows as it writes, so you end up with CRLF:
import urllib.request

try:
    url = 'https://www.census.gov/popest/data/counties/asrh/2014/files/CC-EST2014-ALLDATA.csv'
    headers = {}
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
    req = urllib.request.Request(url, headers=headers)
    resp = urllib.request.urlopen(req)
    respData = resp.read()
    with open('Vintage2014.csv', 'w') as saveFile:
        saveFile.write(respData.decode('latin-1'))
except Exception as e:
    print(str(e))
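Alternatively, if you just want the file on disk byte-for-byte as the server sent it (LF endings and all), skip decoding entirely and write in binary mode; a minimal sketch:
import urllib.request

url = 'https://www.census.gov/popest/data/counties/asrh/2014/files/CC-EST2014-ALLDATA.csv'
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})

with urllib.request.urlopen(req) as resp, open('Vintage2014.csv', 'wb') as f:
    f.write(resp.read())  # bytes written unchanged, so the original line endings survive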
How come when I hit this webpage, I get HTML text:
http://itunes.apple.com/us/app/mobile/id381057839
But when I hit this webpage, I get garbled junk?
http://itunes.apple.com/us/app/mobile/id375562663
I use the same download() function in Python, which is here:
import socket
import urllib2

def download(source_url):
    try:
        socket.setdefaulttimeout(10)
        agent = "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.10) Gecko/20100914 AlexaToolbar/alxf-1.54 Firefox/3.6.10 GTB7.1"
        ree = urllib2.Request(source_url)
        ree.add_header('User-Agent', agent)
        ree.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
        ree.add_header("Accept-Language", "en-us,en;q=0.5")
        ree.add_header("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")
        ree.add_header("Accept-Encoding", "gzip,deflate")
        ree.add_header("Host", "itunes.apple.com")
        resp = urllib2.urlopen(ree)
        htmlSource = resp.read()
        return htmlSource
    except Exception, e:
        print e
Solved. It was a compression issue: the request advertised gzip in Accept-Encoding, but the gzipped response body was never decompressed.
import random
import socket
import urllib2

def download(source_url):
    try:
        socket.setdefaulttimeout(10)
        agents = ['Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)', 'Microsoft Internet Explorer/4.0b1 (Windows 95)', 'Opera/8.00 (Windows NT 5.1; U; en)']
        ree = urllib2.Request(source_url)
        ree.add_header('User-Agent', random.choice(agents))
        ree.add_header('Accept-encoding', 'gzip')
        opener = urllib2.build_opener()
        h = opener.open(ree).read()
        import StringIO
        import gzip
        compressedstream = StringIO.StringIO(h)
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        data = gzipper.read()
        return data
    except Exception, e:
        print e
        return ""
How can I download a webpage with a user agent other than the default one on urllib2.urlopen?
I answered a similar question a couple weeks ago.
There is example code in that question, but basically you can do something like this: (Note the capitalization of User-Agent as of RFC 2616, section 14.43.)
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
response = opener.open('http://www.stackoverflow.com')
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request('http://www.example.com', None, headers)
html = urllib2.urlopen(req).read()
Or, a bit shorter:
req = urllib2.Request('http://www.example.com', headers={ 'User-Agent': 'Mozilla/5.0' })
html = urllib2.urlopen(req).read()
Setting the User-Agent from everyone's favorite Dive Into Python.
The short story: You can use Request.add_header to do this.
You can also pass the headers as a dictionary when creating the Request itself, as the docs note:
headers should be a dictionary, and will be treated as if add_header() was called with each key and value as arguments. This is often used to “spoof” the User-Agent header, which is used by a browser to identify itself – some HTTP servers only allow requests coming from common browsers as opposed to scripts. For example, Mozilla Firefox may identify itself as "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11", while urllib2's default user agent string is "Python-urllib/2.6" (on Python 2.6).
For Python 3, urllib is split into several modules (urllib.request, urllib.parse, urllib.error)...
import urllib.request
req = urllib.request.Request(url="http://localhost/", headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'})
handler = urllib.request.urlopen(req)
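A small usage sketch for that Python 3 snippet (the localhost URL is just the example from above):
import urllib.request

req = urllib.request.Request(url="http://localhost/",
                             headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'})
with urllib.request.urlopen(req) as handler:
    print(handler.status)        # HTTP status code
    print(handler.read()[:200])  # first bytes of the body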
All these should work in theory, but (with Python 2.7.2 on Windows, at least) any time you send a custom User-agent header, urllib2 doesn't send that header. If you don't try to send a User-agent header, it sends the default Python-urllib one. None of these methods seem to work for adding the User-agent, though they do work for other headers:
opener = urllib2.build_opener(proxy)
opener.addheaders = {'User-agent':'Custom user agent'}
urllib2.install_opener(opener)
request = urllib2.Request(url, headers={'User-agent':'Custom user agent'})
request.headers['User-agent'] = 'Custom user agent'
request.add_header('User-agent', 'Custom user agent')
For urllib you can use:
from urllib import FancyURLopener

class MyOpener(FancyURLopener, object):
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'

myopener = MyOpener()
myopener.retrieve('https://www.google.com/search?q=test', 'useragent.html')
Another solution in urllib2 and Python 2.7:
req = urllib2.Request('http://www.example.com/')
req.add_unredirected_header('User-Agent', 'Custom User-Agent')
urllib2.urlopen(req)
There are two properties of urllib.URLopener(), namely:
addheaders = [('User-Agent', 'Python-urllib/1.17'), ('Accept', '*/*')] and
version = 'Python-urllib/1.17'.
To fool the website you need to change both of these values to an accepted User-Agent, e.g.
Chrome browser: 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.149 Safari/537.36'
Googlebot: 'Googlebot/2.1'
like this:
import urllib

page_extractor = urllib.URLopener()
page_extractor.addheaders = [('User-Agent', 'Googlebot/2.1'), ('Accept', '*/*')]
page_extractor.version = 'Googlebot/2.1'
page_extractor.retrieve(<url>, <file_path>)
Changing just one property does not work because the website marks it as a suspicious request.
Try this:
import requests

html_source_code = requests.get("http://www.example.com/",
    headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',
             'Upgrade-Insecure-Requests': '1',
             'x-runtime': '148ms'},
    allow_redirects=True).content
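If the goal is to save the download to a chosen path with requests, streaming avoids holding the whole body in memory; a minimal sketch (URL and destination path are just examples):
import requests

url = "http://download.thinkbroadband.com/5MB.zip"  # example URL
resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}, stream=True)
resp.raise_for_status()  # raise if the server returned an error status

with open("/path/to/5MB.zip", "wb") as f:  # example destination path
    for chunk in resp.iter_content(chunk_size=8192):
        f.write(chunk)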