I want to download images from a URL that has a random component in it, so I have written code to do that, but I'm getting an error.
Code:
import urllib.request
import random
random_number=random.randint(500,600)
url_image="'https://csgostash.com/img/skins/s"+str(random_number)+"fn.png'"
image=urllib.request.urlretrieve(url_image, 'skin.png')
Error:
Traceback (most recent call last):
File "C:/Users/luke/Desktop/scraper/test image download/cs test.py", line 8, in <module>
image=urllib.request.urlretrieve(url_image, 'skin.png')
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 187, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 465, in open
response = self._open(req, data)
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 488, in _open
'unknown_open', req)
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 443, in _call_chain
result = func(*args)
File "C:\Users\luke\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 1310, in unknown_open
raise URLError('unknown url type: %s' % type)
urllib.error.URLError: <urlopen error unknown url type: 'https>
First, url_image has odd syntax: the extra single quotes become part of the string, so the URL you pass to urlretrieve starts with a literal ' character.
url_image="https://csgostash.com/img/skins/s"+str(random_number)+"fn.png"
If you fix this, you will notice a 403 error: protection against bots. Use a user agent:
import urllib.request
import random
random_number=random.randint(500,600)
url_image="https://csgostash.com/img/skins/s"+str(random_number)+"fn.png"
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
headers = {'User-Agent': user_agent}
req = urllib.request.Request(url_image, None, headers)
print(url_image)
#image, h = urllib.request.urlretrieve(url_image)
with urllib.request.urlopen(req) as response:
    the_page = response.read()
print(the_page)
Edit: of course, you may save it to a file:
with open('skin.png', 'wb') as f:
    f.write(the_page)
You could also do this with requests.
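For illustration, here is a minimal sketch of the same download using requests (the URL pattern and User-Agent string are taken from the code above; whether every random number maps to an existing image is an assumption, hence the status check):

import random

import requests

random_number = random.randint(500, 600)
url_image = "https://csgostash.com/img/skins/s" + str(random_number) + "fn.png"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}

resp = requests.get(url_image, headers=headers)
if resp.status_code == 200:  # some random numbers may not map to a real skin
    with open('skin.png', 'wb') as f:
        f.write(resp.content)
else:
    print('Download failed with status', resp.status_code)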
Related
I am trying to scrape some photos from different websites for my coding class.
I am using Beautiful Soup and urllib to do this.
Here is my code:
import json
import time
from urllib.request import urlopen, Request
from urllib.request import urlretrieve
import urllib.request
from bs4 import BeautifulSoup
import os
import re
site = "https://www.hollisterco.com/shop/us/guys-new-arrivals"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
req = Request(url=site, headers=headers)
html = urlopen(req, timeout=30)
bs = BeautifulSoup(html, 'html.parser')
images = bs.find_all('img', {'src':re.compile('.jpg')})
count = 26
for image in images:
    try:
        #print(image["src"])
        urlretrieve(image["src"], str(count)+".jpg")
        count += 1
    except:
        continue
This code works for some websites (www.vineyardvines.com, for example, worked just fine), but it doesn't work for www.hollisterco.com. What can I do to fix this? Here is the error I am getting for hollisterco.com:
Traceback (most recent call last):
File "C:/Users/momin/PycharmProjects/scraper/scraper.py", line 22, in <module>
html = urlopen(req, timeout=30).read().decode()
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 525, in open
response = self._open(req, data)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 543, in _open
'_open', req)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 1360, in https_open
context=self._context, check_hostname=self._check_hostname)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 1320, in do_open
r = h.getresponse()
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1336, in getresponse
response.begin()
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 306, in begin
version, status, reason = self._read_status()
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 267, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1071, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\momin\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 929, in read
return self._sslobj.read(len, buffer)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
Okay, I figured out a solution. Here is my advice:
Use selenium or requests for fetching the HTML content.
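For example, a minimal requests-based sketch of the same fetch (same site URL and headers as above; whether hollisterco.com accepts this particular request is an assumption):

import requests
from bs4 import BeautifulSoup

site = "https://www.hollisterco.com/shop/us/guys-new-arrivals"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}

# requests manages the TLS session and connection handling for us
resp = requests.get(site, headers=headers, timeout=30)
resp.raise_for_status()
bs = BeautifulSoup(resp.text, 'html.parser')
print(len(bs.find_all('img')))  # sanity check that we got real markup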
I have been trying to send requests using custom headers, but Python keeps giving this error message. What am I getting wrong?
Here is my code together with the full error output from Python:
Code
import urllib.request
import urllib.parse
url = 'https://nytimes.com/'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
values = {'name': 'Kromanion Krank', 'location': 'Finland', 'language': 'Python'}
headers = {'User-Agent': user_agent}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')
html_str = urllib.request.urlopen(url, data, headers)
html_txt = html_str.text
print(html_txt)
Output error
Traceback (most recent call last):
File "C:/Users/fpt84/PycharmProjects/WebC/TEST.py", line 11, in <module>
html_str = urllib.request.urlopen(url, data, headers)
File "C:\Python34\lib\urllib\request.py", line 161, in urlopen
return opener.open(url, data, timeout)
File "C:\Python34\lib\urllib\request.py", line 464, in open
response = self._open(req, data)
File "C:\Python34\lib\urllib\request.py", line 482, in _open
'_open', req)
File "C:\Python34\lib\urllib\request.py", line 442, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 1226, in https_open
context=self._context, check_hostname=self._check_hostname)
File "C:\Python34\lib\urllib\request.py", line 1183, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "C:\Python34\lib\http\client.py", line 1137, in request
self._send_request(method, url, body, headers)
File "C:\Python34\lib\http\client.py", line 1182, in _send_request
self.endheaders(body)
File "C:\Python34\lib\http\client.py", line 1133, in endheaders
self._send_output(message_body)
File "C:\Python34\lib\http\client.py", line 963, in _send_output
self.send(msg)
File "C:\Python34\lib\http\client.py", line 898, in send
self.connect()
File "C:\Python34\lib\http\client.py", line 1279, in connect
super().connect()
File "C:\Python34\lib\http\client.py", line 871, in connect
self.timeout, self.source_address)
File "C:\Python34\lib\socket.py", line 504, in create_connection
sock.settimeout(timeout)
TypeError: a float is required
The signature of urllib.request.urlopen (from https://docs.python.org/3/library/urllib.request.html#urllib.request.urlopen) is:
urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)
Your third argument, headers, is being passed into the timeout parameter of urlopen.
You'll need to use a Request object to do what you want:
req = urllib.request.Request(url, data, headers)
resp = urllib.request.urlopen(req)
Adapting your code example (I also had to modify how the response is used):
import urllib.request
import urllib.parse
url = 'https://nytimes.com/'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
values = {'name': 'Kromanion Krank', 'location': 'Finland', 'language': 'Python'}
headers = {'User-Agent': user_agent}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')
request = urllib.request.Request(url, data, headers)
resp = urllib.request.urlopen(request)
html_txt = resp.read().decode('UTF-8')
print(html_txt)
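For comparison, a sketch of the same POST using the requests library (not part of the original answer; requests encodes the form values itself):

import requests

url = 'https://nytimes.com/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}
values = {'name': 'Kromanion Krank', 'location': 'Finland', 'language': 'Python'}

# passing a dict as data sends a form-encoded POST body with the right Content-Type
resp = requests.post(url, data=values, headers=headers)
print(resp.text)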
I was trying to get a web page, but got into this problem. I've looked up some references, and this is what I've done so far:
import sys
import urllib2
from bs4 import BeautifulSoup
user = 'myuserID'
password = "mypassword"
ip = sys.argv[1]
url = "http://www.websites.com/" + ip
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, user, password)
handler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
header = {
'Connection' : 'keep-alive',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip, deflate'
}
html = urllib2.urlopen(urllib2.Request(url, None, header))
soup = BeautifulSoup(html, 'html.parser')
# some if else function afterwards #
When I try to run the script, it shows this kind of error:
python checker.py 8.8.8.8
Traceback (most recent call last):
File "checker.py", line 34, in <module>
html = urllib2.urlopen(urllib2.Request(url, None, header))
File "C:\Python27\lib\urllib2.py", line 154, in urlopen
return opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 437, in open
response = meth(req, response)
File "C:\Python27\lib\urllib2.py", line 550, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python27\lib\urllib2.py", line 469, in error
result = self._call_chain(*args)
File "C:\Python27\lib\urllib2.py", line 409, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 656, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "C:\Python27\lib\urllib2.py", line 437, in open
response = meth(req, response)
File "C:\Python27\lib\urllib2.py", line 550, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python27\lib\urllib2.py", line 475, in error
return self._call_chain(*args)
File "C:\Python27\lib\urllib2.py", line 409, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 558, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
urllib2.HTTPError: HTTP Error 401: authenticationrequired
But if I first open the page (or any other web page) in a browser and manually enter my credentials, this script works fine afterwards. Am I missing something?
Just to add: my network uses a McAfee web gateway device, so sometimes we need to enter our credentials to continue browsing the net. Our user/pass is integrated with Active Directory. Could that be causing the issue?
This seems to work really well (taken from another thread):
import urllib2
import base64
import sys
user = 'myuserID'
password = "mypassword"
ip = sys.argv[1]
url = "http://www.websites.com/" + ip
request = urllib2.Request(url)
base64string = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
Or you may use requests:
import sys

import requests
from requests.auth import HTTPBasicAuth

user = 'myuserID'
password = "mypassword"
ip = sys.argv[1]
url = "http://www.websites.com/" + ip
res = requests.get(url, auth=HTTPBasicAuth(user, password))
print res.text
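If the 401 is actually coming from the McAfee gateway (a proxy) rather than the site itself, basic auth against the site won't help; a minimal sketch of proxy authentication instead, assuming a hypothetical gateway address:

import sys
import urllib2

user = 'myuserID'
password = "mypassword"
proxy = 'http://mcafee-gateway.local:9090'  # hypothetical proxy address, adjust to yours
url = "http://www.websites.com/" + sys.argv[1]

# register the credentials for the proxy, not the target site
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, proxy, user, password)
opener = urllib2.build_opener(
    urllib2.ProxyHandler({'http': proxy, 'https': proxy}),
    urllib2.ProxyBasicAuthHandler(passman))
urllib2.install_opener(opener)
print urllib2.urlopen(url).read()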
Hi!
I tried to open a web page that opens fine in a browser, but Python just complains and refuses to work.
import urllib.request, urllib.error
f = urllib.request.urlopen('http://www.booking.com/reviewlist.html?cc1=tr;pagename=sapphire')
And another way:
import urllib.request, urllib.error
opener=urllib.request.build_opener()
f = opener.open('http://www.booking.com/reviewlist.html?cc1=tr;pagename=sapphire')
Both options give the same error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python34\lib\urllib\request.py", line 461, in open
response = meth(req, response)
File "C:\Python34\lib\urllib\request.py", line 571, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python34\lib\urllib\request.py", line 493, in error
result = self._call_chain(*args)
File "C:\Python34\lib\urllib\request.py", line 433, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 676, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "C:\Python34\lib\urllib\request.py", line 461, in open
response = meth(req, response)
File "C:\Python34\lib\urllib\request.py", line 571, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python34\lib\urllib\request.py", line 499, in error
return self._call_chain(*args)
File "C:\Python34\lib\urllib\request.py", line 433, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 579, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 400: Bad Request
Any ideas?
They are probably blocking requests that don't come from a browser, so you probably need a valid User-Agent header.
Using requests, this works:
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36'
}
r = requests.get('http://www.booking.com/reviewlist.html?cc1=tr;pagename=sapphire', headers=headers)
print(r)
print(r.headers)
This URL seems to be doing user agent string checking. If I adjust my user agent string in Firefox to Python-urllib/2.7, it fails with the Bad Request you are seeing.
As you are using urllib, you can adjust the User-Agent like this:
from urllib.request import FancyURLopener

class MyOpener(FancyURLopener):
    version = 'My new User-Agent'  # set this to the user agent string you want

myopener = MyOpener()
page = myopener.open('http://www.booking.com/reviewlist.html?cc1=tr;pagename=sapphire')
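Note that FancyURLopener is deprecated in Python 3; the Request-plus-headers pattern used elsewhere in this thread works just as well here:

import urllib.request

headers = {'User-Agent': 'My new User-Agent'}  # any browser-like string should do
req = urllib.request.Request(
    'http://www.booking.com/reviewlist.html?cc1=tr;pagename=sapphire',
    headers=headers)
page = urllib.request.urlopen(req)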
I want to make a simple, stupid Twitter app using the Twitter API.
If I request this page from my browser it does work:
http://search.twitter.com/search.atom?q=hello&rpp=10&page=1
but if I request this page from Python using urllib or urllib2, most of the time it doesn't work:
response = urllib2.urlopen("http://search.twitter.com/search.atom?q=hello&rpp=10&page=1")
and I get this error:
Traceback (most recent call last):
File "twitter.py", line 24, in <module>
response = urllib2.urlopen("http://search.twitter.com/search.atom?q=hello&rpp=10&page=1")
File "/usr/lib/python2.6/urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib/python2.6/urllib2.py", line 391, in open
response = self._open(req, data)
File "/usr/lib/python2.6/urllib2.py", line 409, in _open
'_open', req)
File "/usr/lib/python2.6/urllib2.py", line 369, in _call_chain
result = func(*args)
File "/usr/lib/python2.6/urllib2.py", line 1161, in http_open
return self.do_open(httplib.HTTPConnection, req)
File "/usr/lib/python2.6/urllib2.py", line 1136, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno 110] Connection timed out>
Why?
The code seems alright. The following worked:
>>> import urllib
>>> import urllib2
>>> user_agent = 'curl/7.21.1 (x86_64-apple-darwin10.4.0) libcurl/7.21.1'
>>> url='http://search.twitter.com/search.atom?q=hello&rpp=10&page=1'
>>> headers = { 'User-Agent' : user_agent }
>>> req = urllib2.Request(url, None, headers)
>>> response = urllib2.urlopen(req)
>>> the_page = response.read()
>>> print the_page
The other possibility is that Twitter actually could not respond; that happens all too often with Twitter.
Did you change the default socket timeout somewhere in your script? Your example code works reliably for me.
It could be your internet connection, or you might try:
import socket
socket.setdefaulttimeout(30)
assuming urllib/urllib2 doesn't override the socket timeout.
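Alternatively, urlopen accepts a per-call timeout in Python 2.6+, which avoids changing the process-wide socket default; a small sketch reusing the Request from the answer above:

import urllib2

headers = {'User-Agent': 'curl/7.21.1 (x86_64-apple-darwin10.4.0) libcurl/7.21.1'}
req = urllib2.Request('http://search.twitter.com/search.atom?q=hello&rpp=10&page=1', None, headers)
response = urllib2.urlopen(req, timeout=30)  # per-request timeout in seconds
print response.read()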