Proxy Scraper from Python 2.x to Python 3.x conversion - python

I was trying to convert a very simple function from Python 2 to Python 3 that scrapes a web page and returns a list of proxies which I can use for a Twitter bot:
#!/usr/bin/env python
#python25 on windows7
#####################################
# GPL v2
# Author: Arjun Sreedharan
# Email: arjun024#gmail.com
#####################################
import urllib2
import re
import os
import time
import random

def main():
    request = urllib2.Request("http://www.ip-adress.com/proxy_list/")
    # request.add_header("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5")
    #Without Referer header ip-adress.com gives 403 Forbidden
    request.add_header("Referer","https://www.google.co.in/")
    f = urllib2.urlopen(request)
    #outfile = open('outfile.htm','w')
    str1 = f.read()
    #outfile.write(str1)
    # normally DOT matches any character EXCEPT newline. re.DOTALL makes dot include newline
    pattern = re.compile('.*<td>(.*)</td>.*<td>Elite</td>.*', re.DOTALL)
    matched = re.search(pattern, str1)
    print(matched.group(1))
    """
    ip = matched.group(1)
    os.system('echo "http_proxy=http://'+ip+'" > ~/.wgetrc')
    if random.randint(1,2)==1:
        os.system('wget --proxy=on -t 1 --timeout=14 --header="User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5" http://funnytweets.in -O /dev/null')
    else:
        os.system('wget --proxy=on -t 1 --timeout=14 --header="User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13" http://funnytweets.in -O /dev/null')
    """

if __name__ == '__main__':
    while True:
        main()
        time.sleep(2)
OK, I already know that urllib2 is different in Python 3, but I could not make it work :( Can anyone help? :) Thanks!

In Python 3, Request and urlopen are located in the urllib.request module, so you have to change your imports accordingly.
from urllib.request import Request, urlopen
You can make your code compatible with both Python 2 and Python 3 by catching the ImportError raised when importing from urllib2:
try:
    from urllib2 import Request, urlopen
except ImportError:
    from urllib.request import Request, urlopen
Also keep in mind that URLError and HTTPError are located in urllib.error, if you need them.
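For reference, here is a minimal sketch of the question's main() ported to Python 3 under those imports (assuming the page still serves the same HTML; note that urlopen() now returns bytes, so the response has to be decoded before the regex is applied):
#!/usr/bin/env python3
import re
import time
from urllib.request import Request, urlopen

def main():
    request = Request("http://www.ip-adress.com/proxy_list/")
    # Without a Referer header ip-adress.com returns 403 Forbidden
    request.add_header("Referer", "https://www.google.co.in/")
    with urlopen(request) as f:
        # urlopen() returns bytes in Python 3, so decode before matching
        str1 = f.read().decode("utf-8", errors="replace")
    pattern = re.compile('.*<td>(.*)</td>.*<td>Elite</td>.*', re.DOTALL)
    matched = re.search(pattern, str1)
    if matched:
        print(matched.group(1))

if __name__ == '__main__':
    while True:
        main()
        time.sleep(2)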

Related

BeautifulSoup HTTP Error 403: Forbidden - Not Sure Why

I have the following Python program, and when I execute it I get an HTTP 403: Forbidden error.
Here's the code:
import os
import re
from bs4 import BeautifulSoup
#import urllib.request
#from urllib.request import request, urlopen
from urllib import request
import pandas as pd
import numpy as np
import datetime
import time
import openpyxl
for a in range(0,len(symbols),1):
    """
    Attempt to get past the Forbidden error message.
    """
    #ua = UserAgent()
    url = "https://fintel.io/ss/us/" + symbols[a]
    """
    test urls:
    https://fintel.io/ss/us/A
    https://fintel.io/ss/us/BBY'
    https://fintel.io/ss/us/WMT'
    """
    print("Extracting Values for " + symbols[a] + ".")
    header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
    }
    try:
        page_request = request.Request(url, headers=header)
        page = request.urlopen(page_request)
        #old page = urllib.request.urlopen(url)
        soup = BeautifulSoup(page, "html.parser", from_encoding="iso-8859-1")
The result I'm getting:
Extracting Values for A.
HTTP Error 403: Forbidden
Extracting Values for BBY.
HTTP Error 403: Forbidden
Extracting Values for WMT.
HTTP Error 403: Forbidden
Any suggestions on how to handle this?
That's not a BeautifulSoup-specific error; the website you're trying to scrape is probably protected by Cloudflare's anti-bot page. You can try using cfscrape to get around it.
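For example, a minimal sketch using cfscrape (a third-party package, pip install cfscrape, that wraps a requests session and needs a JavaScript runtime such as Node.js; it may not get past newer Cloudflare checks):
import cfscrape
from bs4 import BeautifulSoup

scraper = cfscrape.create_scraper()  # behaves like a requests.Session
page = scraper.get("https://fintel.io/ss/us/BBY")  # one of the question's test URLs
print(page.status_code)
soup = BeautifulSoup(page.content, "html.parser")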
Your complaint is with Request rather than with BeautifulSoup.
The website refuses to serve your requests.
When I use that UA it works fine; I receive a 200 document:
$ wget -U 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36' -S https://fintel.io/ss/us/BBY
But then again, even using UA of 'Wget/1.20.3 (darwin19.0.0)' works just fine:
$ wget -S https://fintel.io/ss/us/BBY
Looks like you've been doing excessive crawling, and your IP is now being blocked by the webserver.
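If the block is rate-based rather than permanent, spacing the requests out (while keeping the browser-like User-Agent) is worth trying; a minimal sketch along the lines of the question's loop, with example tickers and an arbitrary delay:
import time
from urllib import request
from bs4 import BeautifulSoup

symbols = ["A", "BBY", "WMT"]  # example tickers taken from the question
header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/70.0.3538.77 Safari/537.36'}

for symbol in symbols:
    url = "https://fintel.io/ss/us/" + symbol
    try:
        page = request.urlopen(request.Request(url, headers=header))
        soup = BeautifulSoup(page, "html.parser", from_encoding="iso-8859-1")
        print(symbol, "fetched")
    except Exception as exc:
        print(symbol, exc)
    time.sleep(5)  # pause between requests so the crawl is less aggressive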

server sends 403 status code when using requests library in python, but works with browser

I'm trying to automate a login using Python's requests module, but whenever I send a POST or GET request the server returns a 403 status code. The weird part is that I can access the same URL with any browser, but it just won't work with curl or requests.
Here is the code:
import requests
import lxml
from bs4 import BeautifulSoup
import os
url = "https://ais.usvisa-info.com/en-am/niv/users/sign_in"
req = requests.get(url).text
soup = BeautifulSoup(req, 'lxml')
ready = soup.prettify()
FILE = open("usvisa.html", "w")
FILE.write(ready)
FILE.close()
I'd appreciate any help or idea!
It's probably the default requests User-Agent that's getting you blocked, since it identifies the client as a script rather than a browser.
Try overriding the User-Agent with a custom, browser-like one.
import requests
import lxml
from bs4 import BeautifulSoup
import os
headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"}
url = "https://ais.usvisa-info.com/en-am/niv/users/sign_in"
req = requests.get(url, headers=headers).text
soup = BeautifulSoup(req, 'lxml')
ready = soup.prettify()
FILE = open("usvisa.html", "w", encoding="utf-8")
FILE.write(ready)
FILE.close()
You also didn't specify the file encoding when opening the output file.
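As a variation on the same idea, a requests.Session lets you set the header once, and raise_for_status() turns a 403 into a visible exception instead of silently writing an error page to disk; a sketch under the same assumptions:
import requests
from bs4 import BeautifulSoup

session = requests.Session()
session.headers.update({"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                                      "Chrome/87.0.4280.141 Safari/537.36"})

resp = session.get("https://ais.usvisa-info.com/en-am/niv/users/sign_in")
resp.raise_for_status()  # raises requests.HTTPError on 403 or any other error status

soup = BeautifulSoup(resp.text, "lxml")
with open("usvisa.html", "w", encoding="utf-8") as f:
    f.write(soup.prettify())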

Web scraping: HTTPError: HTTP Error 403: Forbidden, python3

Hi, I need to scrape a web page and extract the data-id attributes using a regular expression.
Here is my code:
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("https://clarity-project.info/tenders/?entity=38163425&offset=100")
bsObj = BeautifulSoup(html,"html.parser")
DataId = bsObg.findAll("data-id", {"skr":re.compile("data-id=[0-9,a-f]")})
for DataId in DataId:
    print(DataId["skr"])
When I run my program in Jupyter I get:
HTTPError: HTTP Error 403: Forbidden
It looks like the web server is asking you to authenticate before serving content to Python's urllib. However, they serve everything neatly to wget and curl and https://clarity-project.info/robots.txt doesn't seem to exist, so I reckon scraping as such is fine with them. Still, it might be a good idea to ask them first.
As for the code, simply changing the User Agent string to something they like better seems to work:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.request import urlopen, Request
request = Request(
    'https://clarity-project.info/tenders/?entity=38163425&offset=100',
    headers={
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0'})
html = urlopen(request).read().decode()
(unrelated, you have another mistake in your code: bsObj ≠ bsObg)
EDIT: I added the code below to answer the additional question from the comments:
What you seem to need is to find the value of the data-id attribute, no matter to which tag it belongs. The code below does just that:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
url = 'https://clarity-project.info/tenders/?entity=38163425&offset=100'
agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36\
(KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
request = Request(url, headers={'User-Agent': agent})
html = urlopen(request).read().decode()
soup = BeautifulSoup(html, 'html.parser')
tags = soup.findAll(lambda tag: tag.get('data-id', None) is not None)
for tag in tags:
    print(tag['data-id'])
The key is to simply use a lambda expression as the parameter to the findAll function of BeautifulSoup.
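If you prefer not to use a lambda, BeautifulSoup can do the same thing with attrs (passing True means "the attribute exists, whatever its value") or with a CSS attribute selector; a small self-contained sketch:
from bs4 import BeautifulSoup

html = '<div data-id="a1"></div><span data-id="b2"></span>'  # toy example markup
soup = BeautifulSoup(html, 'html.parser')

# True matches any tag that has the attribute, regardless of its value
for tag in soup.find_all(attrs={'data-id': True}):
    print(tag['data-id'])

# Equivalent CSS attribute selector (soupsieve, bundled with bs4)
for tag in soup.select('[data-id]'):
    print(tag['data-id'])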
The server is likely blocking your requests because of the default user agent. You can change this so that you will appear to the server to be a web browser. For example, a Chrome User-Agent is:
Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36
To add a User-Agent you can create a request object with the url as a parameter and the User-Agent passed in a dictionary as the keyword argument 'headers'.
See:
import urllib.request
r = urllib.request.Request(url, headers= {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'})
html = urllib.request.urlopen(r)
You could try with this:
#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
url = 'your url here'
soup = BeautifulSoup(requests.get(url).text,"html.parser")
for i in soup.find_all('tr', attrs={'class':'table-row'}):
    print('[Data id] => {}'.format(i.get('data-id')))
This should work!

Urllib bad request issue

I tried every 'User-Agent' in here, but I still get urllib.error.HTTPError: HTTP Error 400: Bad Request. I also tried this, but then I get urllib.error.URLError: File Not Found. I have no idea what to do; my current code is:
from bs4 import BeautifulSoup
import urllib.request,json,ast
with open ("urller.json") as f:
cc = json.load(f) #the file I get links, you can try this link instead of this
#cc = ../games/index.php?g_id=23521&game=0RBITALIS
for x in ast.literal_eval(cc): #cc is a str(list) so I have to convert
if x.startswith("../"):
r = urllib.request.Request("http://www.game-debate.com{}".format(x[2::]),headers={'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
#x[2::] because I removed '../' parts from urlls
rr = urllib.request.urlopen(r).read()
soup = BeautifulSoup(rr)
for y in soup.find_all("ul",attrs={'class':['devDefSysReqList']}):
print (y.text)
Edit: If you try only one link it probably won't show any error; I get the error every time at the 6th link.
A quick fix is to replace the space with +:
url = "http://www.game-debate.com"
r = urllib.request.Request(url + x[2:] ,headers={'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
A better option may be to let urllib quote the params:
from bs4 import BeautifulSoup
import urllib.request, json, ast
from urllib.parse import quote, urljoin

with open ("urller.json") as f:
    cc = json.load(f) #the file I get links, you can try this link instead of this

url = "http://www.game-debate.com"

for x in ast.literal_eval(cc): # cc is a str(list) so I have to convert
    if x.startswith("../"):
        # keep the query-string delimiters in `safe`, otherwise ? & = get escaped too
        r = urllib.request.Request(urljoin(url, quote(x.lstrip("."), safe="/?&=")), headers={
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'})
        rr = urllib.request.urlopen(r).read()
        soup = BeautifulSoup(rr, "html.parser")
        print(rr.decode("utf-8"))
        for y in soup.find_all("ul", attrs={'class':['devDefSysReqList']}):
            print(y.text)
Spaces in a url are not valid and need to be percent encoded as %20 or replaced with +.
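For reference, both encodings are available in urllib.parse; a quick illustration (the path below is made up):
from urllib.parse import quote, quote_plus

path = "games/index.php?g_id=23521&game=SOME GAME"  # hypothetical relative URL with a space
print(quote(path, safe="/?&="))  # games/index.php?g_id=23521&game=SOME%20GAME
print(quote_plus("SOME GAME"))   # SOME+GAME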

download images with google customsearch api

My question is probably trivial, but I tried to configure my Custom Search Engine to download the first 10 results from Google Images. Here is the Python script. It works, except that my results are very far from what Google Images returns with the same parameters. Can someone tell me what I'm missing?
# -*- coding: utf-8 -*-
import os
import sys
from urllib import FancyURLopener
import urllib2
import simplejson
from apiclient.discovery import build

class MyOpener(FancyURLopener):
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'

myopener = MyOpener()

service = build("customsearch", "v1",
                developerKey="** my Key **")

res = service.cse().list(
    q='SearchTerm',
    cx='** my cx**',
    searchType='image',
    num=10,
    imgType='photo',
    fileType='jpg',
    imgSize="xxlarge"
).execute()

count = 0
for item in res['items']:
    myopener.retrieve(item['link'], str(count))
    count = count + 1
