python mechanize follow_link fails

I'm trying to access search results on the NCBI Images search page (http://www.ncbi.nlm.nih.gov/images) in a script. I want to feed it a search term, report on all of the results, and then move on to the next search term. To do this I need to get to results pages after the first page, so I'm trying to use python mechanize to do it:
import mechanize
browser=mechanize.Browser()
page1=browser.open('http://www.ncbi.nlm.nih.gov/images?term=drug')
a=browser.links(text_regex='Next')
nextlink=a.next()
page2=browser.follow_link(nextlink)
This just gives me back the first page of search results again (in variable page2). What am I doing wrong, and how can I get to that second page and beyond?

Unfortunately that page uses JavaScript to POST 2459 bytes of form variables to the server, just to navigate to a subsequent page. Here are a few of those variables (I count 38 in total):
EntrezSystem2.PEntrez.ImagesDb.Images_SearchBar.Term=drug
EntrezSystem2.PEntrez.ImagesDb.Images_SearchBar.CurrDb=images
EntrezSystem2.PEntrez.ImagesDb.Images_ResultsPanel.Entrez_Pager.CurrPage=2
You'll need to construct a POST request to the server containing some or all of these variables. Luckily if you get it working for page 2 you can simply increment CurrPage and send another POST to get each subsequent page of results (no need to extract links).
Update - That site is a total pain-in-the-ass, but here is a POST-based scrape of pages 2 through N. Set MAX_PAGE to the highest page number + 1. The script will produce files like file_000003.html.
Note: Before you use it, you need to replace POSTDATA with the contents of this paste blob (it expires in 1 month). It's just the body of a POST request as captured by Firebug, which I use to seed the correct params:
import cookielib
import json
import mechanize
import sys
import urllib
import urlparse

MAX_PAGE = 6
TERM = 'drug'
DEBUG = False

base_url = 'http://www.ncbi.nlm.nih.gov/images?term=' + TERM

browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.set_handle_referer(True)
browser.set_debug_http(DEBUG)
browser.set_debug_responses(DEBUG)

cjar = cookielib.CookieJar()
browser.set_cookiejar(cjar)

# make first GET request. this will populate the cookie
res = browser.open(base_url)

def write(num, data):
    with open('file_%06d.html' % num, 'wb') as out:
        out.write(data)

def encode(kvs):
    res = []
    for key, vals in kvs.iteritems():
        if isinstance(vals, list):
            for v in vals:
                res.append('%s=%s' % (key, urllib.quote(v)))
        else:
            res.append('%s=%s' % (key, urllib.quote(vals)))
    return '&'.join(res)

write(1, res.read())

# set this var equal to the contents of this: http://pastebin.com/UfejW3G0
POSTDATA = '''<post data>'''

# parse the embedded json vars into POST parameters
PREFIX1 = 'EntrezSystem2.PEntrez.ImagesDb.'
PREFIX2 = 'EntrezSystem2.PEntrez.DbConnector.'

params = dict((k, v[0]) for k, v in urlparse.parse_qs(POSTDATA).iteritems())

base_url = 'http://www.ncbi.nlm.nih.gov/images'

for page in range(2, MAX_PAGE):
    params[PREFIX1 + 'Images_ResultsPanel.Entrez_Pager.CurrPage'] = str(page)
    params[PREFIX1 + 'Images_ResultsPanel.Entrez_Pager.cPage'] = [str(page-1)]*2

    data = encode(params)
    req = mechanize.Request(base_url, data)
    cjar.add_cookie_header(req)
    req.add_header('Content-Type', 'application/x-www-form-urlencoded')
    req.add_header('Referer', base_url)

    res = browser.open(req)
    write(page, res.read())
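The script above targets Python 2 (cookielib, iteritems, urllib.quote). As a rough orientation only, the same POST-and-increment idea in Python 3 with the requests library might look like the sketch below; the parameter names come from the captured POST data above, POSTDATA stays a placeholder, and the NCBI pages may well have changed since this answer was written, so treat this as untested:

import requests
from urllib.parse import parse_qs

MAX_PAGE = 6
BASE_URL = 'http://www.ncbi.nlm.nih.gov/images'
PREFIX = 'EntrezSystem2.PEntrez.ImagesDb.'

# Paste the captured form body here, exactly as in the Python 2 version above.
POSTDATA = '''<post data>'''

session = requests.Session()                     # keeps cookies between requests
session.get(BASE_URL, params={'term': 'drug'})   # first GET populates the cookie jar

params = {k: v[0] for k, v in parse_qs(POSTDATA).items()}

for page in range(2, MAX_PAGE):
    params[PREFIX + 'Images_ResultsPanel.Entrez_Pager.CurrPage'] = str(page)
    # requests repeats a parameter when its value is a list, mirroring the original
    params[PREFIX + 'Images_ResultsPanel.Entrez_Pager.cPage'] = [str(page - 1)] * 2
    resp = session.post(BASE_URL, data=params, headers={'Referer': BASE_URL})
    with open('file_%06d.html' % page, 'wb') as out:
        out.write(resp.content)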

Related

Unable to scrape emails from some websites maybe due to r.html.render() not working properly

I have some website links as samples for extracting any emails available on their internal pages.
However, even though I try to render JS-driven websites via r.html.render() within the scrape_email(url) method, some of the websites like arken.trygge.dk, gronnebakken.dk, dagtilbud.ballerup.dk/boernehuset-bispevangen etc. do not return any emails, which might be due to a rendering issue.
I have attached the sample file for convenience of running.
I don't want to use Selenium, as there can be thousands or millions of webpages I want to extract emails from.
So far this is my code:
import os
import time
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import re
from requests_html import HTMLSession
import pandas as pd
from gtts import gTTS
import winsound

# For convenience of seeing console output in the script
pd.options.display.max_colwidth = 180

# Get the start time of script execution
startTime = time.time()

# Paste file name inside ''
input_file_name = 'sample'
input_df = pd.read_excel(input_file_name + '.xlsx', engine='openpyxl')
input_df = input_df.dropna(how='all')

internal_urls = set()
emails = set()
total_urls_visited = 0

def is_valid(url):
    """
    Checks whether `url` is a valid URL.
    """
    parsed = urlparse(url)
    return bool(parsed.netloc) and bool(parsed.scheme)

def get_internal_links(url):
    """
    Returns all URLs found on `url` that belong to the same website.
    """
    # all URLs of `url`
    urls = set()
    # domain name of the URL without the protocol
    domain_name = urlparse(url).netloc
    print("Domain name -- ", domain_name)
    try:
        soup = BeautifulSoup(requests.get(url, timeout=5).content, "html.parser")
        for a_tag in soup.findAll("a"):
            href = a_tag.attrs.get("href")
            if href == "" or href is None:
                # empty href attribute
                continue
            # join the URL if it's relative (not an absolute link)
            href = urljoin(url, href)
            parsed_href = urlparse(href)
            # remove URL GET parameters, URL fragments, etc.
            href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
            if not is_valid(href):
                # not a valid URL
                continue
            if href in internal_urls:
                # already in the set
                continue
            if parsed_href.netloc != domain_name:
                # if the link is not of the same domain, skip it
                continue
            if parsed_href.path.endswith((".csv", ".xlsx", ".txt", ".pdf", ".mp3", ".png", ".jpg", ".jpeg", ".svg", ".mov", ".js", ".gif", ".mp4", ".avi", ".flv", ".wav")):
                # skip images, PDFs and other files rather than webpages
                continue
            print(f"Internal link: {href}")
            urls.add(href)
            internal_urls.add(href)
        return urls
    except requests.exceptions.Timeout as err:
        print("The website is not loading within 5 seconds... Continuing crawling the next one")
        pass
    except:
        print("The website is unavailable. Continuing crawling the next one")
        pass

def crawl(url, max_urls=30):
    """
    Crawls a web page and extracts all links.
    You'll find all links in the `external_urls` and `internal_urls` global set variables.
    params:
        max_urls (int): number of max urls to crawl, default is 30.
    """
    global total_urls_visited
    total_urls_visited += 1
    print(f"Crawling: {url}")
    links = get_internal_links(url)
    # for link in links:
    #     if total_urls_visited > max_urls:
    #         break
    #     crawl(link, max_urls=max_urls)

def scrape_email(url):
    EMAIL_REGEX = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
    # EMAIL_REGEX = r"""(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])"""
    try:
        # initiate an HTTP session
        session = HTMLSession()
        # get the HTTP Response
        r = session.get(url, timeout=10)
        # for JavaScript-driven websites
        r.html.render()
        single_url_email = []
        for re_match in re.finditer(EMAIL_REGEX, r.html.raw_html.decode()):
            single_url_email.append(re_match.group().lower())
        r.session.close()
        return set(single_url_email)
    except:
        pass

def crawl_website_scrape_email(url, max_internal_url_no=20):
    crawl(url, max_urls=max_internal_url_no)
    each_url_emails = []
    global internal_urls
    global emails
    for each_url in internal_urls:
        each_url_emails.append(scrape_email(each_url))
    URL_WITH_EMAILS = {'main_url': url, 'emails': each_url_emails}
    emails = {}
    internal_urls = set()
    return URL_WITH_EMAILS

def list_check(emails_list, email_match):
    match_indexes = [i for i, s in enumerate(emails_list) if email_match in s]
    return [emails_list[index] for index in match_indexes]

URL_WITH_EMAILS_LIST = [crawl_website_scrape_email(x) for x in input_df['Website'].values]
URL_WITH_EMAILS_DF = pd.DataFrame(data=URL_WITH_EMAILS_LIST)
URL_WITH_EMAILS_DF.to_excel(f"{input_file_name}_email-output.xlsx", index=False)
How can I solve the issue of not being able to scrape emails from the above-mentioned and similar types of websites?
Is there also any way to detect and print a message if my GET request is refused by a bot detector or similar protection?
Also, how can I make this code more robust?
Thank you in advance
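On the bot-detection part of the question: there is no universal signal when a request is refused, but one heuristic (an assumption, not something every site follows) is to inspect the HTTP status code and treat the codes that rate limiters and WAFs commonly return as likely refusals. A minimal sketch with plain requests; fetch_with_refusal_check and the status-code set are illustrative:

import requests

# Status codes that commonly (but not always) indicate the request was refused
# by a bot detector, rate limiter, or WAF. This is a heuristic, not a guarantee.
SUSPECT_STATUS_CODES = {403, 406, 429, 503}

def fetch_with_refusal_check(url):
    """Fetch `url` and print a warning when the response looks like a refusal."""
    try:
        resp = requests.get(url, timeout=10)
    except requests.exceptions.RequestException as err:
        print(f"Request to {url} failed outright: {err}")
        return None
    if resp.status_code in SUSPECT_STATUS_CODES:
        print(f"Possible bot-detection refusal for {url}: HTTP {resp.status_code}")
    elif not resp.ok:
        print(f"Non-success response for {url}: HTTP {resp.status_code}")
    return resp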

python - Using urllib to retrieve web content but got different content from what I got using the browser

I want to write a translation API using this site, which has many desirable features when dealing with sentences containing wildcards.
First I used F12 in Chrome to see what request URL is used to produce the result.
I checked that only salt and sign changed when I use different inputs.
So I looked at the JS source code to see how salt and sign were produced.
Then I used the Python library urllib to send the request and get the response. But the translation in the response was not the same as what I get in the browser. For example,
Input :"what album was #head_entity# released on?"
Output_browser: "#head_entity#发布了什么专辑?"
Output_python:"发布的专辑是什么# head_entity?#"
which is clearly different.
This is the code for producing my result:
import urllib.request
import urllib.parse
import json
import time
import random
import hashlib

def translator(content):
    """arg:content"""
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    data = {}
    u = 'fanyideskweb'
    d = content
    f = str(int(time.time()*1000) + random.randint(1, 10))
    c = 'rY0D^0\'nM0}g5Mm1z%1G4'
    sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
    data['i'] = content
    data['from'] = 'AUTO'
    data['to'] = 'AUTO'
    data['smartresult'] = 'dict'
    data['client'] = 'fanyideskweb'
    data['salt'] = f
    data['sign'] = sign
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['action'] = 'FY_BY_CL1CKBUTTON'
    data['typoResult'] = 'true'
    data = urllib.parse.urlencode(data).encode('utf-8')
    request = urllib.request.Request(url=url, data=data, method='POST')
    response = urllib.request.urlopen(request)
    d = json.loads(response.read().decode('utf-8'))
    return d['translateResult'][0][0]['tgt']

translator('what album was #head_entity# released on?')
The only thing I think I changed, compared to the request the original page makes, was the url argument in the code:
My_url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
Original_url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
Using the original URL gave me an error {"errorCode":50}.
I checked the header and data parameters one by one but still can't solve the problem. I have no idea why this happened. Any ideas?
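A hedged guess at the cause, not something confirmed here: endpoints like translate_o that return {"errorCode":50} are often pickier about browser-style headers (User-Agent, Referer, and the site's cookies) than the plain translate endpoint, so a request that sends only the form data can be rejected. A sketch of attaching such headers with urllib; the header values shown are placeholders to be copied from the browser's network tab:

import urllib.request
import urllib.parse

def post_with_browser_headers(url, data_dict, cookie=''):
    """POST form data with headers that mimic the browser request (sketch only)."""
    data = urllib.parse.urlencode(data_dict).encode('utf-8')
    headers = {
        # Illustrative values; copy the real ones from the request you are
        # trying to reproduce in the browser's network tab.
        'User-Agent': 'Mozilla/5.0',
        'Referer': 'http://fanyi.youdao.com/',
        'X-Requested-With': 'XMLHttpRequest',
        'Cookie': cookie,
    }
    request = urllib.request.Request(url=url, data=data, headers=headers, method='POST')
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')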

boto3 pagination: Query contents of a page by its exact number

Trying to implement pagination using boto3's get_paginator for the query operation. I can't find a proper way to get, let's say, page number 3 without loading the contents of the previous two pages:
import boto3
from boto3.dynamodb.conditions import Key, Attr

dynamodb = boto3.resource("dynamodb", region_name="us-west-2")
paginator = dynamodb.meta.client.get_paginator("query")
pages = paginator.paginate(TableName="results",
                           IndexName="year-timestamp-index",
                           KeyConditionExpression=Key("year").eq("2017"),
                           PaginationConfig={"PageSize": 50})

def get_content_of_page(requested_page_num, pages):
    pages_counter = 0
    for page in pages:
        if pages_counter == requested_page_num - 1:
            return page.get("Items")
        pages_counter += 1

get_content_of_page(3, pages)  # will load and skip the contents of
                               # 2 pages and return the content of the 3rd
I consider this too expensive; is it possible to get the 3rd page directly?
Like Mongo's cursor.skip(page_size * (requested_page_num - 1)).limit(page_size)
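For what it's worth, DynamoDB's Query API is cursor-based (LastEvaluatedKey / ExclusiveStartKey), so there is no server-side way to jump straight to an arbitrary page number; the closest boto3 offers is saving the paginator's resume_token and continuing from it later with StartingToken, which at least avoids re-reading pages you have already consumed. A sketch (table and index names mirror the question, the helper names are illustrative, and the low-level string form of the key condition is used since this goes through the client paginator):

import boto3

client = boto3.client("dynamodb", region_name="us-west-2")
paginator = client.get_paginator("query")

QUERY_KWARGS = dict(
    TableName="results",
    IndexName="year-timestamp-index",
    KeyConditionExpression="#y = :year",
    ExpressionAttributeNames={"#y": "year"},
    ExpressionAttributeValues={":year": {"S": "2017"}},
)

def read_first_pages(max_items):
    """Consume the pages we actually need and remember where we stopped."""
    it = paginator.paginate(
        PaginationConfig={"PageSize": 50, "MaxItems": max_items}, **QUERY_KWARGS
    )
    items = [item for page in it for item in page["Items"]]
    # resume_token encodes the position after MaxItems (None if the query is exhausted)
    return items, it.resume_token

def read_next_page(token):
    """Continue from the saved token instead of re-reading earlier pages."""
    it = paginator.paginate(
        PaginationConfig={"PageSize": 50, "MaxItems": 50, "StartingToken": token},
        **QUERY_KWARGS,
    )
    return [item for page in it for item in page["Items"]]

If true random access by page number is required, the usual workaround is to cache the resume token (or LastEvaluatedKey) for each page boundary as you first encounter it.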

Issue with web crawler: IndexError: string index out of range

I am making a web crawler. I'm not using Scrapy or anything; I'm trying to have my script do most things itself. I have tried searching for the issue, but I can't seem to find anything that helps with the error. I've tried switching around some of the variables to try and narrow down the problem. I am getting an error on line 24 saying IndexError: string index out of range. The functions run on the first URL (the original URL), then the second, and fail on the third in the resulting array. I'm lost, any help would be appreciated greatly! Note: I'm only printing all of them for testing; I'll eventually have them printed to a text file.
import requests
from bs4 import BeautifulSoup

# creating requests from user input
url = raw_input("Please enter a domain to crawl, without the 'http://www' part : ")

def makeRequest(url):
    r = requests.get('http://' + url)
    # Adding in BS4 for finding a tags in HTML
    soup = BeautifulSoup(r.content, 'html.parser')
    # Writes a as the link found in the href
    output = soup.find_all('a')
    return output

def makeFilter(link):
    # Creating array for our links
    found_link = []
    for a in link:
        a = a.get('href')
        a_string = str(a)
        # if statement to filter our links
        if a_string[0] == '/':  # this is the line with the error
            # Relative links
            found_link.append(a_string)
        if 'http://' + url in a_string:
            # Links from the same site
            found_link.append(a_string)
        if 'https://' + url in a_string:
            # Links from the same site with SSL
            found_link.append(a_string)
        if 'http://www.' + url in a_string:
            # Links from the same site
            found_link.append(a_string)
        if 'https://www.' + url in a_string:
            # Links from the same site with SSL
            found_link.append(a_string)
        #else:
        #    found_link.write(a_string + '\n') # testing only
    output = found_link
    return output

# Function for removing duplicates
def remove_duplicates(values):
    output = []
    seen = set()
    for value in values:
        if value not in seen:
            output.append(value)
            seen.add(value)
    return output

# Run the functions in this order -> Make the request -> Filter the links -> Remove duplicates
def createURLList(values):
    requests = makeRequest(values)
    new_list = makeFilter(requests)
    filtered_list = remove_duplicates(new_list)
    return filtered_list

result = createURLList(url)
# print result

# for verifying and crawling resulting pages
for b in result:
    sub_directories = createURLList(url + b)
    crawler = []
    crawler.append(sub_directories)
    print crawler
After a_string = str(a) try adding:
    if not a_string:
        continue
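An alternative worth noting (a suggestion beyond the answer above): str.startswith('/') simply returns False for an empty string instead of raising IndexError, so the relative-link check can avoid indexing altogether:

def is_relative_link(a_string):
    """True for hrefs like '/about'; safe on '' because startswith never indexes."""
    return a_string.startswith('/')

In makeFilter, the failing line `if a_string[0] == '/':` would then become `if is_relative_link(a_string):` (or just `if a_string.startswith('/'):`).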

Adding counter to my Python Web Crawler

I've made a web crawler which gives a link and the text from the link for all pages under a given address. It looks like this:
import urllib
from bs4 import BeautifulSoup
import urlparse
import mechanize

url = ["http://adbnews.com/area51"]

for u in url:
    br = mechanize.Browser()
    urls = [u]
    visited = [u]
    i = 0
    while i < len(urls):
        try:
            br.open(urls[0])
            urls.pop(0)
            for link in br.links():
                levelLinks = []
                linkText = []
                newurl = urlparse.urljoin(link.base_url, link.url)
                b1 = urlparse.urlparse(newurl).hostname
                b2 = urlparse.urlparse(newurl).path
                newurl = "http://" + b1 + b2
                linkTxt = link.text
                linkText.append(linkTxt)
                levelLinks.append(newurl)
                if newurl not in visited and urlparse.urlparse(u).hostname in newurl:
                    urls.append(newurl)
                    visited.append(newurl)
                    #print newurl
                #get Mechanize Links
                for l, lt in zip(levelLinks, linkText):
                    print newurl, "\n", lt, "\n"
        except:
            urls.pop(0)
It gets results like this:
http://www.adbnews.com/area51/contact.html
CONTACT
http://www.adbnews.com/area51/about.html
ABOUT
http://www.adbnews.com/area51/index.html
INDEX
http://www.adbnews.com/area51/1st/
FIRST LEVEL!
http://www.adbnews.com/area51/1st/bling.html
BLING
http://www.adbnews.com/area51/1st/index.html
INDEX
http://adbnews.com/area51/2nd/
2ND LEVEL
And I want to add a counter of some kind which could limit how deep the crawler goes.
I've tried adding, for example, steps = 3 and changing while i < len(urls) to while i < steps:
but that would only go to the first level even though the number says 3...
Any advice is welcome
If you want to search to a certain "depth", consider using a recursive function instead of just appending a list of URLs.
def crawl(url, depth):
    if depth <= 3:
        #Scan page, grab links, title
        for link in links:
            print crawl(link, depth + 1)
    return url + "\n" + title
This allows for easier control of your recursive searching, as well as being faster and less resource heavy :)
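Building on the question's own mechanize loop, the same depth limit can also be enforced without recursion by queuing (url, depth) pairs, so every link remembers how deep it sits. A rough sketch in the question's Python 2 style; crawl_with_depth and MAX_DEPTH are illustrative names, not part of the original code:

import mechanize
import urlparse

MAX_DEPTH = 3  # illustrative limit; tune as needed

def crawl_with_depth(start_url, max_depth=MAX_DEPTH):
    br = mechanize.Browser()
    start_host = urlparse.urlparse(start_url).hostname
    # the queue holds (url, depth) pairs so every link carries its own depth
    queue = [(start_url, 0)]
    visited = set([start_url])
    while queue:
        current, depth = queue.pop(0)
        try:
            br.open(current)
        except Exception:
            continue
        for link in br.links():
            newurl = urlparse.urljoin(link.base_url, link.url)
            print newurl, "\n", link.text, "\n"
            # only queue unseen same-host links, and only if following them
            # would not exceed the depth limit
            if (newurl not in visited
                    and urlparse.urlparse(newurl).hostname == start_host
                    and depth + 1 < max_depth):
                visited.add(newurl)
                queue.append((newurl, depth + 1))

crawl_with_depth("http://adbnews.com/area51")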
