I'm having a problem with this code. I need to make an HTTP request to get some information from an API. I'm having trouble with the urllib request; I don't think I'm using the right library, because the code I based this on uses urllib2, but since Python 3 is different I don't know which one to use.
import urllib.request
import json
import os
from pprint import pprint

REST_URL = "http://data.bioontology.org"
API_KEY = "MYAPIKEY"

def get_json(url):
    opener = urllib.request.urlopen(url)
    opener.addheaders = [('Authorization', 'apikey token=' + API_KEY)]
    return json.loads(opener.open(url).read())
    print(url)

# Get list of search terms
path = os.path.join(os.path.dirname("__file__"), 'classes_search_terms.txt')
terms_file = open(path, "r")

terms = []
for line in terms_file:
    terms.append(line)

# Do a search for every term
search_results = []
for term in terms:
    search_results.append(get_json(REST_URL + "/search?q=" + term)["collection"])

# Print the results
for result in search_results:
    pprint(result)
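For reference, a minimal Python 3 sketch of sending that header with urllib.request alone: urlopen() returns a response object, not an opener, so the Authorization header has to be attached to a Request before the call. REST_URL and API_KEY are the same placeholders as in the code above.

import urllib.request
import json

REST_URL = "http://data.bioontology.org"
API_KEY = "MYAPIKEY"  # placeholder, as above

def get_json(url):
    # Attach the Authorization header to the request itself instead of
    # setting addheaders on the object returned by urlopen().
    request = urllib.request.Request(
        url, headers={'Authorization': 'apikey token=' + API_KEY})
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read().decode('utf-8'))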
I wrote some code to download PDFs from a website, and it works perfectly, downloading all the PDFs (first code below). However, when I split the code into functions, only two links end up in the "papers" list and the execution finishes with exit code zero, but the following warning message appears:
GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 11 of the file C:\Downloads\EditoraCL\download_pdf.py. To get rid of this warning, pass the additional argument 'features="html.parser"' to the BeautifulSoup constructor.
for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
FIRST CODE:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

http = httplib2.Http()
status, response = http.request('https://www.snh2021.anpuh.org/site/anais')

for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        papers.append(link['href'])
print(papers)

for x in papers:
    if x.endswith('pdf'):
        pdfs.append(x)
print(pdfs)

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        print('Download concluído. Salvo em {}'.format(endereco))
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
CODE DIVIDED INTO FUNCTIONS:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

def busca_links():
    http = httplib2.Http()
    status, response = http.request('https://www.snh2021.anpuh.org/site/anais')
    for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
        if link.has_attr('href'):
            papers.append(link['href'])
    return papers

def links_pdf():
    for x in papers:
        if x.endswith('pdf'):
            pdfs.append(x)
    return pdfs

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        return f'Download concluído. Salvo em {endereco}'
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    busca_links()
    links_pdf()
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    print(papers)
    print(pdfs)
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
Could someone help me understand why the second code is giving this error?
Functions do not share their local variables, so to make your code work you should assign each function's return value to a name at the call site (papers = busca_links() and pdfs = links_pdf(papers)).
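A minimal sketch of that change, keeping the original names (note that links_pdf now receives the list as a parameter instead of relying on a global):

def busca_links():
    papers = []
    http = httplib2.Http()
    status, response = http.request('https://www.snh2021.anpuh.org/site/anais')
    # 'html.parser' silences the GuessedAtParserWarning quoted above
    for link in BeautifulSoup(response, 'html.parser', parse_only=SoupStrainer('a')):
        if link.has_attr('href'):
            papers.append(link['href'])
    return papers

def links_pdf(papers):
    return [x for x in papers if x.endswith('pdf')]

if __name__ == '__main__':
    papers = busca_links()
    pdfs = links_pdf(papers)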
Anyway, for the purpose of organization and clearer code, you should use classes and methods:
import os
import requests
import httplib2
from bs4 import BeautifulSoup, SoupStrainer

class Pdf:
    def __init__(self, base_url, url):
        self.main_dir = os.path.dirname(__file__)
        self.pdfs_dir = os.path.join(self.main_dir, 'pdfs')
        self.base_url = base_url
        self.url = url

    def get_links(self):
        http = httplib2.Http()
        status, response = http.request(self.url)
        self.links = []
        # Pass an explicit parser to avoid the GuessedAtParserWarning
        for link in BeautifulSoup(response, 'html.parser', parse_only=SoupStrainer('a')):
            if link.has_attr('href'):
                if link['href'].endswith('pdf'):
                    self.links.append(f"{self.base_url}{link['href']}")

    def download_pdf(self):
        for link in self.links:
            response = requests.get(link, stream=True)
            if response.status_code == 200:
                file_path = os.path.join(self.pdfs_dir, link.split('/')[-1])
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print('Success. Saved on {}'.format(file_path))
            else:
                # Should handle errors here, by appending them to a list and
                # trying again later.
                print('Error.')

if __name__ == '__main__':
    base_url = 'https://www.snh2021.anpuh.org/'
    url = f'{base_url}site/anais'
    pdf = Pdf(base_url, url)
    pdf.get_links()
    pdf.download_pdf()
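One caveat with the class above: open() will fail if the pdfs directory does not exist yet, so it is worth creating it in __init__, for example:

        # create the output directory if it is missing
        os.makedirs(self.pdfs_dir, exist_ok=True)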
I want to write a translation API using this site, which has many desirable features when dealing with sentences containing wildcards.
First I used F12 in Chrome to see which request URL is used to produce the result.
I checked that only salt and sign change when I use different inputs.
So I looked at the JS source code to see how salt and sign are produced.
Then I used the Python library urllib to send the request and get the response. But the translation in the response was not the same as the one I get in the browser. For example,
Input :"what album was #head_entity# released on?"
Output_browser: "#head_entity#发布了什么专辑?"
Output_python:"发布的专辑是什么# head_entity?#"
which is clearly different.
This is the code for producing my result:
import urllib.request
import urllib.parse
import json
import time
import random
import hashlib

def translator(content):
    """arg:content"""
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    data = {}
    u = 'fanyideskweb'
    d = content
    f = str(int(time.time()*1000) + random.randint(1, 10))
    c = 'rY0D^0\'nM0}g5Mm1z%1G4'
    sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
    data['i'] = content
    data['from'] = 'AUTO'
    data['to'] = 'AUTO'
    data['smartresult'] = 'dict'
    data['client'] = 'fanyideskweb'
    data['salt'] = f
    data['sign'] = sign
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['action'] = 'FY_BY_CL1CKBUTTON'
    data['typoResult'] = 'true'
    data = urllib.parse.urlencode(data).encode('utf-8')
    request = urllib.request.Request(url=url, data=data, method='POST')
    response = urllib.request.urlopen(request)
    d = json.loads(response.read().decode('utf-8'))
    return d['translateResult'][0][0]['tgt']

translator('what album was #head_entity# released on?')
The only thing I think I changed, compared with the request the original page sends, is the url argument in the code:
My_url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
Original_url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule' (using this one gave me the error {"errorCode":50})
I checked the header and data parameters one by one but still can't solve the problem. I have no idea why this happened. Any ideas?
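One difference worth ruling out: the browser also sends request headers (User-Agent, Referer, Cookie) that urllib does not send by default, and the translate_o endpoint may be rejecting the bare request because of that. This is an assumption, not something confirmed here; a sketch of attaching such headers to the POST:

import urllib.request
import urllib.parse

def post_form(url, data, cookie=''):
    # Browser-like headers; whether these exact values satisfy the
    # translate_o endpoint is an assumption.
    body = urllib.parse.urlencode(data).encode('utf-8')
    request = urllib.request.Request(url, data=body, method='POST')
    request.add_header('User-Agent',
                       'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')
    request.add_header('Referer', 'http://fanyi.youdao.com/')
    request.add_header('Content-Type',
                       'application/x-www-form-urlencoded; charset=UTF-8')
    if cookie:
        request.add_header('Cookie', cookie)
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')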
I am trying to get the user statuses from Weibo, but I keep having this error.
import re
import string
import sys
import os
import urllib
import urllib2
from bs4 import BeautifulSoup
import requests
from lxml import etree

reload(sys)
sys.setdefaultencoding('utf-8')

if(len(sys.argv) >= 2):
    user_id = (int)(sys.argv[1])
else:
    user_id = (int)(raw_input("input user_id: "))

cookie = {"Cookie": "******my cookies"}
url = 'http://weibo.cn/u/%d?filter=1&page=1' % user_id
html = requests.get(url, cookies=cookie).content
selector = etree.HTML(html)
pageNum = (int)(selector.xpath('//input[@name="mp"]')[0].attrib['value'])
result = ""
urllist_set = set()
word_count = 1
image_count = 1
print 'spider is ready...'

for page in range(1, pageNum+1):
    url = 'http://weibo.cn/u/%d?filter=1&page=%d' % (user_id, page)
    lxml = requests.get(url, cookies=cookie).content
    selector = etree.HTML(lxml)
    content = selector.xpath('//span[@class="ctt"]')
    for each in content:
        text = each.xpath('string(.)')
        if word_count >= 4:
            text = "%d :" % (word_count-3) + text + "\n\n"
        else:
            text = text + "\n\n"
        result = result + text
        word_count += 1

fo = open("/Users/apple/Desktop/%s" % user_id, "wb")
fo.write(result)
word_path = os.getcwd() + '/%d' % user_id
print 'done'
Error:
File "weibo_spider.py", line 25, in <module>
pageNum = (int)(selector.xpath('//input[#name="mp"]')[0].attrib['value'])
IndexError: list index out of range
You are assuming selector.xpath() will always find something, but that is not true in many cases, so build the habit of defensive programming. See Defensive Programming.
Try replacing
pageNum = (int)(selector.xpath('//input[@name="mp"]')[0].attrib['value'])
With:
controls = selector.xpath('//input[@name="mp"]')
if controls:
    pageNum = int(controls[0].attrib['value'])
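If nothing matches you still have to decide what to do; one option (just a sketch, exiting is not the only choice) is to stop with a readable message instead of letting the script crash later:

controls = selector.xpath('//input[@name="mp"]')
if controls:
    pageNum = int(controls[0].attrib['value'])
else:
    # Nothing matched: the page layout changed or the cookie is stale,
    # so exit with a clear message rather than an IndexError further down.
    sys.exit("could not find the page counter on %s" % url)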
I have recently followed this (http://blog.dispatched.ch/2009/03/15/webscraping-with-python-and-beautifulsoup/) tutorial on web scraping with Python and BeautifulSoup. Unfortunately, the tutorial was written for a 2.x version of Python, while I am using version 3.2.3 so as to focus on learning the version of the language that is still being developed. My program retrieves, opens, and reads the web page and enters the search term just fine (as far as I can tell), but it never enters the
for result in results
loop, so nothing is collected or printed. I think this may have to do with how I have assigned results, but I am unsure how to fix it. Here's my code:
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
import string
import sys

user_agent = 'Mozilla/5 (Solaris 10) Gecko'
headers = {'user-agent': user_agent}
url = "http://www.dict.cc"
values = {'s': 'rainbow'}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
request = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(request)
the_page = response.read()
pool = BeautifulSoup(the_page)
results = pool.find_all('td', attrs={'class': 'td7n1'})
source = ''
translations = []

for result in results:
    word = ''
    for tmp in result.find_all(text=True):
        word += " " + unicode(tmp).encode("utf-8")
    if source == '':
        source = word
    else:
        translations.append((source, word))

for translation in translations:
    print("%s => %s", translation[0], translation[1])
This link lets me get a random item from the database. However, I would like to retrieve items automatically using Python. Here's my code:
import sys
from urllib.parse import urlencode
from urllib.request import urlopen
# parameters
data = {}
data["query"] = "reviewd:yes+AND+organism:9606"
data["random"] = "yes"
url_values = urlencode(data)
url = "http://www.uniprot.org/uniprot/"
full_url = url + '?' + url_values
data = urlopen(full_url)
out = open("1.html", 'w')
out.write(str(data.read()))
However, I cannot get the desired page. Does anyone know what's wrong with my code? I'm using Python 3.x.
You have several issues:
reviewd is misspelled, it should be reviewed
The base url needs to have /uniprot/ at the end
You need to use space instead of + in your query string
Here is what that would look like:
import sys
from urllib.parse import urlencode
from urllib.request import urlopen
# parameters
data = {}
data["query"] = "reviewed:yes AND organism:9606"
data["random"] = "yes"
url_values = urlencode(data)
url = "http://www.uniprot.org/uniprot/"
full_url = url + '?' + url_values
data = urlopen(full_url)
out = open("1.html", 'w')
out.write(str(data.read()))
This produces the following URL:
http://www.uniprot.org/uniprot/?query=reviewed%3Ayes+AND+organism%3A9606&random=yes
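A small point independent of the query fix: str(data.read()) writes the repr of a bytes object, so the saved file will start with b'...'. Decoding the response before writing is usually what you want; a sketch, assuming the page is UTF-8 encoded:

data = urlopen(full_url)
with open("1.html", "w", encoding="utf-8") as out:
    # decode the bytes instead of writing their repr
    out.write(data.read().decode("utf-8"))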