Having difficulties with my python script - python

I am new to Python and could use a little help. I am trying to write a script that will go out to a specific web site and download multiple .gif images from different spots on that site. Can anyone point me in the right direction? This is the first script I have tried to write.
Here is what I have so far.
from BeautifulSoup import BeautifulSoup as bs
import urlparse
from urllib2 import urlopen
from urllib import urlretrieve
import os
import sys

def main(url, out_folder=r"C:\Users\jerry\Desktop\Heli/"):
    """Downloads all the gif images at 'url' into out_folder"""
    soup = bs(urlopen(url))
    parsed = list(urlparse.urlparse(url))

    for gif in soup.findAll("img"):
        print "gif: %(src)s" % gif
        filename = gif["src"].split("/")[-1]
        parsed[2] = gif["src"]
        outpath = os.path.join(out_folder, filename)
        if gif["src"].lower().startswith("http"):
            urlretrieve(gif["src"], outpath)
        else:
            urlretrieve(urlparse.urlunparse(parsed), outpath)

def _usage():
    print "usage: python dumpimages.py http://folkworm.ceri.memphis.edu/heli/heli_bb_ag/ [outpath]"

if __name__ == "__main__":
    url = sys.argv[-1]
    out_folder = "/test/"
    if not url.lower().startswith("http"):
        out_folder = sys.argv[-1]
        url = sys.argv[-2]
        if not url.lower().startswith("http"):
            _usage()
            sys.exit(-1)
    main(url, out_folder)

Here is the basic idea.
>>> import requests
>>> from bs4 import BeautifulSoup
>>> item = requests.get('http://folkworm.ceri.memphis.edu/heli/heli_bb_ag/')
>>> page = item.text
>>> soup = BeautifulSoup(page, 'lxml')
>>> links = soup.findAll('a')
>>> for link in links:
...     if '.gif' in link.attrs['href']:
...         print(link.attrs['href'])
...         break
...
CCAR_HHZ_AG_00.2017012700.gif?v=1485534942
The break statement is there just to stop the loop so that it doesn't print the names of all of the GIFs. The next step would be to add code to that loop that concatenates the URL passed to requests.get with the name of each GIF and issues another requests.get for it. This time, though, you would use, say, image = pic.content to get the image as bytes, which you can write to a file of your choice.
EDIT: Fleshed out below. Note that you still need to arrange to provide a distinct file name for each output file.
>>> import requests
>>> from bs4 import BeautifulSoup
>>> URL = 'http://folkworm.ceri.memphis.edu/heli/heli_bb_ag/'
>>> item = requests.get(URL)
>>> page = item.text
>>> soup = BeautifulSoup(page, 'lxml')
>>> links = soup.findAll('a')
>>> for link in links:
...     if '.gif' in link.attrs['href']:
...         print(link.attrs['href'])
...         pic = requests.get(URL + link.attrs['href'])
...         image = pic.content
...         open('pic.gif', 'wb').write(image)
...         break
...
CCAR_HHZ_AG_00.2017012700.gif?v=1485535857
100846
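As a rough sketch of the missing file-naming piece (my addition, not part of the original answer), you could derive each output name from the href itself, dropping the ?v=... cache-busting query string. Assuming the same URL, soup, and requests objects as above:

import os

for link in soup.findAll('a'):
    href = link.attrs.get('href', '')
    if '.gif' not in href:
        continue
    # e.g. "CCAR_HHZ_AG_00.2017012700.gif?v=1485535857" -> "CCAR_HHZ_AG_00.2017012700.gif"
    filename = href.split('?')[0]
    pic = requests.get(URL + href)
    with open(filename, 'wb') as f:
        f.write(pic.content)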

Related

I'm not able to split my code into functions

I wrote some code to download PDFs from a website, and it works perfectly, downloading all of the PDFs (first code below). However, when I split the code into functions, only two links are inserted into the "papers" list, the execution ends with exit code zero, and the following warning message appears:
GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 11 of the file C:\Downloads\EditoraCL\download_pdf.py. To get rid of this warning, pass the additional argument 'features="html.parser"' to the BeautifulSoup constructor.
for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
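As an aside about the warning itself: it can be silenced by passing an explicit parser, as the message suggests. A minimal change to that line (keeping everything else the same) would be:

for link in BeautifulSoup(response, 'html.parser', parse_only=SoupStrainer('a')):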
FIRST CODE:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

http = httplib2.Http()
status, response = http.request('https://www.snh2021.anpuh.org/site/anais')

for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
    if link.has_attr('href'):
        papers.append(link['href'])
print(papers)

for x in papers:
    if x.endswith('pdf'):
        pdfs.append(x)
print(pdfs)

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        print('Download concluído. Salvo em {}'.format(endereco))
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
CODE DIVIDED INTO FUNCTIONS:
import requests
import httplib2
import os
from bs4 import BeautifulSoup, SoupStrainer

papers = []
pdfs = []

def busca_links():
    http = httplib2.Http()
    status, response = http.request('https://www.snh2021.anpuh.org/site/anais')
    for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
        if link.has_attr('href'):
            papers.append(link['href'])
    return papers

def links_pdf():
    for x in papers:
        if x.endswith('pdf'):
            pdfs.append(x)
    return pdfs

def baixa_arquivo(url, endereco):
    resposta = requests.get(url)
    if resposta.status_code == requests.codes.OK:
        with open(endereco, 'wb') as novo_arquivo:
            novo_arquivo.write(resposta.content)
        return f'Download concluído. Salvo em {endereco}'
    else:
        resposta.raise_for_status()

if __name__ == '__main__':
    busca_links()
    links_pdf()
    url_basica = 'https://www.snh2021.anpuh.org/{}'
    output = 'Download'
    print(papers)
    print(pdfs)
    for i in range(1, len(pdfs)):
        nome_do_arquivo = os.path.join(output, 'artigo{}.pdf'.format(i))
        a = pdfs[i]
        z = url_basica.format(a)
        y = requests.get(z)
        if y.status_code != 404:
            baixa_arquivo(z, nome_do_arquivo)
Could someone help me understand why the second code is giving this error?
Functions do not share their local variables, so to make your code work you should capture the values they return instead of relying on side effects: assign the result back, as in papers = busca_links() and then links_pdf(papers).
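A minimal sketch of that pattern (my wording; it also changes links_pdf to take the list as a parameter, which is not your original signature):

def links_pdf(papers):
    # Work on the list that is passed in and hand the result back to the caller
    return [x for x in papers if x.endswith('pdf')]

if __name__ == '__main__':
    papers = busca_links()
    pdfs = links_pdf(papers)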
In any case, for the sake of organization and clearer code, you could use classes and methods:
import os
import requests
import httplib2
from bs4 import BeautifulSoup, SoupStrainer


class Pdf:
    def __init__(self, base_url, url):
        self.main_dir = os.path.dirname(__file__)
        self.pdfs_dir = os.path.join(self.main_dir, 'pdfs')
        self.base_url = base_url
        self.url = url

    def get_links(self):
        http = httplib2.Http()
        status, response = http.request(self.url)
        self.links = []
        for link in BeautifulSoup(response, parse_only=SoupStrainer('a')):
            if link.has_attr('href'):
                if link['href'].endswith('pdf'):
                    self.links.append(f"{self.base_url}{link['href']}")

    def download_pdf(self):
        for link in self.links:
            response = requests.get(link, stream=True)
            if response.status_code == 200:
                file_path = os.path.join(self.pdfs_dir, link.split('/')[-1])
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print('Success. Saved on {}'.format(file_path))
            else:
                # Should handle errors here, by appending them to a list and
                # trying again later.
                print('Error.')


if __name__ == '__main__':
    base_url = 'https://www.snh2021.anpuh.org/'
    url = f'{base_url}site/anais'

    pdf = Pdf(base_url, url)
    pdf.get_links()
    pdf.download_pdf()
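One caveat about the sketch above: it assumes a pdfs directory already exists next to the script. If it might not, a small guard at the end of __init__ (my addition) avoids a FileNotFoundError when writing:

# Create the output directory if it is not already there (my addition)
os.makedirs(self.pdfs_dir, exist_ok=True)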

if string contains from list

I want to check if any of the excluded sites show up. I can get it to work with just one site, but as soon as I make it a list, it errors at if donts in thingy:
TypeError: 'in <string>' requires string as left operand, not tuple
This is my code:
import requests
from bs4 import BeautifulSoup
from lxml import html, etree
import sys
import re

url = ("http://stackoverflow.com")
donts = ('stackoverflow.com', 'stackexchange.com')

r = requests.get(url, timeout=6, verify=True)
soup = BeautifulSoup(r.content, 'html.parser')
for link in soup.select('a[href*="http"]'):
    thingy = (link.get('href'))
    thingy = str(thingy)
    if donts in thingy:
        pass
    else:
        print(thingy)
import requests
from bs4 import BeautifulSoup
from lxml import html, etree
import sys
import re

url = ("http://stackoverflow.com")
donts = ('stackoverflow.com', 'stackexchange.com')

r = requests.get(url, timeout=6, verify=True)
soup = BeautifulSoup(r.content, 'html.parser')
for link in soup.select('a[href*="http"]'):
    thingy = (link.get('href'))
    thingy = str(thingy)
    if thingy in donts:
        print(thingy)
    else:
        pass
See also: string in tuple
The crux of your problem is how you're searching your excluded list:
excluded = ("a", "b", "c")
links = ["a", "d", "e"]

for site in links:
    if site not in excluded:  # We want to know if the site is in the excluded list
        print(f"Site not excluded: {site}")
Reverse the order of your operands and this should work fine. I've inverted the logic here so you can skip the unnecessary pass.
As a side note, this is one reason clear variable names help: they make it easier to reason about what the logic should be doing. Especially in Python, where ergonomic constructs like in exist, this is very useful.
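For reference, here is the distinction the error message points at, shown in a quick interpreter session (my illustration, using the same donts tuple):

>>> donts = ('stackoverflow.com', 'stackexchange.com')
>>> donts in 'https://stackoverflow.com/questions'      # tuple on the left: TypeError
Traceback (most recent call last):
  ...
TypeError: 'in <string>' requires string as left operand, not tuple
>>> 'https://stackoverflow.com/questions' in donts      # exact membership only: False
False
>>> any(d in 'https://stackoverflow.com/questions' for d in donts)   # substring check per item
True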
import requests
from bs4 import BeautifulSoup
from lxml import html, etree
import sys
import re

url = ("http://stackoverflow.com")
donts = ('stackoverflow.com', 'stackexchange.com')

r = requests.get(url, timeout=6, verify=True)
soup = BeautifulSoup(r.content, 'html.parser')
for link in soup.select('a[href*="http"]'):
    thingy = (link.get('href'))
    thingy = str(thingy)
    if any(d in thingy for d in donts):
        pass
    else:
        print(thingy)

How to get HTML of the whole page without scrolling?

import requests
import urllib.request
from bs4 import BeautifulSoup

def get_photos(nick, how_many):
    url = f"https://www.picuki.com/profile/{nick}"
    content = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}).content
    soup = BeautifulSoup(content, "html.parser")
    images = [f["src"] for f in soup.findAll('img', class_="post-image")]
    for index, image in enumerate(images, start=1):
        urllib.request.urlretrieve(image, f"/Users/user/PycharmProjects/untitled1/Instagram_images/image{index}.png")
        if index == how_many:
            break

if __name__ == "__main__":
    get_photos("Username", 20)
So I have this code, which downloads images in PNG format from Instagram. The problem is that the page only loads 18 images without scrolling. So if I want images 18-36 I need to scroll down the page one more time, and for 36-54 I need to scroll down twice and get its HTML. How do I do that with requests, and is it even possible with this module?
The images are loaded with Ajax, but you can emulate the Ajax requests with the requests module.
This script will print all image URLs found on the user's profile:
import requests
from bs4 import BeautifulSoup

username = 'itsdougthepug'
base_url = 'https://www.picuki.com/profile/{username}'

def get_image_urls(username):
    url = base_url.format(username=username)
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')

    while True:
        for f in soup.findAll('img', class_="post-image"):
            yield f['src']

        load_more_url = soup.select_one('.load-more-wrapper[data-next]')
        if not load_more_url:
            load_more_url = soup.select_one('.pagination-next-page-input[value]')
            if load_more_url:
                load_more_url = load_more_url['value']
        else:
            load_more_url = load_more_url['data-next']

        if not load_more_url:
            break

        soup = BeautifulSoup(requests.get('https://www.picuki.com' + load_more_url).content, 'html.parser')

for img in get_image_urls(username):
    print(img)
Prints:
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/p640x640/103328423_965950027183296_957866876806120724_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=100&_nc_ohc=sW8Ic2lI-4UAX_b7bkB&oh=dc42f3f625065b6fba524bd39fc29cb0&oe=5EE7819B
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/p640x640/103183716_3364797436946158_1962633742202963007_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=1&_nc_ohc=OjegUcacb2kAX_BGNBA&oh=92a8035ffed07e724a77617c6ff73b73&oe=5F0F1F22
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/s640x640/102951446_2650089068539996_1395066409287738000_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=1&_nc_ohc=zXDXxxtqYUkAX9_1jE3&oh=06e83257c7a2b1cfea593719a3af60d2&oe=5F0D3F32
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/p640x640/103290695_2721943028038123_664290938707092396_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=107&_nc_ohc=cZKGnM3wjBwAX9wsGvR&oh=132218410341a0ffc2d7d78f38904a01&oe=5F104353
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/p640x640/103207650_283928112789317_1081832932435688252_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=105&_nc_ohc=3XfsL50CwCoAX9k2_dN&oh=969bdf74e73466a39952957bfd8ec528&oe=5F0E2A91
https://scontent-sin6-2.cdninstagram.com/v/t51.2885-15/sh0.08/e35/s640x640/102546510_111827600395599_8198630171951588410_n.jpg?_nc_ht=scontent-sin6-2.cdninstagram.com&_nc_cat=103&_nc_ohc=cVJqLrxo-fUAX9fBZtG&oh=8edcc8a5bf56519d0155e6d23ac514b3&oe=5F0EA104
... and so on.
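To save the images rather than just print their URLs, a minimal follow-up sketch (my addition, reusing get_image_urls from above; the file names image1.jpg, image2.jpg, ... are my own choice):

for index, img_url in enumerate(get_image_urls(username), start=1):
    data = requests.get(img_url).content
    with open(f'image{index}.jpg', 'wb') as f:
        f.write(data)
    if index == 20:  # stop after however many images you need
        break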

How to detect a strong tag and add a "*" to each?

I have this code in Python, and what it does is scrape the text content of the articles from a website and save them in different files. I would like to know how to detect each strong tag and add a "*" before or after it.
Here is the code I have so far:
import urllib2
import re
from bs4 import BeautifulSoup
import time

def _remove_attrs(soup):
    for tag in soup.findAll(True):
        href = ''
        if (tag.has_attr('href')):
            href = tag.get('href')
        src = ''
        if (tag.has_attr('src')):
            src = tag.get('src')
        # tag.attrs = None
        tag.attrs = {}
        if (href != ''):
            tag['href'] = href
        if (src != ''):
            tag['src'] = src
    return soup

def _remove_empty(soup):
    return soup
    for x in soup.find_all():
        if len(x.text) == 0:
            x.extract()
    return soup

base_url = 'http://www.scavonehnos.com.py/index.php?mact=Vmcs,cntnt01,print,0&cntnt01articleid='

for x in range(10, 12):
    n_url = base_url + str(x)
    print("#PAGINA: " + n_url)
    page = urllib2.urlopen(n_url)
    soup = BeautifulSoup(page, 'html.parser')
    contenido = (soup.div.get_text())
    file = open('vicentec/prod_' + str(x) + '.txt', 'w')
    file.write(u' '.strip(contenido).join((contenido)).encode('utf-8'))
    file.close()
    time.sleep(5)
As you can see, I want to add the asterisk to each <strong> tag on the page.
For those who visit this question: I already solved this myself, and it works perfectly:
import urllib2
import re
from bs4 import BeautifulSoup
import time

def _remove_attrs(soup):
    for tag in soup.findAll(True):
        href = ''
        if (tag.has_attr('href')):
            href = tag.get('href')
        src = ''
        if (tag.has_attr('src')):
            src = tag.get('src')
        # tag.attrs = None
        tag.attrs = {}
        if (href != ''):
            tag['href'] = href
        if (src != ''):
            tag['src'] = src
    return soup

def _remove_empty(soup):
    return soup
    for x in soup.find_all(''):
        if len(x.text) == 0:
            x.extract()
    return soup

base_url = 'http://www.scavonehnos.com.py/index.php?mact=Vmcs,cntnt01,print,0&cntnt01articleid='

for x in range(10, 225):
    n_url = base_url + str(x)
    print("#PAGINA: " + n_url)
    page = urllib2.urlopen(n_url)
    soup = BeautifulSoup(page, 'html.parser')
    for strong in soup.select('strong'):
        strong.replace_with('#' + strong.get_text())
    contenido = (soup.div.get_text())
    fprod = 'vicentec/prod_' + (str(x)) + '.txt'
    file = open(fprod, "w")
    file.write(u' '.strip(contenido).join((contenido)).encode('utf-8'))
    file.close()
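If you want the asterisk from the original question instead of '#', the same replace_with technique covers it; a small variant of the loop above (my suggestion):

for strong in soup.select('strong'):
    # Wrap the tag's text in asterisks instead of prefixing '#'
    strong.replace_with('*' + strong.get_text() + '*')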

How to get favicon by using beautiful soup and python

I wrote some simple code just for learning, but it doesn't work for most sites.
Here is the code:
import urllib2, re
from BeautifulSoup import BeautifulSoup as Soup

class Founder:
    def Find_all_links(self, url):
        page_source = urllib2.urlopen(url)
        a = page_source.read()
        soup = Soup(a)
        a = soup.findAll(href=re.compile(r'/.a\w+'))
        return a

    def Find_shortcut_icon(self, url):
        a = self.Find_all_links(url)
        b = ''
        for i in a:
            strre = re.compile('shortcut icon', re.IGNORECASE)
            m = strre.search(str(i))
            if m:
                b = i["href"]
        return b

    def Save_icon(self, url):
        url = self.Find_shortcut_icon(url)
        print url
        host = re.search(r'[0-9a-zA-Z]{1,20}\.[a-zA-Z]{2,4}', url).group()
        opener = urllib2.build_opener()
        icon = opener.open(url).read()
        file = open(host + '.ico', "wb")
        file.write(icon)
        file.close()
        print '%s icon successfully saved' % host

c = Founder()
print c.Save_icon('http://lala.ru')
The strangest thing is that it works for these sites:
http://habrahabr.ru
http://5pd.ru
but it doesn't work for most of the others I've checked.
You're making it far more complicated than it needs to be. Here's a simple way to do it:
import urllib
from BeautifulSoup import BeautifulSoup

page = urllib.urlopen("http://5pd.ru/")
soup = BeautifulSoup(page)
icon_link = soup.find("link", rel="shortcut icon")
icon = urllib.urlopen(icon_link['href'])

with open("test.ico", "wb") as f:
    f.write(icon.read())
Thomas K's answer got me started in the right direction, but I found some websites that don't say rel="shortcut icon", like 1800contacts.com, which says just rel="icon". This works in Python 3 and returns the link. You can write it to a file if you want.
from bs4 import BeautifulSoup
import requests

def getFavicon(domain):
    if 'http' not in domain:
        domain = 'http://' + domain
    page = requests.get(domain)
    soup = BeautifulSoup(page.text, features="lxml")
    icon_link = soup.find("link", rel="shortcut icon")
    if icon_link is None:
        icon_link = soup.find("link", rel="icon")
    if icon_link is None:
        return domain + '/favicon.ico'
    return icon_link["href"]
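A quick usage example (my addition; note that the returned href can be relative, so you may still need to join it with the domain):

favicon_url = getFavicon('https://stackoverflow.com')
print(favicon_url)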
In case anyone wants to use a single check with regex, the following works for me:
import re
from bs4 import BeautifulSoup

html_code = "<Some HTML code you get from somewhere>"
soup = BeautifulSoup(html_code, features="lxml")

for item in soup.find_all('link', attrs={'rel': re.compile("^(shortcut icon|icon)$", re.I)}):
    print(item.get('href'))
This also accounts for differences in case.
Thank you, kurd. Here is the code with some changes:
import urllib2
from BeautifulSoup import BeautifulSoup

url = "http://www.facebook.com"
page = urllib2.urlopen(url)
soup = BeautifulSoup(page.read())
icon_link = soup.find("link", rel="shortcut icon")

try:
    icon = urllib2.urlopen(icon_link['href'])
except:
    icon = urllib2.urlopen(url + icon_link['href'])

iconname = url.split(r'/')
iconname = iconname[2].split('.')
iconname = iconname[1] + '.' + iconname[2] + '.ico'

with open(iconname, "wb") as f:
    f.write(icon.read())
Thank you, Thomas.
Here is the code with some changes:
import urllib2
from BeautifulSoup import BeautifulSoup

page = urllib2.urlopen("http://5pd.ru/")
soup = BeautifulSoup(page.read())
icon_link = soup.find("link", rel="shortcut icon")
icon = urllib2.urlopen(icon_link['href'])

with open("test.ico", "wb") as f:
    f.write(icon.read())
