find all a href from table - python

I'm trying to scrape rotten tomatoes with bs4
My aim is to find all the a hrefs from the table, but I cannot manage to do it — can you help me?
https://www.rottentomatoes.com/top/bestofrt/top_100_action__adventure_movies/
my code is
from urllib import request
from bs4 import BeautifulSoup as BS
import re
import pandas as pd

# Collect the category pages linked from the "best of RT" index.
url = 'https://www.rottentomatoes.com/top/bestofrt'
html = request.urlopen(url)
bs = BS(html.read(), 'html.parser')
tags = bs.find_all('a', {'class': 'articleLink unstyled'})[7:]  # skip navigation anchors
links = ['https://www.rottentomatoes.com' + tag['href'] for tag in tags]
########################################### links ############################################################################
webpages = []
for link in reversed(links):
    print(link)
    html = request.urlopen(link)
    bs = BS(html.read(), 'html.parser')
    # NOTE(review): slicing off the first 43 anchors is fragile -- it assumes
    # every category page has exactly that many non-movie links. Prefer
    # selecting anchors inside the ranking table instead.
    # Use distinct names here so we do not shadow the outer `links` list
    # while it is being iterated.
    movie_tags = bs.find_all('a', {'class': 'unstyled articleLink'})[43:]
    movie_links = ['https://www.rottentomatoes.com' + tag['href'] for tag in movie_tags]
    webpages.extend(movie_links)
print(webpages)
I put a limit of 43 in order to skip the irrelevant links and keep only the movies, but that is a short-term workaround and does not really help.
I need an exact solution for scraping the links from the table without picking up irrelevant information.
thanks

Just grab the main table and then extract all the <a> tags.
For example:
import requests
from bs4 import BeautifulSoup

rotten_tomatoes_url = 'https://www.rottentomatoes.com/top/bestofrt/top_100_action__adventure_movies/'

# Fetch the page, locate the ranking table, and build an absolute movie URL
# from every anchor found inside that table.
response = requests.get(rotten_tomatoes_url)
soup = BeautifulSoup(response.text, "lxml")
ranking_table = soup.find("table", class_="table")
action_and_adventure = [
    f"https://www.rottentomatoes.com{anchor.get('href')}"
    for anchor in ranking_table.find_all("a")
]

print(len(action_and_adventure))
print("\n".join(action_and_adventure[:10]))
Output (all 100 links to movies):
100
https://www.rottentomatoes.com/m/black_panther_2018
https://www.rottentomatoes.com/m/avengers_endgame
https://www.rottentomatoes.com/m/mission_impossible_fallout
https://www.rottentomatoes.com/m/mad_max_fury_road
https://www.rottentomatoes.com/m/spider_man_into_the_spider_verse
https://www.rottentomatoes.com/m/wonder_woman_2017
https://www.rottentomatoes.com/m/logan_2017
https://www.rottentomatoes.com/m/coco_2017
https://www.rottentomatoes.com/m/dunkirk_2017
https://www.rottentomatoes.com/m/star_wars_the_last_jedi

try this:
# The original `bs.find_all(name='a', {...})` is a SyntaxError: a positional
# argument cannot follow a keyword argument. Pass the attribute filter via
# the `attrs` keyword instead.
tags = bs.find_all(name='a', attrs={'class': 'unstyled articleLink'})[43:]

Related

How to scrape review to dataframe

I would like to scrape the reviews from this page and save them as a data frame, but I am not getting the star ratings together with the review text — only the text. What did I do wrong?
import csv
import pandas as pd
import requests
from bs4 import BeautifulSoup

# Download the reviews page and collect every review container element.
page = requests.get("https://www.morele.net/pralka-candy-cs4-1062d3-950636/?sekcja=reviews-all")
review_items = BeautifulSoup(page.content, "html.parser").find_all("div", {"class": "reviews-item"})
# print(review_items)

# Flatten each review block into plain text.
morele = [item.getText(strip=True) for item in review_items]
print(morele)

csv_table = pd.DataFrame(morele)
csv_table = csv_table.reset_index(drop=True)
csv_table.insert(0, 'No.', csv_table.index)
You are mostly there - just further navigate the DOM and you can get just the text.
import requests
import pandas as pd  # was missing: pd.DataFrame below needs it
from bs4 import BeautifulSoup

page = requests.get("https://www.morele.net/pralka-candy-cs4-1062d3-950636/?sekcja=reviews-all")
soup = BeautifulSoup(page.content, "html.parser")
# For each review item, navigate further into the DOM to pull just the
# description text and the star rating.
data = [{"text": ri.find("div", {"class": "rev-desc"}).getText(strip=True),
         "stars": ri.find("div", {"class": "rev-stars"}).getText(strip=True)}
        for ri in soup.find_all("div", {"class": "reviews-item"})]
pd.DataFrame(data)

How to get just links of articles in list using BeautifulSoup

Hey guys, so I got as far as being able to add the a tags to a list. The problem is that I just want the href link to be added to the links_with_text list, not the entire a tag. What am I doing wrong?
from bs4 import BeautifulSoup
from requests import get
import requests

URL = "https://news.ycombinator.com"
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')

# Restrict the search to the main story table, then walk every title cell.
results = soup.find(id='hnmain')
articles = results.find_all(class_="title")

links_with_text = []
for article in articles:
    anchor = article.find('a', href=True)
    links_with_text.append(anchor)

print('\n'.join(map(str, links_with_text)))
This prints exactly how I want the list to print but I just want the href from every a class not the entire a class. Thank you
To get all links from the https://news.ycombinator.com, you can use CSS selector 'a.storylink'.
For example:
from bs4 import BeautifulSoup
from requests import get
import requests

URL = "https://news.ycombinator.com"
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')

# CSS selector 'a.storylink' matches every <a> with class="storylink";
# take only the href attribute of each match, not the whole tag.
links_with_text = [a['href'] for a in soup.select('a.storylink')]
print(*links_with_text, sep='\n')
Prints:
https://blog.mozilla.org/futurereleases/2020/06/18/introducing-firefox-private-network-vpns-official-product-the-mozilla-vpn/
https://mxb.dev/blog/the-return-of-the-90s-web/
https://github.blog/2020-06-18-introducing-github-super-linter-one-linter-to-rule-them-all/
https://www.sciencemag.org/news/2018/11/why-536-was-worst-year-be-alive
https://www.strongtowns.org/journal/2020/6/16/do-the-math-small-projects
https://devblogs.nvidia.com/announcing-cuda-on-windows-subsystem-for-linux-2/
https://lwn.net/SubscriberLink/822568/61d29096a4012e06/
https://imil.net/blog/posts/2020/fakecracker-netbsd-as-a-function-based-microvm/
https://jepsen.io/consistency
https://tumblr.beesbuzz.biz/post/621010836277837824/advice-to-young-web-developers
https://archive.org/search.php?query=subject%3A%22The+Navy+Electricity+and+Electronics+Training+Series%22&sort=publicdate
https://googleprojectzero.blogspot.com/2020/06/ff-sandbox-escape-cve-2020-12388.html?m=1
https://apnews.com/1da061ce00eb531291b143ace0eed1c9
https://support.apple.com/library/content/dam/edam/applecare/images/en_US/appleid/android-apple-music-account-payment-none.jpg
https://standpointmag.co.uk/issues/may-june-2020/the-healing-power-of-birdsong/
https://steveblank.com/2020/06/18/the-coming-chip-wars-of-the-21st-century/
https://www.videolan.org/security/sb-vlc3011.html
https://onesignal.com/careers/2023b71d-2f44-4934-a33c-647855816903
https://www.bbc.com/news/world-europe-53006790
https://github.com/efficient/HOPE
https://everytwoyears.org/
https://www.historytoday.com/archive/natural-histories/intelligence-earthworms
https://cr.yp.to/2005-590/powerpc-cwg.pdf
https://quantum.country/
http://www.crystallography.net/cod/
https://parkinsonsnewstoday.com/2020/06/17/tiny-magnetically-powered-implant-may-be-future-of-deep-brain-stimulation/
https://spark.apache.org/releases/spark-release-3-0-0.html
https://arxiv.org/abs/1712.09624
https://www.washingtonpost.com/technology/2020/06/18/data-privacy-law-sherrod-brown/
https://blog.chromium.org/2020/06/improving-chromiums-browser.html

How to get complete href links using beautifulsoup in python

I am trying to get the top movie names by genre. I couldn't get the complete href links for that — I'm stuck with only partial href links.
By the following code I got,
https://www.imdb.com/search/title?genres=action&sort=user_rating,desc&title_type=feature&num_votes=25000,
https://www.imdb.com/search/title?genres=adventure&sort=user_rating,desc&title_type=feature&num_votes=25000,
https://www.imdb.com/search/title?genres=animation&sort=user_rating,desc&title_type=feature&num_votes=25000,
https://www.imdb.com/search/title?genres=biography&sort=user_rating,desc&title_type=feature&num_votes=25000,
.........
Like that but i want to all top 100 movies name by its genre like action, Adventure, Animation, Biography.......
I tried the following code:
from bs4 import BeautifulSoup
import requests

url = 'https://www.imdb.com'
main_url = url + '/chart/top'

res = requests.get(main_url)
soup = BeautifulSoup(res.text, 'html.parser')

# Each genre entry in the sub-navigation carries a relative link; prefix it
# with the site root to build an absolute URL.
for item in soup.find_all(class_='subnav_item_main'):
    # print(item)
    genre_link = url + item.find('a').get('href')
    print(genre_link)
I want complete link as shown bellow from a link
/search/title?genres=action&sort=user_rating,desc&title_type=feature&num_votes=25000,&pf_rd_m=A2FGELUUNOQJNL&pf_rd_p=5aab685f-35eb-40f3-95f7-c53f09d542c3&pf_rd_r=FM1ZEBQ7E9KGQSDD441H&pf_rd_s=right-6&pf_rd_t=15506&pf_rd_i=top&ref_=chttp_gnr_1"
You need another loop over those urls and a limit to only get 100. I store in a dictionary with keys being genre and values being a list of films. Note original titles may appear e.g. The Mountain II (2016) is Dag II (original title).
links is a list of tuples where I keep the genre as first item and url as second.
import requests, pprint
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin

url = 'https://www.imdb.com/chart/top'
genres = {}

with requests.Session() as s:
    r = s.get(url)
    soup = bs(r.content, 'lxml')
    # (genre label, absolute genre-page URL) for every sub-navigation entry.
    links = [(i.text, urljoin(url, i['href'])) for i in soup.select('.subnav_item_main a')]
    for genre_name, genre_url in links:
        r = s.get(genre_url)
        soup = bs(r.content, 'lxml')
        # The lazily-loaded poster images carry the film title in their alt
        # attribute; cap at the first 100 per genre.
        genres[genre_name.strip()] = [i['alt'] for i in soup.select('.loadlate', limit=100)]

pprint.pprint(genres)
Sample output:

I want my code to not extract links with 0 seeders using python

I wrote my code, but it extracts all the links no matter what the seeders count is.
here is the code i wrote:
from bs4 import BeautifulSoup
import urllib.request
import re


class AppURLopener(urllib.request.FancyURLopener):
    # Spoof a browser user agent so the site does not reject the request.
    version = "Mozilla/5.0"


url = input('What site you working on today, sir?\n-> ')
opener = AppURLopener()
html_page = opener.open(url)
soup = BeautifulSoup(html_page, "lxml")

pd = str(soup.findAll('td', attrs={'align': re.compile('right')}))
for link in soup.findAll('a', attrs={'href': re.compile("^magnet")}):
    # NOTE(review): this inspects one fixed character of the stringified
    # result set, not the seeder cell of the row this magnet link belongs
    # to, so it cannot filter per-torrent. Compare each row's own seeder
    # count instead (see the answer below). Also use `!=` for string
    # comparison, never `is`.
    if pd[18] != '0':
        print(link.get('href'), '\n')
and this is the html am working on : https://imgur.com/a/32J9qF4
in this case it's 0 seeders but it still gives me the magnet link.. HELP
This code snippet will extract all magnet links from the page, where seeders != 0:
from bs4 import BeautifulSoup
import requests
from pprint import pprint

soup = BeautifulSoup(requests.get('https://pirateproxy.mx/browse/201/1/3').text, 'lxml')

# Each result row yields three sibling <td>s after the category cell:
# name, seeders, leechers. Walk them in triples and keep the magnet link
# only when the row's seeder count is not "0".
tds = soup.select('#searchResult td.vertTh ~ td')
links = []
for name, seeders, leechers in zip(tds[0::3], tds[1::3], tds[2::3]):
    if seeders.text.strip() != '0':
        links.append(name.select_one('a[href^=magnet]')['href'])

pprint(links, width=120)
Prints:
['magnet:?xt=urn:btih:aa8a1f7847a49e640638c02ce851effff38d440f&dn=Affairs.of.State.2018.BRRip.x264.AC3-Manning&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
'magnet:?xt=urn:btih:819cb9b477462cd61ab6653ebc4a6f4e790589c3&dn=Bad.Samaritan.2018.BRRip.x264.AC3-Manning&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
'magnet:?xt=urn:btih:843d01992aa81d52be68190ee6a733ec9eee9b13&dn=The+Darkest+Minds+2018+HDCAM-1XBET&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
'magnet:?xt=urn:btih:09a23daa69c42003d905ecf0a1cefdb0474e7d88&dn=Insidious+The+Last+Key+2018+BRRip+x264+AAC-SSN&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
'magnet:?xt=urn:btih:98c42d5d620b4db834c5437a75f6da6f2d158207&dn=The+Darkest+Minds+2018+HDCAM-1XBET%5BTGx%5D&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
'magnet:?xt=urn:btih:f30ebc409b215f2a5237433d7508c7ebfabb0e16&dn=Journeyman.2017.SWESUB.BRRiP.x264.mp4&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Fzer0day.ch%3A1337&tr=udp%3A%2F%2Fopen.demonii.com%3A1337&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Fexodus.desync.com%3A6969',
...and so on.
EDIT:
The soup.select('#searchResult td.vertTh ~ td') will select all <td> siblings of tag <td> with class vertTh which is inside tag with id=searchResult. There are three siblings like this in each row.
The select_one('a[href^=magnet]') will then select all links that href begins with magnet.

Scraping with beautifulsoup trying to get all the href attributes

I'm trying to scrape all the URLs from the Amazon categories website (https://www.amazon.com/gp/site-directory/ref=nav_shopall_btn),
but I can just get the first url of any category. For example, from "Amazon video" I am getting "All videos", "Fire TV" amazon fire tv, etc.
That is my code:
from bs4 import BeautifulSoup
import requests

url = "https://www.amazon.es/gp/site-directory/ref=nav_shopall_btn"
amazon_link = requests.get(url)
html = BeautifulSoup(amazon_link.text, "html.parser")

# One "popover-grouping" <div> per category; `.a` reaches only the FIRST
# anchor inside each grouping, which is why just one link per category
# comes out.
categorias_amazon = html.find_all('div', {'class': 'popover-grouping'})
for grouping in categorias_amazon:
    print("www.amazon.es" + grouping.a['href'])
I have tried with:
print("www.amazon.es" + categorias_amazon[i].find_all['a'])
but I get an error. I am looking to get href attribute of every sub category.
You can try this code:
from bs4 import BeautifulSoup
import requests

url = "https://www.amazon.es/gp/site-directory/ref=nav_shopall_btn"
amazon_link = requests.get(url)
html = BeautifulSoup(amazon_link.text, "html.parser")
# print(html)

categorias_amazon = html.find_all('div', {'class': 'popover-grouping'})

# Select every element that carries an href inside any grouping <div>, so
# all sub-category links are captured -- not just the first per category.
allurls = html.select("div.popover-grouping [href]")
values = [link['href'].strip() for link in allurls]
for value in values:
    print("www.amazon.es" + value)
It will print:
www.amazon.es/b?ie=UTF8&node=1748200031
www.amazon.es/gp/dmusic/mp3/player
www.amazon.es/b?ie=UTF8&node=2133385031
www.amazon.es/clouddrive/primephotos
www.amazon.es/clouddrive/home
www.amazon.es/clouddrive/home#download-section
www.amazon.es/clouddrive?_encoding=UTF8&sf=1
www.amazon.es/dp/B0186FET66
www.amazon.es/dp/B00QJDO0QC
www.amazon.es/dp/B00IOY524S
www.amazon.es/dp/B010EK1GOE
www.amazon.es/b?ie=UTF8&node=827234031
www.amazon.es/ebooks-kindle/b?ie=UTF8&node=827231031
www.amazon.es/gp/kindle/ku/sign-up/
www.amazon.es/b?ie=UTF8&node=8504981031
www.amazon.es/gp/digital/fiona/kcp-landing-page
www.amazon.eshttps://www.amazon.es:443/gp/redirect.html?location=https://leer.amazon.es/&token=CA091C61DBBA8A5C0F6E4A46ED30C059164DBC74&source=standards
www.amazon.es/gp/digital/fiona/manage
www.amazon.es/dp/B00ZDWLEEG
www.amazon.es/dp/B00IRKMZX0
www.amazon.es/dp/B01AHBC23E
www.amazon.es/b?ie=UTF8&node=827234031
www.amazon.es/mobile-apps/b?ie=UTF8&node=1661649031
www.amazon.es/b?ie=UTF8&node=1726755031
www.amazon.es/b?ie=UTF8&node=1748200031
www.amazon.es/ebooks-kindle/b?ie=UTF8&node=827231031
www.amazon.es/gp/digital/fiona/manage
www.amazon.es/b?ie=UTF8&node=10909716031
www.amazon.es/b?ie=UTF8&node=10909718031
www.amazon.es/b?ie=UTF8&node=10909719031
www.amazon.es/b?ie=UTF8&node=10909720031
www.amazon.es/b?ie=UTF8&node=10909721031
www.amazon.es/b?ie=UTF8&node=10909722031
www.amazon.es/b?ie=UTF8&node=8464150031
www.amazon.es/mobile-apps/b?ie=UTF8&node=1661649031
www.amazon.es/b?ie=UTF8&node=1726755031
www.amazon.es/b?ie=UTF8&node=4622953031
www.amazon.es/gp/feature.html?ie=UTF8&docId=1000658923
www.amazon.es/gp/mas/your-account/myapps
www.amazon.es/comprar-libros-espa%C3%B1ol/b?ie=UTF8&node=599364031
www.amazon.es/ebooks-kindle/b?ie=UTF8&node=827231031
www.amazon.es/gp/kindle/ku/sign-up/
www.amazon.es/Libros-en-ingl%C3%A9s/b?ie=UTF8&node=665418031
www.amazon.es/Libros-en-otros-idiomas/b?ie=UTF8&node=599367031
www.amazon.es/b?ie=UTF8&node=902621031
www.amazon.es/libros-texto/b?ie=UTF8&node=902673031
www.amazon.es/Blu-ray-DVD-peliculas-series-3D/b?ie=UTF8&node=599379031
www.amazon.es/series-tv-television-DVD-Blu-ray/b?ie=UTF8&node=665293031
www.amazon.es/Blu-ray-peliculas-series-3D/b?ie=UTF8&node=665303031
www.amazon.es/M%C3%BAsica/b?ie=UTF8&node=599373031
www.amazon.es/b?ie=UTF8&node=1748200031
www.amazon.es/musical-instruments/b?ie=UTF8&node=3628866031
www.amazon.es/fotografia-videocamaras/b?ie=UTF8&node=664660031
www.amazon.es/b?ie=UTF8&node=931491031
www.amazon.es/tv-video-home-cinema/b?ie=UTF8&node=664659031
www.amazon.es/b?ie=UTF8&node=664684031
www.amazon.es/gps-accesorios/b?ie=UTF8&node=664661031
www.amazon.es/musical-instruments/b?ie=UTF8&node=3628866031
www.amazon.es/accesorios/b?ie=UTF8&node=928455031
www.amazon.es/Inform%C3%A1tica/b?ie=UTF8&node=667049031
www.amazon.es/Electr%C3%B3nica/b?ie=UTF8&node=599370031
www.amazon.es/portatiles/b?ie=UTF8&node=938008031
www.amazon.es/tablets/b?ie=UTF8&node=938010031
www.amazon.es/ordenadores-sobremesa/b?ie=UTF8&node=937994031
www.amazon.es/componentes/b?ie=UTF8&node=937912031
www.amazon.es/b?ie=UTF8&node=2457643031
www.amazon.es/b?ie=UTF8&node=2457641031
www.amazon.es/Software/b?ie=UTF8&node=599376031
www.amazon.es/pc-videojuegos-accesorios-mac/b?ie=UTF8&node=665498031
www.amazon.es/Inform%C3%A1tica/b?ie=UTF8&node=667049031
www.amazon.es/material-oficina/b?ie=UTF8&node=4352791031
www.amazon.es/productos-papel-oficina/b?ie=UTF8&node=4352794031
www.amazon.es/boligrafos-lapices-utiles-escritura/b?ie=UTF8&node=4352788031
www.amazon.es/electronica-oficina/b?ie=UTF8&node=4352790031
www.amazon.es/oficina-papeleria/b?ie=UTF8&node=3628728031
www.amazon.es/videojuegos-accesorios-consolas/b?ie=UTF8&node=599382031
www.amazon.es/b?ie=UTF8&node=665290031
www.amazon.es/pc-videojuegos-accesorios-mac/b?ie=UTF8&node=665498031
www.amazon.es/b?ie=UTF8&node=8490963031
www.amazon.es/b?ie=UTF8&node=1381541031
www.amazon.es/Juguetes-y-juegos/b?ie=UTF8&node=599385031
www.amazon.es/bebe/b?ie=UTF8&node=1703495031
www.amazon.es/baby-reg/homepage
www.amazon.es/gp/family/signup
www.amazon.es/b?ie=UTF8&node=2181872031
www.amazon.es/b?ie=UTF8&node=3365351031
www.amazon.es/bano/b?ie=UTF8&node=3244779031
www.amazon.es/b?ie=UTF8&node=1354952031
www.amazon.es/iluminacion/b?ie=UTF8&node=3564289031
www.amazon.es/pequeno-electrodomestico/b?ie=UTF8&node=2165363031
www.amazon.es/aspiracion-limpieza-planchado/b?ie=UTF8&node=2165650031
www.amazon.es/almacenamiento-organizacion/b?ie=UTF8&node=3359926031
www.amazon.es/climatizacion-calefaccion/b?ie=UTF8&node=3605952031
www.amazon.es/Hogar/b?ie=UTF8&node=599391031
www.amazon.es/herramientas-electricas-mano/b?ie=UTF8&node=3049288031
www.amazon.es/Cortacespedes-Tractores-Jardineria/b?ie=UTF8&node=3249445031
www.amazon.es/instalacion-electrica/b?ie=UTF8&node=3049284031
www.amazon.es/accesorios-cocina-bano/b?ie=UTF8&node=3049286031
www.amazon.es/seguridad/b?ie=UTF8&node=3049292031
www.amazon.es/Bricolaje-Herramientas-Fontaneria-Ferreteria-Jardineria/b?ie=UTF8&node=2454133031
www.amazon.es/Categorias/b?ie=UTF8&node=6198073031
www.amazon.es/b?ie=UTF8&node=6348071031
www.amazon.es/Categorias/b?ie=UTF8&node=6198055031
www.amazon.es/b?ie=UTF8&node=12300685031
www.amazon.es/Salud-y-cuidado-personal/b?ie=UTF8&node=3677430031
www.amazon.es/Suscribete-Ahorra/b?ie=UTF8&node=9699700031
www.amazon.es/Amazon-Pantry/b?ie=UTF8&node=10547412031
www.amazon.es/moda-mujer/b?ie=UTF8&node=5517558031
www.amazon.es/moda-hombre/b?ie=UTF8&node=5517557031
www.amazon.es/moda-infantil/b?ie=UTF8&node=5518995031
www.amazon.es/bolsos-mujer/b?ie=UTF8&node=2007973031
www.amazon.es/joyeria/b?ie=UTF8&node=2454126031
www.amazon.es/relojes/b?ie=UTF8&node=599388031
www.amazon.es/equipaje/b?ie=UTF8&node=2454129031
www.amazon.es/gp/feature.html?ie=UTF8&docId=12464607031
www.amazon.es/b?ie=UTF8&node=8520792031
www.amazon.es/running/b?ie=UTF8&node=2928523031
www.amazon.es/fitness-ejercicio/b?ie=UTF8&node=2928495031
www.amazon.es/ciclismo/b?ie=UTF8&node=2928487031
www.amazon.es/tenis-padel/b?ie=UTF8&node=2985165031
www.amazon.es/golf/b?ie=UTF8&node=2928503031
www.amazon.es/deportes-equipo/b?ie=UTF8&node=2975183031
www.amazon.es/deportes-acuaticos/b?ie=UTF8&node=2928491031
www.amazon.es/deportes-invierno/b?ie=UTF8&node=2928493031
www.amazon.es/Tiendas-campa%C3%B1a-Sacos-dormir-Camping/b?ie=UTF8&node=2928471031
www.amazon.es/deportes-aire-libre/b?ie=UTF8&node=2454136031
www.amazon.es/ropa-calzado-deportivo/b?ie=UTF8&node=2975170031
www.amazon.es/calzado-deportivo/b?ie=UTF8&node=2928484031
www.amazon.es/electronica-dispositivos-el-deporte/b?ie=UTF8&node=2928496031
www.amazon.es/Coche-y-moto/b?ie=UTF8&node=1951051031
www.amazon.es/b?ie=UTF8&node=2566955031
www.amazon.es/gps-accesorios/b?ie=UTF8&node=664661031
www.amazon.es/Motos-accesorios-piezas/b?ie=UTF8&node=2425161031
www.amazon.es/industrial-cientfica/b?ie=UTF8&node=5866088031
www.amazon.es/b?ie=UTF8&node=6684191031
www.amazon.es/b?ie=UTF8&node=6684193031
www.amazon.es/b?ie=UTF8&node=6684192031
www.amazon.es/handmade/b?ie=UTF8&node=9699482031
www.amazon.es/b?ie=UTF8&node=10740508031
www.amazon.es/b?ie=UTF8&node=10740511031
www.amazon.es/b?ie=UTF8&node=10740559031
www.amazon.es/b?ie=UTF8&node=10740502031
www.amazon.es/b?ie=UTF8&node=10740505031
Hope this is what you were looking for.
Do you want to scrap it or scrape it? If it's the latter, what about this?
# NOTE(review): this snippet is Python 2 (urllib2, print statement) and uses
# the legacy BeautifulSoup 3 package; it will not run under Python 3 / bs4.
from BeautifulSoup import BeautifulSoup
import urllib2
import re
html_page = urllib2.urlopen("https://www.amazon.es/gp/site-directory/ref=nav_shopall_btn")
soup = BeautifulSoup(html_page)
# Print the href of every anchor on the page (the loop body below has lost
# its indentation in this paste).
for link in soup.findAll('a'):
print link.get('href')

Categories

Resources