Function not returning a list - Python

I have some code which creates a list of the links on a website. At the end of the function I return the list, but when I reference it later on, the list is blank.
Here is my code:
import requests
from bs4 import BeautifulSoup

PagesList = []
Startup = input('What is the website of the startup?')

def GetCleanLinks(Startup):
    headers = {'User-Agent': 'Mozilla/5.0'}
    response = requests.get(Startup, headers=headers)
    soup = BeautifulSoup(response.text, "html.parser")
    PagesList = [a['href'] for a in soup.find_all('a', href=True) if a.text.strip()]
    for i, link in enumerate(PagesList):
        if link[0] in ['/', '#']:
            PagesList[i] = Startup + link
    print(PagesList)
    return PagesList
GetCleanLinks(Startup)
print(PagesList)
If I put the print within the function (before the return) it prints out a list of the links. However, the print call outside the function prints an empty list. What am I missing?
Many thanks,
Rob
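A likely culprit, judging from the code alone: the assignment inside GetCleanLinks creates a new local PagesList that shadows the module-level one, so the global list is never filled. A minimal sketch of one fix, binding the returned value instead:

# Capture the returned list rather than relying on the module-level
# PagesList, which the function never touches.
PagesList = GetCleanLinks(Startup)
print(PagesList)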

Related

Can anyone tell me why crux-component-title is used and where it is taken from?

This code works, but I do not understand some things:
import requests
from bs4 import BeautifulSoup

def get_products(url):
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    out = []
    for title in soup.select(".crux-component-title"):
        out.append(title.get_text(strip=True))
    return out

url = "https://www.consumerreports.org/cro/coffee-makers.htm"
soup = BeautifulSoup(requests.get(url).content, "html.parser")

all_data = []
for category_link in soup.select("h3.crux-product-title a"):
    u = "https://www.consumerreports.org" + category_link["href"]
    print("Getting {}".format(u))
    all_data.extend(get_products(u))

for i, title in enumerate(all_data, 1):
    print("{:<5} {}".format(i, title))
What I did not get is why crux-component-title is used and where it comes from.
The crux-component-title class comes from the page that is fetched in the loop and passed into the get_products function.
This is your code:
# Loop over the links found in the anchor HTML tag "a"
# that are inside the "h3" tag:
for category_link in soup.select("h3.crux-product-title a"):
    # Get the "href" value from the link:
    u = "https://www.consumerreports.org" + category_link["href"]
The following line calls the get_products function, which makes a request to that page (i.e. the URL of the page obtained in the loop):
all_data.extend(get_products(u))
In the get_products function, the code gets the titles found on the page passed in via the u parameter; those titles are contained in HTML elements with the crux-component-title class:
for title in soup.select(".crux-component-title"):
    out.append(title.get_text(strip=True))
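In other words, .crux-component-title is simply a CSS class selector. A minimal sketch with made-up HTML (not taken from the actual site) shows what select matches:

from bs4 import BeautifulSoup

# Hypothetical markup standing in for a fetched category page.
html = '<h2 class="crux-component-title">Brew Master 3000</h2>'
soup = BeautifulSoup(html, "html.parser")
# ".crux-component-title" matches any tag whose class attribute
# contains "crux-component-title".
print([t.get_text(strip=True) for t in soup.select(".crux-component-title")])
# ['Brew Master 3000']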

See what for loop has checked

I don't really know what to call this issue; sorry for the vague title.
My program checks whether an element exists on multiple paths of a website. It has a base URL and appends to it the different paths of the domain to check, which are read from a JSON file (name.json).
In its current state, the program prints 1 if the element is found and 2 if not. I want it to print the URL instead of 1 or 2. My problem is that the IDs are consumed before the final for loop: when I try to print fullurl I only get the last ID in my JSON file printed multiple times (because each iteration overwrites it), instead of each unique URL.
import json
import grequests
from bs4 import BeautifulSoup

idlist = json.loads(open('name.json').read())
baseurl = 'https://steamcommunity.com/id/'

complete_urls = []
for uid in idlist:
    fullurl = baseurl + uid
    complete_urls.append(fullurl)

rs = (grequests.get(fullurl) for fullurl in complete_urls)
resp = grequests.map(rs)
for r in resp:
    soup = BeautifulSoup(r.text, 'lxml')
    if soup.find('span', class_='actual_persona_name'):
        print('1')
    else:
        print('2')
Since grequests.map returns the responses in the order of the requests (see this), you can match the fullurl of each request to its response using enumerate.
import json
import grequests
from bs4 import BeautifulSoup

idlist = json.loads(open('name.json').read())
baseurl = 'https://steamcommunity.com/id/'

complete_urls = []
for uid in idlist:
    fullurl = baseurl + uid
    complete_urls.append(fullurl)

rs = (grequests.get(fullurl) for fullurl in complete_urls)
resp = grequests.map(rs)
for index, r in enumerate(resp):  # use enumerate to get the index of each response
    soup = BeautifulSoup(r.text, 'lxml')
    print(complete_urls[index])  # the index lines up with the already existing list of complete_urls
    if soup.find('span', class_='actual_persona_name'):
        print('1')
    else:
        print('2')
If I understood correctly, you could just print(r.url) instead of the numbers, since the full URL is stored inside each response object.
for r in resp:
    soup = BeautifulSoup(r.text, 'lxml')
    if soup.find('span', class_='actual_persona_name'):
        print(r.url)  # element found
    else:
        print(r.url)  # element not found
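For what it's worth, a third variant (my own sketch, not from either answer) pairs each URL with its response via zip, so there is no index bookkeeping at all:

# A minimal sketch, assuming every request succeeded (grequests.map
# returns responses in request order, with None for failed requests).
for url, r in zip(complete_urls, resp):
    soup = BeautifulSoup(r.text, 'lxml')
    found = soup.find('span', class_='actual_persona_name') is not None
    print(url, 'found' if found else 'not found')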

Python - parsing html response

In my code, a user inputs a search term and get_all_links parses the HTML response and extracts the links that start with 'http'. When req is replaced with a hard-coded URL such as:
content = urllib.request.urlopen("http://www.ox.ac.uk")
the program returns a list of properly formatted links. However, when passing in req, no links are returned. I suspect this may be a formatting blip.
Here is my code:
import urllib.request
import urllib.parse

def get_all_links(s):  # function to get all the links
    d = 0
    links = []  # collect all links into a list
    while d != -1:  # until d is -1, i.e. no more links in the page
        d = s.find('<a href=', d)  # if <a href is found
        start = s.find('"', d)  # start will be the next quote character
        end = s.find('"', start + 1)  # end will be up to the closing "
        if d != -1:  # d is not -1
            d += 1
            if s[start + 1] == 'h':  # add only links which start with http
                links.append(s[start + 1:end])  # to the link list
    return links  # return the list

def main():
    term = input('Enter a search term: ')
    url = 'http://www.google.com/search'
    value = {'q': term}
    user_agent = 'Mozilla/5.0'
    headers = {'User-Agent': user_agent}
    data = urllib.parse.urlencode(value)
    print(data)
    url = url + '?' + data
    print(url)
    req = urllib.request.Request(url, None, headers)
    content = urllib.request.urlopen(req)
    s = content.read()
    print(s)
    links = get_all_links(s.decode('utf-8'))
    for i in links:  # print the returned list
        print(i)

main()
You should use an HTML parser, as suggested in the comments. A library like BeautifulSoup is perfect for this.
I have adapted your code to use BeautifulSoup:
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup

def get_all_links(s):
    soup = BeautifulSoup(s, "html.parser")
    return soup.select("a[href^=\"http\"]")  # select all anchor tags whose href attribute starts with 'http'

def main():
    term = input('Enter a search term: ')
    url = 'http://www.google.com/search'
    value = {'q': term}
    user_agent = 'Mozilla/5.0'
    headers = {'User-Agent': user_agent}
    data = urllib.parse.urlencode(value)
    print(data)
    url = url + '?' + data
    print(url)
    req = urllib.request.Request(url, None, headers)
    content = urllib.request.urlopen(req)
    s = content.read()
    print(s)
    links = get_all_links(s.decode('utf-8'))
    for i in links:  # print the returned list
        print(i)

main()
It uses the select method of the BeautifulSoup library, which returns a list of the selected elements (in your case, anchor tags).
Using a library like BeautifulSoup not only makes this easier, it also supports much more complex selections. Imagine how you would have to change your hand-rolled parser if you wanted to select all links whose href attribute contains the word "google" or "code".
You can read the BeautifulSoup documentation here.
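For instance, that substring match is a one-line change with CSS attribute selectors. A minimal sketch with made-up HTML (not from the question):

from bs4 import BeautifulSoup

html = '''
<a href="http://code.example.org">code</a>
<a href="http://www.google.com/search?q=python">google</a>
<a href="http://other.example.com">other</a>
'''
soup = BeautifulSoup(html, "html.parser")
# *= is the CSS substring-match operator; the comma groups two selectors.
print(soup.select('a[href*="google"], a[href*="code"]'))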

Convert a result in the console into a list Python

I want to web scrape a list of URLs from a web site and then open them one by one.
I can get all of the URLs, but when I try to turn them into a list things go wrong.
When I print the list, instead of getting [url1, url2, ...] once, I get a growing list on each line of the console:
[url1, url2, url3]
[url1, url2, url3, url4]
[url1, url2, url3, url4, url5]
Find my script below:
from selenium import webdriver
from bs4 import BeautifulSoup as soup
import time

driver = webdriver.Chrome()
my_url = "https://prog.nfz.gov.pl/app-jgp/AnalizaPrzekrojowa.aspx"
driver.get(my_url)
time.sleep(3)

content = driver.page_source.encode('utf-8').strip()
page_soup = soup(content, "html.parser")

links = []
for link in page_soup.find_all('a', href=True):
    url = link['href']
    ai = str(url)
    links.append(ai)
    print(links)  # printing inside the loop shows the growing list once per iteration
I have rewritten your code a little. First you need to load and scrape the main page to get all the links from the href attributes. After that, just use the scraped URLs in a loop to get the next pages.
Also, there is some junk in the href values that isn't a URL, so you have to clean it out first.
I prefer requests for doing GETs:
http://docs.python-requests.org/en/master/
I hope it helps.
from bs4 import BeautifulSoup
import requests

def main():
    links = []
    url = "https://prog.nfz.gov.pl/app-jgp/AnalizaPrzekrojowa.aspx"
    web_page = requests.get(url)
    soup = BeautifulSoup(web_page.content, "html.parser")
    a_tags = soup.find_all('a', href=True)
    for a in a_tags:
        links.append(a.get("href"))
    print(links)  # just to demonstrate that the links are there

    cleaned_list = []
    for link in links:
        if "http" in link:
            cleaned_list.append(link)
    print(cleaned_list)
    return cleaned_list

def load_pages_from_links(urls):
    user_agent = {'User-agent': 'Mozilla/5.0'}
    links = urls
    downloaded_pages = {}
    if len(links) == 0:
        return "There are no links."
    else:
        for nr, link in enumerate(links):
            web_page = requests.get(link, headers=user_agent)
            downloaded_pages[nr] = web_page.content
    print(downloaded_pages)

if __name__ == "__main__":
    links = main()
    load_pages_from_links(links)
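As an aside on the original symptom: the growing lists came from calling print(links) inside the for loop, so the list was printed once per iteration. A minimal sketch of that one-line fix, reusing the page_soup object from the question:

# Build the whole list first, then print once after the loop.
links = [str(link['href']) for link in page_soup.find_all('a', href=True)]
print(links)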

Filtering certain items from a python list of links generated by beatifulsoup

I am writing a web scraper to pull some information from the JW Pepper website for a sheet music database. I am using BeautifulSoup and Python to do this.
Here is my code:
# a barebones program I created to scrape the description and audio file off the JW Pepper website; will eventually be used in a music database
import urllib2
import re
from bs4 import BeautifulSoup

linkgot = 0

def linkget():
    search = "http://www.jwpepper.com/sheet-music/search.jsp?keywords="  # this is the url without the keyword that comes up when searching something
    print("enter the name of the desired piece")
    keyword = raw_input("> ")  # this will add the keyword to the url
    url = search + keyword
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page)
    all_links = soup.findAll("a")
    link_dict = []
    item_dict = []
    for link in all_links:
        link_dict.append(link.get('href'))  # adds the links found on the page to link_dict
    item_dict.append(x for x in link_dict if '.item' in x)  # sorts them according to .item
    print item_dict

linkget()
The "print" command returns this: [ at 0x10ec6dc80>], which returns nothing when I google it.
Your filtering of the list was going wrong: appending a generator expression stores the generator object itself, not its results. Rather than filtering in a separate loop, you can just build the list when '.item' is present, as follows:
from bs4 import BeautifulSoup
import urllib2

def linkget():
    search = "http://www.jwpepper.com/sheet-music/search.jsp?keywords="  # this is the url without the keyword that comes up when searching something
    print("enter the name of the desired piece")
    keyword = raw_input("> ")  # this will add the keyword to the url
    url = search + keyword
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    link_dict = []
    item_dict = []
    for link in soup.findAll("a", href=True):
        href = link.get('href')
        link_dict.append(href)  # adds the links found on the page to link_dict
        if '.item' in href:
            item_dict.append(href)
    for href in item_dict:
        print href

linkget()
Giving you something like:
/Festival-of-Carols/4929683.item
/Festival-of-Carols/4929683.item
/Festival-of-Carols/4929683.item
...
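If you want the one-liner the question was reaching for, a list comprehension (which builds a real list, unlike the generator expression) does the same filtering. A minimal sketch, assuming link_dict is already populated:

link_dict = ["/Festival-of-Carols/4929683.item", "/about.html"]
item_dict = [x for x in link_dict if '.item' in x]  # a list, not a generator
print(item_dict)  # ['/Festival-of-Carols/4929683.item']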
