Generate a table of contents from HTML with Python

I'm trying to generate a table of contents from a block of HTML (not a complete file - just content) based on its <h2> and <h3> tags.
My plan so far was to:
1. Extract a list of headers using BeautifulSoup.
2. Use a regex on the content to place anchor links before/inside the header tags (so the user can click on the table of contents). There might be a method for replacing inside BeautifulSoup?
3. Output a nested list of links to the headers in a predefined spot.
It sounds easy when I say it like that, but it's proving to be a bit of a pain in the rear.
Is there something out there that does all this for me in one go so I don't waste the next couple of hours reinventing the wheel?
An example:
<p>This is an introduction</p>
<h2>This is a sub-header</h2>
<p>...</p>
<h3>This is a sub-sub-header</h3>
<p>...</p>
<h2>This is a sub-header</h2>
<p>...</p>

Some quickly hacked ugly piece of code:
soup = BeautifulSoup(html)
toc = []
header_id = 1
current_list = toc
previous_tag = None

for header in soup.findAll(['h2', 'h3']):
    header['id'] = header_id
    if previous_tag == 'h2' and header.name == 'h3':
        current_list = []
    elif previous_tag == 'h3' and header.name == 'h2':
        toc.append(current_list)
        current_list = toc
    current_list.append((header_id, header.string))
    header_id += 1
    previous_tag = header.name

if current_list != toc:
    toc.append(current_list)


def list_to_html(lst):
    result = ["<ul>"]
    for item in lst:
        if isinstance(item, list):
            result.append(list_to_html(item))
        else:
            # item is a (header_id, title) tuple
            result.append('<li><a href="#%s">%s</a></li>' % item)
    result.append("</ul>")
    return "\n".join(result)


# Table of contents
print(list_to_html(toc))

# Modified HTML
print(soup)

Use lxml.html.
It can deal with invalid html just fine.
It is very fast.
It allows you to easily create the missing elements and move elements around between the trees.
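For illustration, a minimal sketch of that approach with lxml.html (the id scheme, the build_toc name and the flat, non-nested <ul> are my own simplifications, not part of the answer):

import lxml.html
from lxml.html import builder as E


def build_toc(content):
    # Parse the fragment, give each h2/h3 an id, and build a flat <ul> of anchor
    # links; nesting h3 entries under their h2 is left out for brevity.
    root = lxml.html.fragment_fromstring(content, create_parent='div')
    toc = E.UL()
    for i, header in enumerate(root.xpath('//h2 | //h3'), start=1):
        header.set('id', 'header-%d' % i)
        toc.append(E.LI(E.A(header.text_content(), href='#header-%d' % i)))
    return (lxml.html.tostring(toc, encoding='unicode'),
            lxml.html.tostring(root, encoding='unicode'))


toc_html, body_html = build_toc('<p>Intro</p><h2>A sub-header</h2><h3>A sub-sub-header</h3>')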

I have come up with an extended version of the solution proposed by Łukasz.
from bs4 import BeautifulSoup
from slugify import slugify  # e.g. python-slugify; any slug helper works


def list_to_html(lst):
    result = ["<ul>"]
    for item in lst:
        if isinstance(item, list):
            result.append(list_to_html(item))
        else:
            # item is a (slug, title) tuple
            result.append('<li><a href="#{}">{}</a></li>'.format(item[0], item[1]))
    result.append("</ul>")
    return "\n".join(result)


soup = BeautifulSoup(article, 'html5lib')  # article: the HTML string to process
toc = []
h2_prev = 0
h3_prev = 0
h4_prev = 0
h5_prev = 0
for header in soup.findAll(['h2', 'h3', 'h4', 'h5', 'h6']):
    data = [(slugify(header.string), header.string)]
    if header.name == "h2":
        toc.append(data)
        h3_prev = 0
        h4_prev = 0
        h5_prev = 0
        h2_prev = len(toc) - 1
    elif header.name == "h3":
        toc[int(h2_prev)].append(data)
        h3_prev = len(toc[int(h2_prev)]) - 1
    elif header.name == "h4":
        toc[int(h2_prev)][int(h3_prev)].append(data)
        h4_prev = len(toc[int(h2_prev)][int(h3_prev)]) - 1
    elif header.name == "h5":
        toc[int(h2_prev)][int(h3_prev)][int(h4_prev)].append(data)
        h5_prev = len(toc[int(h2_prev)][int(h3_prev)][int(h4_prev)]) - 1
    elif header.name == "h6":
        toc[int(h2_prev)][int(h3_prev)][int(h4_prev)][int(h5_prev)].append(data)
toc_html = list_to_html(toc)
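One thing worth noting: for the anchors emitted by list_to_html to point anywhere, the headers in the document also need matching id attributes. A small addition (a sketch using the same slugify helper, run inside the existing loop or as a second pass) would be:

for header in soup.findAll(['h2', 'h3', 'h4', 'h5', 'h6']):
    # give each header an id matching the slug used in the TOC link
    header['id'] = slugify(header.string)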

See this related question: How do I generate a table of contents for HTML text in Python?
But I think you are on the right track, and reinventing the wheel will be fun.

Related

How to scrape only one price?

I'm trying to scrape product prices from a website, and both the real price and the monthly payment quota have exactly the same class, so I can't figure out how to get only the main price.
This is the main price: "879.990"
This is the monthly payment quota: "39.990"
This is the URL: https://listado.mercadolibre.cl/macbook#D[A:macbook]
# THIS GETS ALL THE NAMES AND STORES THEM IN A LIST
prod = soup.find_all('h2', class_='ui-search-item__title shops__item-title')
productos = list()
count = 0
for i in prod:
    if count < 33:
        productos.append(i.text)
    else:
        break
    count += 1
size = len(productos) + 1
# print(size)
# print(productos, len(productos))
print(productos)

# THIS GETS ALL THE PRICES AND STORES THEM IN A LIST
pri = soup.find_all('span', class_="price-tag-fraction")
precios = list()
count = 0
for i in pri:
    if count < 33:
        precios.append(i.text)
    else:
        break
    count += 1
# print(precios)
prices = [item.split(',') for item in precios]
Here is the output
You can filter out the other prices using CSS selectors:
# filsel = 'span.price-tag-fraction:not(span.ui-search-installments span):not(s.price-tag__disabled span)'
emiSp_sel = 'span.ui-search-installments span' # monthly
disab_sel = 's.price-tag__disabled span' # crossed out
filsel = f'span.price-tag-fraction:not({emiSp_sel}):not({disab_sel})'
pri = [p.get_text() for p in soup.select(filsel)]
or using a lambda with find_all:
pri = soup.find_all(
    lambda p: p.name == 'span' and 'price-tag-fraction' in p.get('class', '')
    and p.find_parent('span', {'class': 'ui-search-installments'}) is None
    and p.find_parent('s', {'class': 'price-tag__disabled'}) is None
)
or even by combining a list comprehension with your current method:
pri = [
    p for p in soup.find_all('span', class_="price-tag-fraction")
    if p.find_parent('span', {'class': 'ui-search-installments'}) is None
    and p.find_parent('s', {'class': 'price-tag__disabled'}) is None
]
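If the goal is to line the filtered prices up with the product names, a short follow-up (a sketch that reuses the productos list from the question and assumes the CSS-selector variant above, where pri is a list of strings in page order) could be:

# pair each product title with its filtered main price
for nombre, precio in zip(productos, pri):
    print(f'{nombre}: {precio}')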

What is the best way to parse large XML and generate a dataframe with the data in the XML (with Python or something else)?

I am trying to make a table (or CSV; I'm using a pandas dataframe) from the information in an XML file.
The file is here (the .zip is 14 MB, the XML is ~370 MB): https://nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.zip . It has package information for different languages (node.js, python, java, etc.), i.e. the CPE 2.3 list published by NVD, a US government organization.
This is what the first 30 lines look like:
<cpe-list xmlns:config="http://scap.nist.gov/schema/configuration/0.1" xmlns="http://cpe.mitre.org/dictionary/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:scap-core="http://scap.nist.gov/schema/scap-core/0.3" xmlns:cpe-23="http://scap.nist.gov/schema/cpe-extension/2.3" xmlns:ns6="http://scap.nist.gov/schema/scap-core/0.1" xmlns:meta="http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2" xsi:schemaLocation="http://scap.nist.gov/schema/cpe-extension/2.3 https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary-extension_2.3.xsd http://cpe.mitre.org/dictionary/2.0 https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary_2.3.xsd http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2 https://scap.nist.gov/schema/cpe/2.1/cpe-dictionary-metadata_0.2.xsd http://scap.nist.gov/schema/scap-core/0.3 https://scap.nist.gov/schema/nvd/scap-core_0.3.xsd http://scap.nist.gov/schema/configuration/0.1 https://scap.nist.gov/schema/nvd/configuration_0.1.xsd http://scap.nist.gov/schema/scap-core/0.1 https://scap.nist.gov/schema/nvd/scap-core_0.1.xsd">
<generator>
<product_name>National Vulnerability Database (NVD)</product_name>
<product_version>4.9</product_version>
<schema_version>2.3</schema_version>
<timestamp>2022-03-17T03:51:01.909Z</timestamp>
</generator>
<cpe-item name="cpe:/a:%240.99_kindle_books_project:%240.99_kindle_books:6::~~~android~~">
<title xml:lang="en-US">$0.99 Kindle Books project $0.99 Kindle Books (aka com.kindle.books.for99) for android 6.0</title>
<references>
<reference href="https://play.google.com/store/apps/details?id=com.kindle.books.for99">Product information</reference>
<reference href="https://docs.google.com/spreadsheets/d/1t5GXwjw82SyunALVJb2w0zi3FoLRIkfGPc7AMjRF0r4/edit?pli=1#gid=1053404143">Government Advisory</reference>
</references>
<cpe-23:cpe23-item name="cpe:2.3:a:\$0.99_kindle_books_project:\$0.99_kindle_books:6:*:*:*:*:android:*:*"/>
</cpe-item>
The tree structure of the XML file is quite simple: the root is 'cpe-list', the child element is 'cpe-item', and the grandchild elements are 'title', 'references' and 'cpe23-item'.
From 'title', I want the text in the element;
From 'cpe23-item', I want the attribute 'name';
From 'references', I want the 'href' attributes of its 'reference' children.
The dataframe should look like this:
| cpe23_name | title_text | ref1 | ref2 | ref3 | ref_other
0 | 'cpe23name 1'| 'this is a python pkg'| 'url1'| 'url2'| NaN | NaN
1 | 'cpe23name 2'| 'this is a java pkg' | 'url1'| 'url2'| NaN | NaN
...
My code is here; it finished in ~100 sec:
import xml.etree.ElementTree as et
import pandas as pd  # needed for the DataFrame below

xtree = et.parse("official-cpe-dictionary_v2.3.xml")
xroot = xtree.getroot()

import time
start_time = time.time()

df_cols = ["cpe", "text", "vendor", "product", "version", "changelog", "advisory", "others"]
title = '{http://cpe.mitre.org/dictionary/2.0}title'
ref = '{http://cpe.mitre.org/dictionary/2.0}references'
cpe_item = '{http://scap.nist.gov/schema/cpe-extension/2.3}cpe23-item'

p_cpe = None
p_text = None
p_vend = None
p_prod = None
p_vers = None
p_chan = None
p_advi = None
p_othe = None
rows = []

i = 0
while i < len(xroot):
    for elm in xroot[i]:
        if elm.tag == title:
            p_text = elm.text  # assign p_text
        elif elm.tag == ref:
            for nn in elm:
                s = nn.text.lower()  # check the lowercased text in refs
                if 'version' in s:
                    p_vers = nn.attrib.get('href')  # assign p_vers
                elif 'advisor' in s:
                    p_advi = nn.attrib.get('href')  # assign p_advi
                elif 'product' in s:
                    p_prod = nn.attrib.get('href')  # assign p_prod
                elif 'vendor' in s:
                    p_vend = nn.attrib.get('href')  # assign p_vend
                elif 'change' in s:
                    p_chan = nn.attrib.get('href')  # assign p_chan
                else:
                    p_othe = nn.attrib.get('href')
        elif elm.tag == cpe_item:
            p_cpe = elm.attrib.get("name")  # assign p_cpe
        else:
            print(elm.tag)
    row = [p_cpe, p_text, p_vend, p_prod, p_vers, p_chan, p_advi, p_othe]
    rows.append(row)
    p_cpe = None
    p_text = None
    p_vend = None
    p_prod = None
    p_vers = None
    p_chan = None
    p_advi = None
    p_othe = None
    print(len(rows))  # this shows how far I got during the running time
    i += 1
    out_df1 = pd.DataFrame(rows, columns=df_cols)  # move this part outside the loop by removing the indent

print("---853k rows take %s seconds ---" % (time.time() - start_time))
Updated: the faster way is to move the second-to-last line (the DataFrame construction) outside the loop. Since rows already collects the data on each iteration, there is no need to build a new dataframe every time.
The running time is now 136.0491042137146 seconds. Yay!
Since your XML is fairly flat, consider the recently added IO method pandas.read_xml, introduced in v1.3. Because the XML uses a default namespace, pass the namespaces argument and prefix the elements in the xpath accordingly:
url = "https://nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.zip"
df = pd.read_xml(
url, xpath=".//doc:cpe-item", namespaces={'doc': 'http://cpe.mitre.org/dictionary/2.0'}
)
If you do not have the default parser, lxml, installed, use the etree parser:
df = pd.read_xml(
url, xpath=".//doc:cpe-item", namespaces={'doc': 'http://cpe.mitre.org/dictionary/2.0'}, parser="etree"
)

Issue with BeautifulSoup .find(text=True)

for row in soup.find_all('tr'):
    cells = row.find_all('td')
    if len(cells) == 10:  # Only extract table body not heading
        A.append(cells[0].find(text=True))
        B.append(cells[1].find(text=True))
        C.append(cells[2].find('div').get('title'))
        D.append(cells[3].find('a', href=True).get_text())
        E.append(cells[4].find('a', href=True).get_text())
        if cells[5].find(text=True) is None or cells[5].find('a', href=True) is None:
            F.append(cells[5].find(text=True))
        else:
            Output = '-'.join([item.get_text() for item in cells[5].find_all('a')])
            F.append(Output)
        if cells[6].find(text=True) is None or cells[6].find('a', href=True) is None:
            G.append(cells[6].find(text=True))
        else:
            G.append(cells[6].find('a', href=True).get_text())
        if cells[7].find(text=True) is None or cells[7].find('a', href=True) is None:
            H.append(cells[7].find(text=True))
        else:
            H.append(cells[7].find('a', href=True).get_text())
        I.append(cells[8].find('span').get_text())
        J.append(cells[9].find(Title=True))
The problem is that in cells 5, 6 and 7 the desired output is sometimes inside an <a href> tag and sometimes directly inside the <td> tag. The code works, but the list F, for example, looks something like this:
0
T-001
1
TD-U1B
2 BMA-D2-USA
3 BMU-D3-USA
4
Positions 2 and 3 are correct. These are the outputs from:
else:
    Output = '-'.join([item.get_text() for item in cells[5].find_all('a')])
    F.append(Output)
Positions 0 and 1 are incorrect. These are the outputs from:
F.append(cells[5].find(text=True))
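One way to normalize those cells is a small helper (cell_text is my own name, not from the post) that joins the link texts when the cell contains <a> tags and otherwise falls back to the cell's stripped text:

def cell_text(cell):
    # join link texts if the cell has <a> tags, else use the cell's own text
    links = cell.find_all('a', href=True)
    if links:
        return '-'.join(a.get_text(strip=True) for a in links)
    return cell.get_text(strip=True)

# e.g. F.append(cell_text(cells[5])), and likewise for cells 6 and 7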

How to scrape and extract all the subcategory names and their associated pages for a Wikipedia category using Python 3.6?

I want to scrape all the subcategories and pages under the category header of the category page "Category:Computer science". The link is: http://en.wikipedia.org/wiki/Category:Computer_science.
I got an idea for this problem from the following Stack Overflow answers:
Pythonic beautifulSoup4 : How to get remaining titles from the next page link of a wikipedia category
and
How to scrape Subcategories and pages in categories of a Category wikipedia page using Python
However, the answers do not fully solve the problem. They only scrape the pages in the category "Computer science", but I want to extract all the subcategory names and their associated pages. I want the process to report the results in a BFS manner with a depth of 10. Is there any way to do this?
I found the following code in the linked post:
from pprint import pprint
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests

base_url = 'https://en.wikipedia.org/wiki/Category:Computer science'


def get_next_link(soup):
    return soup.find("a", text="next page")


def extract_links(soup):
    return [a['title'] for a in soup.select("#mw-pages li a")]


with requests.Session() as session:
    content = session.get(base_url).content
    soup = BeautifulSoup(content, 'lxml')
    links = extract_links(soup)
    next_link = get_next_link(soup)
    while next_link is not None:  # while there is a Next Page link
        url = urljoin(base_url, next_link['href'])
        content = session.get(url).content
        soup = BeautifulSoup(content, 'lxml')
        links += extract_links(soup)
        next_link = get_next_link(soup)
pprint(links)
To scrape the subcategories, you will have to use Selenium to interact with the dropdowns. A simple traversal over the second group of links will yield the pages; however, to find all the subcategories, recursion is needed to properly group the data. The code below uses a simple variant of breadth-first search to determine when to stop looping over the dropdown toggle objects generated at each iteration of the while loop:
from selenium import webdriver
import time
from bs4 import BeautifulSoup as soup


def block_data(_d):
    return {_d.find('h3').text: [[i.a.attrs.get('title'), i.a.attrs.get('href')] for i in _d.find('ul').find_all('li')]}


def get_pages(source: str) -> dict:
    return [block_data(i) for i in soup(source, 'html.parser').find('div', {'id': 'mw-pages'}).find_all('div', {'class': 'mw-category-group'})]


d = webdriver.Chrome('/path/to/chromedriver')
d.get('https://en.wikipedia.org/wiki/Category:Computer_science')
all_pages = get_pages(d.page_source)
_seen_categories = []


def get_categories(source):
    return [[i['href'], i.text] for i in soup(source, 'html.parser').find_all('a', {'class': 'CategoryTreeLabel'})]


def total_depth(c):
    return sum(1 if len(b) == 1 and not b[0] else sum([total_depth(i) for i in b]) for a, b in c.items())


def group_categories(source) -> dict:
    return {i.find('div', {'class': 'CategoryTreeItem'}).a.text: (lambda x: None if not x else [group_categories(c) for c in x])(i.find_all('div', {'class': 'CategoryTreeChildren'})) for i in source.find_all('div', {'class': 'CategoryTreeSection'})}


while True:
    full_dict = group_categories(soup(d.page_source, 'html.parser'))
    flag = False
    for i in d.find_elements_by_class_name('CategoryTreeToggle'):
        try:
            if i.get_attribute('data-ct-title') not in _seen_categories:
                i.click()
                flag = True
                time.sleep(1)
        except:
            pass
        else:
            _seen_categories.append(i.get_attribute('data-ct-title'))
    if not flag:
        break
Output:
all_pages:
[{'\xa0': [['Computer science', '/wiki/Computer_science'], ['Glossary of computer science', '/wiki/Glossary_of_computer_science'], ['Outline of computer science', '/wiki/Outline_of_computer_science']]},
{'B': [['Patrick Baudisch', '/wiki/Patrick_Baudisch'], ['Boolean', '/wiki/Boolean'], ['Business software', '/wiki/Business_software']]},
{'C': [['Nigel A. L. Clarke', '/wiki/Nigel_A._L._Clarke'], ['CLEVER score', '/wiki/CLEVER_score'], ['Computational human modeling', '/wiki/Computational_human_modeling'], ['Computational social choice', '/wiki/Computational_social_choice'], ['Computer engineering', '/wiki/Computer_engineering'], ['Critical code studies', '/wiki/Critical_code_studies']]},
{'I': [['Information and computer science', '/wiki/Information_and_computer_science'], ['Instance selection', '/wiki/Instance_selection'], ['Internet Research (journal)', '/wiki/Internet_Research_(journal)']]},
{'J': [['Jaro–Winkler distance', '/wiki/Jaro%E2%80%93Winkler_distance'], ['User:JUehV/sandbox', '/wiki/User:JUehV/sandbox']]},
{'K': [['Krauss matching wildcards algorithm', '/wiki/Krauss_matching_wildcards_algorithm']]},
{'L': [['Lempel-Ziv complexity', '/wiki/Lempel-Ziv_complexity'], ['Literal (computer programming)', '/wiki/Literal_(computer_programming)']]},
{'M': [['Machine learning in bioinformatics', '/wiki/Machine_learning_in_bioinformatics'], ['Matching wildcards', '/wiki/Matching_wildcards'], ['Sidney Michaelson', '/wiki/Sidney_Michaelson']]},
{'N': [['Nuclear computation', '/wiki/Nuclear_computation']]}, {'O': [['OpenCV', '/wiki/OpenCV']]},
{'P': [['Philosophy of computer science', '/wiki/Philosophy_of_computer_science'], ['Prefetching', '/wiki/Prefetching'], ['Programmer', '/wiki/Programmer']]},
{'Q': [['Quaject', '/wiki/Quaject'], ['Quantum image processing', '/wiki/Quantum_image_processing']]},
{'R': [['Reduction Operator', '/wiki/Reduction_Operator']]}, {'S': [['Social cloud computing', '/wiki/Social_cloud_computing'], ['Software', '/wiki/Software'], ['Computer science in sport', '/wiki/Computer_science_in_sport'], ['Supnick matrix', '/wiki/Supnick_matrix'], ['Symbolic execution', '/wiki/Symbolic_execution']]},
{'T': [['Technology transfer in computer science', '/wiki/Technology_transfer_in_computer_science'], ['Trace Cache', '/wiki/Trace_Cache'], ['Transition (computer science)', '/wiki/Transition_(computer_science)']]},
{'V': [['Viola–Jones object detection framework', '/wiki/Viola%E2%80%93Jones_object_detection_framework'], ['Virtual environment', '/wiki/Virtual_environment'], ['Visual computing', '/wiki/Visual_computing']]},
{'W': [['Wiener connector', '/wiki/Wiener_connector']]},
{'Z': [['Wojciech Zaremba', '/wiki/Wojciech_Zaremba']]},
{'Ρ': [['Portal:Computer science', '/wiki/Portal:Computer_science']]}]
full_dict is quite large, and due to its size I am unable to post it entirely here. However, below is an implementation of a function to traverse the structure and select all the elements down to a depth of ten:
def trim_data(d, depth, count):
    return {a: None if count == depth else [trim_data(i, depth, count+1) for i in b] for a, b in d.items()}


final_subcategories = trim_data(full_dict, 10, 0)
Edit: script to remove leaves from tree:
def remove_empty_children(d):
    return {a: None if len(b) == 1 and not b[0] else
            [remove_empty_children(i) for i in b if i] for a, b in d.items()}
When running the above:
c = {'Areas of computer science': [{'Algorithms and data structures': [{'Abstract data types': [{'Priority queues': [{'Heaps (data structures)': [{}]}, {}], 'Heaps (data structures)': [{}]}]}]}]}
d = remove_empty_children(c)
Output:
{'Areas of computer science': [{'Algorithms and data structures': [{'Abstract data types': [{'Priority queues': [{'Heaps (data structures)': None}], 'Heaps (data structures)': None}]}]}]}
Edit 2: flattening the entire structure:
def flatten_groups(d):
    for a, b in d.items():
        yield a
        if b is not None:
            for i in map(flatten_groups, b):
                yield from i


print(list(flatten_groups(remove_empty_children(c))))
Output:
['Areas of computer science', 'Algorithms and data structures', 'Abstract data types', 'Priority queues', 'Heaps (data structures)', 'Heaps (data structures)']
Edit 3:
To access all the pages for every subcategory down to a certain level, the original get_pages function can be used together with a slightly different version of the group_categories method:
def _group_categories(source) -> dict:
    return {i.find('div', {'class': 'CategoryTreeItem'}).find('a')['href']: (lambda x: None if not x else [group_categories(c) for c in x])(i.find_all('div', {'class': 'CategoryTreeChildren'})) for i in source.find_all('div', {'class': 'CategoryTreeSection'})}


import requests
from collections import namedtuple

page = namedtuple('page', ['pages', 'children'])


def subcategory_pages(d, depth, current=0):
    r = {}
    for a, b in d.items():
        all_pages_listing = get_pages(requests.get(f'https://en.wikipedia.org{a}').text)
        print(f'page number for {a}: {len(all_pages_listing)}')
        r[a] = page(all_pages_listing, None if current == depth else [subcategory_pages(i, depth, current+1) for i in b])
    return r


print(subcategory_pages(full_dict, 2))
Please note that in order to utilize subcategory_pages, _group_categories must be used in place of group_categories.
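For clarity, a sketch of the intended usage based on the functions above (the depth of 2 mirrors the example call):

# rebuild the tree keyed by hrefs, then fetch the pages for each subcategory
full_dict = _group_categories(soup(d.page_source, 'html.parser'))
result = subcategory_pages(full_dict, 2)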

Speed up parser: HTML into Database

I need to insert all HTML tags and their attributes into a database.
el.driver.get(url_page)
txthtml = el.driver.page_source
soup = BeautifulSoup(txthtml, "html.parser")
body = soup.find('html')
html_parse(body, el, url_page_id, 0, 0, 0, url_page)


def html_parse(html, el, url_page_id, level, i, parent_id, url_page):
    txt = ""
    if len(html.text) > 0:
        txt = html.text.replace("\n", "").replace("\t", "").replace("\r", "")
    ta = tag_list()
    ta.p_id = el.id
    ta.page_id = url_page_id
    ta.level = level
    ta.number = i
    ta.txt = txt
    ta.name = html.name
    ta.parent_id = parent_id
    ta.html = str(html)
    ta.save()
    insert_attr(html, el.id, url_page_id, ta.id, url_page)
    children = list(html.children)
    j = 0
    for child in children:
        if child.name is None:
            continue
        j = j + 1
        html_parse(child, el, url_page_id, level + 1, j, ta.id, url_page)
html_parse is a recursive function, where:
html - the current HTML object
el - the driver class
url_page_id - the id of the page
level - the level in the DOM
i - the child number
parent_id - the id of the parent
url_page - the current URL
tag_list - inserts the current tag
insert_attr - inserts the tag's attributes into the database
Each individual html_parse call runs fast, but parsing one big HTML page takes about 4-5 minutes in total.
How can I speed up the code?
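Most of that time is likely spent on the per-tag database writes rather than on the parsing itself. A minimal sketch of one common mitigation, assuming tag_list is a Django-style model (an assumption; the post does not say which ORM is used): wrap the whole recursive walk in a single transaction so each .save() does not commit separately:

from django.db import transaction  # assumes the Django ORM

# commit once at the end instead of once per tag
with transaction.atomic():
    html_parse(body, el, url_page_id, 0, 0, 0, url_page)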
