Converting HTML list (<li>) to tabs (i.e. indentation) - python

Have worked in dozens of languages but new to Python.
My first (maybe second) question here, so be gentle...
Trying to efficiently convert HTML-like markdown text to wiki format (specifically, Linux Tomboy/GNote notes to Zim) and have gotten stuck on converting lists.
For a 2-level unordered list like this...
First level
	Second level
Tomboy/GNote uses something like...
<list><list-item>First level<list><list-item>Second level</list-item></list></list-item></list>
However, the Zim personal wiki wants that to be...
* First level
	* Second level
... with leading tabs.
I've explored the re module functions re.sub(), re.match(), re.search(), etc., and found the cool Python ability to code repeating text as...
count * "text"
Thus, it looks like there should be a way to do something like...
newnote = re.sub("<list>", LEVEL * "\t", oldnote)
Where LEVEL is the ordinal (occurrence) of <list> in the note. It would thus be 0 for the first <list> encountered, 1 for the second, etc.
LEVEL would then be decremented each time </list> was encountered.
<list-item> tags are converted to the asterisk for the bullet (preceded by newline as appropriate) and </list-item> tags dropped.
Finally... the question...
How do I get the value of LEVEL and use it as a tabs multiplier?

You should really use an xml parser to do this, but to answer your question:
import re

def next_tag(s, tag):
    i = -1
    while True:
        try:
            i = s.index(tag, i+1)
        except ValueError:
            return
        yield i

a = "<list><list-item>First level<list><list-item>Second level</list-item></list></list-item></list>"
a = a.replace("<list-item>", "* ")
for LEVEL, ind in enumerate(next_tag(a, "<list>")):
    a = re.sub("<list>", "\n" + LEVEL * "\t", a, 1)
a = a.replace("</list-item>", "")
a = a.replace("</list>", "")
print a
This will work for your example, and your example ONLY. Use an XML parser. You can use xml.dom.minidom (it's included in Python (2.7 at least), no need to download anything):
import xml.dom.minidom

def parseList(el, lvl=0):
    txt = ""
    indent = "\t" * lvl
    for item in el.childNodes:
        # These are the <list-item>s: they can have text and a nested <list> tag
        for subitem in item.childNodes:
            if subitem.nodeType is xml.dom.minidom.Element.TEXT_NODE:
                # This is the text before the next <list> tag
                txt += "\n" + indent + "* " + subitem.nodeValue
            else:
                # This is the next list tag, its indent level is incremented
                txt += parseList(subitem, lvl=lvl+1)
    return txt

def parseXML(s):
    doc = xml.dom.minidom.parseString(s)
    return parseList(doc.firstChild)

a = "<list><list-item>First level<list><list-item>Second level</list-item><list-item>Second level 2<list><list-item>Third level</list-item></list></list-item></list></list-item></list>"
print parseXML(a)
Output:
* First level
	* Second level
	* Second level 2
		* Third level

Use BeautifulSoup; it allows you to iterate over the tags even if they are custom ones. Very practical for this type of operation.
from BeautifulSoup import BeautifulSoup
tags = "<list><list-item>First level<list><list-item>Second level</list-item></list></list-item></list>"
soup = BeautifulSoup(tags)
print [[ item.text for item in list_tag('list-item')] for list_tag in soup('list')]
Output: [[u'First level'], [u'Second level']]
I used a nested list comprehension, but you can use a nested for loop:
for list_tag in soup('list'):
    for item in list_tag('list-item'):
        print item.text
I hope that helps you.
In my example I used BeautifulSoup 3, but it should also work with BeautifulSoup 4; only the import changes:
from bs4 import BeautifulSoup
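If what you ultimately want is the tab-indented Zim output rather than nested lists of strings, the same walk can emit bullets directly. A rough sketch with bs4 (to_zim is my own helper name, and I'm assuming well-formed input like the example):

from bs4 import BeautifulSoup

def to_zim(list_tag, level=0):
    # One bullet per <list-item>; nested <list> tags recurse one tab deeper
    lines = []
    for item in list_tag.find_all('list-item', recursive=False):
        text = item.find(text=True, recursive=False)
        if text and text.strip():
            lines.append('\t' * level + '* ' + text.strip())
        for sub in item.find_all('list', recursive=False):
            lines.extend(to_zim(sub, level + 1))
    return lines

tags = "<list><list-item>First level<list><list-item>Second level</list-item></list></list-item></list>"
soup = BeautifulSoup(tags, 'html.parser')
print('\n'.join(to_zim(soup.list)))
# Output:
# * First level
# 	* Second level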

Related

Wikipedia API get text under headers

I can scrape a Wikipedia page using the wikipedia API:
import wikipedia
import re
page = wikipedia.page("Albert Einstein")
text = page.content
regex_result = re.findall("==\s(.+?)\s==", text)
print(regex_result)
From every element in regex_result (the Wikipedia headers) I want to get the text below it and append it to another list. I have dug through the internet and I cannot find a function in the Wikipedia API that does that.
A second option would be to take the whole text and use some module to extract the text between headers, as in: find some text in a string between some specific characters.
I have tried this:
l = 0
for n in regex_result:
    try:
        regal = re.findall(f"==\s{regex_result[l]}\s==(.+?)\s=={regex_result[l+1]}\s==", text)
        l += 2
    except Exception:
        continue
But it is not working: the output is only [].
You don't want to call re twice, but rather iterate directly through the results provided by regex_result. Named groups in the form of (?P<name>...) make it even easier to extract the header name without the surrounding markup.
import wikipedia
import re
page = wikipedia.page("Albert Einstein")
text = page.content
# using the number 2 for '=' means you can easily find sub-headers too by increasing the value
regex_result = re.findall("\n={2}\s(?P<header>.+?)\s={2}\n", text)
regex_result will then be a list of strings of all the top-level section headers.
Here's what I use to make a table of contents from a wiki page. (Note: f-strings require Python 3.6)
def get_wikiheader_regex(level):
    '''The top wikiheader level has two = signs, so add 1 to the level to get the correct number.'''
    assert isinstance(level, int) and level > -1
    header_regex = f"^={{{level+1}}}\s(?P<section>.*?)\s={{{level+1}}}$"
    return header_regex

def get_toc(raw_page, level=1):
    '''For a single raw wiki page, return the level 1 section headers as a table of contents.'''
    toc = []
    header_regex = get_wikiheader_regex(level=level)
    for line in raw_page.splitlines():
        if line.startswith('=') and re.search(header_regex, line):
            toc.append(re.search(header_regex, line).group('section'))
    return toc
>>> get_toc(text)
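To get from the headers to the text below each one (the original question), one option is to split the page content on the header pattern itself. A sketch, assuming you only care about top-level sections (get_sections is my own helper):

import re

def get_sections(text):
    # re.split with a capturing group returns:
    # [lead text, header 1, body 1, header 2, body 2, ...]
    parts = re.split(r"\n={2}\s(.+?)\s={2}\n", text)
    return dict(zip(parts[1::2], parts[2::2]))

sections = get_sections(text)   # text is page.content from above
for header, body in sections.items():
    print(header, len(body))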

Python: extract text from docx to txt via parsing word/document.xml

I would like to extract text from docx files into simple txt file.
I know this problem might seem to be easy or trivial (I hope it will be) but I've looked over dozens of forum topics, spent hours trying to solve by myself and found no solution...
I have borrowed the following code from Etienne's blog.
It works perfectly if I need the content with no formatting. But...
Since my documents contain simple tables, I need them to keep their format, simply using tabs.
So instead of this:
Name
Age
Wage
John
30
2000
This should appear:
Name	Age	Wage
John	30	2000
To keep longer lines from sliding into each other, I prefer double tabs for longer lines.
I have examined XML structure a little bit and found out that new rows in tables are indicated by tr, and columns by tc.
So I've tried to modify this a thousand ways but with no success...
Though it's not really working, here is my idea of how to approach the solution:
from lxml.html.defs import form_tags
try:
    from xml.etree.cElementTree import XML
except ImportError:
    from xml.etree.ElementTree import XML
import zipfile

WORD_NAMESPACE = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
PARA = WORD_NAMESPACE + 'p'
TEXT = WORD_NAMESPACE + 't'
ROW = WORD_NAMESPACE + 'tr'
COL = WORD_NAMESPACE + 'tc'

def get_docx_text(path):
    document = zipfile.ZipFile(path)
    xml_content = document.read('word/document.xml')
    document.close()
    tree = XML(xml_content)

    paragraphs = []
    for item in tree.iter(ROW or COL or PARA):
        texts = []
        print(item)
        if item is ROW:
            texts.append('\n')
        elif item is COL:
            texts.append('\t\t')
        elif item is PARA:
            for node in item.iter(TEXT):
                if node.text:
                    texts.append(node.text)
        if texts:
            paragraphs.append(''.join(texts))

    return '\n\n'.join(paragraphs)

text_file = open("output.txt", "w")
text_file.write(get_docx_text('input.docx'))
text_file.close()
I'm not really sure what the syntax should look like. The output gives nothing, and after a few trials it produced something, but it was even worse than nothing.
I put print(item) in just for checking. But instead of every ROW, COL and PARA item, it lists only ROWs. So in the condition of the for loop, the program seems to ignore the or connection of the terms: if it cannot find ROW, it doesn't try the two remaining options but skips straight to the next item. I tried giving it a list of the terms as well.
Inside the if/elif blocks, I think e.g. if item is ROW should examine whether item and ROW are identical (and they actually are).
X or Y or Z evaluates to the first of the three values that casts to True. Non-empty strings are always True. So for item in tree.iter(ROW or COL or PARA) evaluates to for item in tree.iter(ROW); this is why you are getting only row elements inside your loop.
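You can see this in the interpreter (with shortened stand-in tag names):

>>> ROW, COL, PARA = "w:tr", "w:tc", "w:p"
>>> ROW or COL or PARA  # a non-empty string is truthy, so 'or' stops at the first value
'w:tr'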
The iter() method of an ElementTree object can only accept one tag name, so you should perhaps just iterate over the whole tree (this won't be a problem if the document is not big).
is is not going to work here. It is an identity operator and only returns True if the objects compared are identical (i.e. the variables refer to the same Python object). In your if... elif... you're comparing a constant str (ROW, COL, PARA) against an Element object, which is created anew in each iteration, so these two are never the same object and each comparison returns False.
Instead you should use something like if item.tag == ROW.
All of the above taken into account, you should rewrite your loop section like this:
for item in tree.iter():
    texts = []
    print(item)
    if item.tag == ROW:
        texts.append('\n')
    elif item.tag == COL:
        texts.append('\t\t')
    elif item.tag == PARA:
        for node in item.iter(TEXT):
            if node.text:
                texts.append(node.text)
    if texts:
        paragraphs.append(''.join(texts))
The answer above won't work like you asked. This should work for documents containing only tables; some additional parsing with findall should help you isolate non-table data and make this work for a document with tables and other text:
TABLE = WORD_NAMESPACE + 'tbl'

for item in tree.iter():  # use this for loop instead
    #print(item.tag)
    if item.tag == TABLE:
        for row in item.iter(ROW):
            texts.append('\n')
            for col in row.iter(COL):
                texts.append('\t')
                for ent in col.iter(TEXT):
                    if ent.text:
                        texts.append(ent.text)
return ''.join(texts)
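For completeness, a sketch of how that fragment slots back into the question's get_docx_text() skeleton (reusing the imports and WORD_NAMESPACE constants from above; get_docx_table_text is just an illustrative name):

def get_docx_table_text(path):
    # Unzip the .docx and parse word/document.xml, as in the question
    document = zipfile.ZipFile(path)
    tree = XML(document.read('word/document.xml'))
    document.close()

    texts = []
    for item in tree.iter():
        if item.tag == TABLE:
            for row in item.iter(ROW):
                texts.append('\n')
                for col in row.iter(COL):
                    texts.append('\t')
                    for ent in col.iter(TEXT):
                        if ent.text:
                            texts.append(ent.text)
    return ''.join(texts)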

Using BeautifulSoup to find a tag and evaluate whether it fits some criteria

I am writing a program to extract text from a website and write it into a text file. Each entry in the text file should have 3 values separated by a tab. The first value is hard-coded to XXXX, the 2nd value should initialize to the first item on the website with a <p class="style4">, and the third value is the next item on the website with a <p class="style5">. The logic I'm trying to introduce is: look for the first <p class="style4"> and write the associated string into the text file. Then find the next <p class="style5"> and write the associated string into the text file. Then look for the next p class. If it's "style4", start a new line; if it's another "style5", write it into the text file with the first style5 entry but separated with a comma (alternatively, the program could just skip the next style5).
I'm stuck on one part of the program: getting it to look for the next p class and evaluate it against style4 and style5. Since I was having problems with finding and evaluating the p class tag, I chose to pull my code out of the loop and just try to accomplish the first iteration of the task for starters. Here's my code so far:
import urllib2
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib2.urlopen('http://www.kcda.org/KCDA_Awarded_Contracts.htm').read())
next_vendor = soup.find('p', {'class': 'style4'})
print next_vendor
next_commodity = next_vendor.find_next('p', {'class': 'style5'})
print next_commodity
next = next_commodity.find_next('p')
print next
I'd appreciate any help anybody can provide! Thanks in advance!
I am not entirely sure how you are expecting your output to be. I am assuming that you are trying to get the data in the webpage in the format:
Alphabet \t Vendor \t Category
You can do this:
# The basic things
import urllib2
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib2.urlopen('http://www.kcda.org/KCDA_Awarded_Contracts.htm').read())
Get the td of interest:
table = soup.find('table')
data = table.find_all('tr')[-1]
data = data.find_all('td')[1:]
Now, we will create a nested output dictionary with alphabets as the keys and an inner dict as the value. The inner dict has the vendor name as key and category information as its value.
output_dict = {}
current_alphabet = ""
current_vendor = ""
for td in data:
    for p in td.find_all('p'):
        print p.text.strip()
        if p.get('class')[0] == 'style6':
            current_alphabet = p.text.strip()
            vendors = {}
            output_dict[current_alphabet] = vendors
            continue
        if p.get('class')[0] == 'style4':
            print "Here"
            current_vendor = p.text.strip()
            category = []
            output_dict[current_alphabet][current_vendor] = category
            continue
        output_dict[current_alphabet][current_vendor].append(p.text.strip())
This gets the output_dict in the format:
{ ...
  u'W': { u'WTI - Weatherproofing Technologies': [u'Roofing'],
          u'Wenger Corporation': [u'Musical Instruments and Equipment'],
          u'Williams Scotsman, Inc': [u'Modular/Portable Buildings'],
          u'Witt Company': [u'Interactive Technology']
        },
  u'X': { u'Xerox': [u"Copiers & MFD's", u'Printers']
        }
}
Skipping the earlier parts for brevity. Now it is just a matter of accessing this dictionary and writing out to a tab-separated file.
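A minimal sketch of that last step, assuming the output_dict built above (the file name is arbitrary):

with open('contracts.txt', 'w') as out:
    for alphabet, vendors in output_dict.items():
        for vendor, categories in vendors.items():
            out.write('%s\t%s\t%s\n' % (alphabet, vendor, ', '.join(categories)))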
Hope this helps.
Agree with @shaktimaan. Using a dictionary or list is a good approach here. My attempt is slightly different.
import requests as rq
from bs4 import BeautifulSoup as bsoup
import csv

url = "http://www.kcda.org/KCDA_Awarded_Contracts.htm"
r = rq.get(url)
soup = bsoup(r.content)

primary_line = soup.find_all("p", {"class":["style4","style5"]})
final_list = {}
for line in primary_line:
    txt = line.get_text().strip().encode("utf-8")
    if txt != "\xc2\xa0":
        if line["class"][0] == "style4":
            key = txt
            final_list[key] = []
        else:
            final_list[key].append(txt)

with open("products.csv", "wb") as ofile:
    f = csv.writer(ofile)
    for item in final_list:
        f.writerow([item, ", ".join(final_list[item])])
For the scrape, we isolate the style4 and style5 tags right away. I did not bother going for the style6 or the alphabet headers. We then get the text inside each tag. If the text is not a whitespace of sorts (this is all over the tables, probably obfuscation or bad mark-up), we then check if it's style4 or style5. If it's the former, we assign it as a key to a blank list. If it's the latter, we append it to the blank list of the most recent key. Obviously the key changes every time we hit a new style4, so it's a relatively safe approach.
The last part is easy: we just use ", ".join on the value part of the key-value pair to concatenate the list as one string. We then write it to a CSV file.
Due to the dictionary being unsorted, the resulting CSV file will not be sorted alphabetically.
Changing it to a tab-delimited file is up to you. That's simple enough. Hope this helps!
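For reference, a minimal sketch of that change, reusing final_list from above: csv.writer accepts a delimiter argument, so only the writer setup changes (the file name is arbitrary).

with open("products.tsv", "wb") as ofile:
    f = csv.writer(ofile, delimiter="\t")
    for item in final_list:
        f.writerow([item, ", ".join(final_list[item])])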

Parse HTML Table with Python BeautifulSoup

I am attempting to use BeautifulSoup to parse an html table which I uploaded to http://pastie.org/8070879 in order to get the three columns (0 to 735, 0.50 to 1.0 and 0.5 to 0.0) as lists. To explain why, I will want the integers 0-735 to be keys and the decimal numbers to be values.
From reading many of the other posts on SO, I have come up with the following which does not come close to creating the lists I want. All it does is display the text in the table as is seen here http://i1285.photobucket.com/albums/a592/TheNexulo/output_zps20c5afb8.png
from bs4 import BeautifulSoup

soup = BeautifulSoup(open("fide.html"))
table = soup.find('table')
rows = table.findAll('tr')
for tr in rows:
    cols = tr.findAll('td')
    for td in cols:
        text = ''.join(td.find(text=True))
        print text + "|",
    print
I'm new to Python and BeautifulSoup, so please be gentle with me! Thanks
HTML parsers like BeautifulSoup presume that what you want is an object model that mirrors the input HTML structure. But sometimes (like in this case) that model gets in the way more than helps. Pyparsing includes some HTML parsing features that are more robust than just using raw regexes, but otherwise work in similar fashion, letting you define snippets of HTML of interest, and just ignoring the rest. Here is a parser that reads through your posted HTML source:
from pyparsing import makeHTMLTags, withAttribute, Suppress, Regex, Group

""" looking for this recurring pattern:
        <td valign="top" bgcolor="#FFFFCC">00-03</td>
        <td valign="top">.50</td>
        <td valign="top">.50</td>
    and want a dict with keys 0, 1, 2, and 3 all with values (.50,.50)
"""

td, tdend = makeHTMLTags("td")
keytd = td.copy().setParseAction(withAttribute(bgcolor="#FFFFCC"))
td, tdend, keytd = map(Suppress, (td, tdend, keytd))

realnum = Regex(r'1?\.\d+').setParseAction(lambda t: float(t[0]))
integer = Regex(r'\d{1,3}').setParseAction(lambda t: int(t[0]))
DASH = Suppress('-')

# build up an expression matching the HTML bits above
entryExpr = (keytd + integer("start") + DASH + integer("end") + tdend +
             Group(2*(td + realnum + tdend))("vals"))
This parser not only picks out the matching triples, it also extracts the start-end integers and the pairs of real numbers (and also already converts from string to integers or floats at parse time).
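As a quick sanity check, you can run entryExpr over a single stub row (illustrative snippet, not from the original answer):

sample = '''<td valign="top" bgcolor="#FFFFCC">00-03</td>
<td valign="top">.50</td>
<td valign="top">.50</td>'''
for t in entryExpr.searchString(sample):
    print (t.start, t.end, t.vals.asList())   # -> (0, 3, [0.5, 0.5])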
Looking at the table, I'm guessing you actually want a lookup that will take a key like 700, and return the pair of values (0.99, 0.01), since 700 falls in the range of 620-735. This bit of code searches the source HTML text, iterates over the matched entries and inserts key-value pairs into the dict lookup:
# search the input HTML for matches to the entryExpr expression, and build up lookup dict
lookup = {}
for entry in entryExpr.searchString(sourcehtml):
    for i in range(entry.start, entry.end+1):
        lookup[i] = tuple(entry.vals)
And now to try out some lookups:
# print out some test values
for test in (0, 20, 100, 700):
    print (test, lookup[test])
prints:
0 (0.5, 0.5)
20 (0.53, 0.47)
100 (0.64, 0.36)
700 (0.99, 0.01)
I think the above answer is better than what I would offer, but I have a BeautifulSoup answer that can get you started. This is a bit hackish, but I figured I would offer it nevertheless.
With BeautifulSoup, you can find all the tags with certain attributes in the following way (assuming you have a soup object already set up):
soup.find_all('td', attrs={'bgcolor':'#FFFFCC'})
That will find all of your keys. The trick is to associate these with the values you want, which all show up immediately afterward and which are in pairs (if these things change, by the way, this solution won't work).
Thus, you can try the following to access what follows your key entries and put those into your_dictionary:
for node in soup.find_all('td', attrs={'bgcolor':'#FFFFCC'}):
    your_dictionary[node.string] = node.next_sibling
The problem is that the "next_sibling" is actually a '\n', so you have to do the following to capture the next value (the first value you want):
for node in soup.find_all('td', attrs={'bgcolor':'#FFFFCC'}):
    your_dictionary[node.string] = node.next_sibling.next_sibling.string
And if you want the two following values, you have to double this:
for node in soup.find_all('td', attrs={'bgcolor':'#FFFFCC'}):
    your_dictionary[node.string] = [node.next_sibling.next_sibling.string, node.next_sibling.next_sibling.next_sibling.next_sibling.string]
Disclaimer: that last line is pretty ugly to me.
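For the record, a slightly cleaner variant (a sketch, not tested against the actual page) grabs the next two value cells in one go with find_next_siblings:

for node in soup.find_all('td', attrs={'bgcolor': '#FFFFCC'}):
    # The next two <td> siblings are the value cells; filtering by tag name
    # skips the intervening '\n' text nodes.
    value_cells = node.find_next_siblings('td')[:2]
    your_dictionary[node.string] = [td.string for td in value_cells]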
I've used BeautifulSoup 3, but it probably will work under 4.
# Import System libraries
import re

# Import Custom libraries
from BeautifulSoup import BeautifulSoup

# This may be different between BeautifulSoup 3 and BeautifulSoup 4
with open("fide.html") as file_h:
    # Read the file into the BeautifulSoup class
    soup = BeautifulSoup(file_h.read())

tr_location = lambda x: x.name == u"tr"  # Row location
key_location = lambda x: x.name == u"td" and bool(set([(u"bgcolor", u"#FFFFCC")]) & set(x.attrs))  # Integer key location
td_location = lambda x: x.name == u"td" and not dict(x.attrs).has_key(u"bgcolor")  # Float value location

str_key_dict = {}
num_key_dict = {}
for tr in soup.findAll(tr_location):  # Loop through all found rows
    for key in tr.findAll(key_location):  # Loop through all found Integer key tds
        key_list = []
        key_str = key.text.strip()
        for td in key.findNextSiblings(td_location)[:2]:  # Loop through the next 2 neighbouring Float values
            key_list.append(td.text)
        key_list = map(float, key_list)  # Convert the text values to floats

        # String based dictionary section
        str_key_dict[key_str] = key_list

        # Number based dictionary section
        num_range = map(int, re.split("\s*-\s*", key_str))  # Extract a value range to perform interpolation
        if(len(num_range) == 2):
            num_key_dict.update([(x, key_list) for x in range(num_range[0], num_range[1] + 1)])
        else:
            num_key_dict.update([(num_range[0], key_list)])

for x in num_key_dict.items():
    print x

Problem with newlines when I use toprettyxml()

I'm currently using the toprettyxml() function of the xml.dom module in a Python script and I'm having some trouble with the newlines.
If I don't use the newl parameter, or if I use toprettyxml(newl='\n'), it displays several newlines instead of only one.
For instance
f = open(filename, 'w')
f.write(dom1.toprettyxml(encoding='UTF-8'))
f.close()
displayed:
<params>

	<param name="Level" value="#LEVEL#"/>

	<param name="Code" value="281"/>

</params>
Does anyone know where the problem comes from and how I can use it?
FYI I'm using Python 2.6.1
I found another great solution:
f = open(filename, 'w')
dom_string = dom1.toprettyxml(encoding='UTF-8')
dom_string = os.linesep.join([s for s in dom_string.splitlines() if s.strip()])
f.write(dom_string)
f.close()
The above solution basically removes the unwanted newlines from dom_string, which are generated by toprettyxml().
Inputs taken from -> What's a quick one-liner to remove empty lines from a python string?
toprettyxml() is quite awful. It is not a matter of Windows and '\r\n'. Trying any string as the newl parameter shows that too many lines are being added. Not only that, but other blanks (that may cause you problems when a machine reads the XML) are also added.
Some workarounds available at
http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace
toprettyxml(newl='') works for me on Windows.
This is a pretty old question, but I guess I know what the problem is:
Minidom's pretty print has a pretty straightforward method: it just adds the characters that you specify as arguments. That means it will duplicate the characters if they already exist.
E.g. if you parse an XML file that looks like this:
<parent>
    <child>
        Some text
    </child>
</parent>
there are already newline characters and indentation inside the dom. Those are taken as text nodes by minidom and are still there when you parse it into a dom object.
If you now convert the dom object back into an XML string, those text nodes will still be there, meaning newline characters and indent tabs still remain. Using pretty print now will just add more newlines and more tabs. That's why, in this case, not using pretty print at all or specifying newl='' results in the wanted output.
However, if you generate the dom in your script, the text nodes will not be there, and pretty printing with newl='\r\n' and/or addindent='\t' will turn out quite pretty.
TL;DR Indents and newlines remain from parsing and pretty print just adds more
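If you do need to re-print a parsed document, one workaround (a sketch; strip_whitespace_nodes is my own helper, not part of minidom) is to drop the whitespace-only text nodes before pretty printing:

import xml.dom.minidom

def strip_whitespace_nodes(node):
    # Remove text nodes that contain only whitespace, recursing into elements
    for child in list(node.childNodes):
        if child.nodeType == child.TEXT_NODE and not child.data.strip():
            node.removeChild(child)
        else:
            strip_whitespace_nodes(child)

dom = xml.dom.minidom.parse("input.xml")
strip_whitespace_nodes(dom.documentElement)
print(dom.toprettyxml(indent="\t"))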
If you don't mind installing new packages, try beautifulsoup. I had very good experiences with its XML prettifier.
The following function worked for my problem. I had to use Python 2.7 and was not allowed to install any additional 3rd-party packages.
The crux of the implementation is as follows:
Use dom.toprettyxml()
Remove all white spaces
Add new lines and tabs as per your requirement.
import os
import re
import xml.dom.minidom
import sys

class XmlTag:
    opening = 0
    closing = 1
    self_closing = 2
    closing_tag = "</"
    self_closing_tag = "/>"
    opening_tag = "<"

def to_pretty_xml(xml_file_path):
    pretty_xml = ""
    space_or_tab_count = "    "  # Add spaces or use \t
    tab_count = 0
    last_tag = -1
    dom = xml.dom.minidom.parse(xml_file_path)

    # get pretty-printed version of input file
    string_xml = dom.toprettyxml(' ', os.linesep)
    # remove version tag
    string_xml = string_xml.replace("<?xml version=\"1.0\" ?>", '')
    # remove empty lines and spaces
    string_xml = "".join(string_xml.split())
    # move each tag to new line
    string_xml = string_xml.replace('>', '>\n')

    for line in string_xml.split('\n'):
        if line.__contains__(XmlTag.closing_tag):
            # For consecutive closing tags decrease the indentation
            if last_tag == XmlTag.closing:
                tab_count = tab_count - 1
            # Move closing element to next line
            if last_tag == XmlTag.closing or last_tag == XmlTag.self_closing:
                pretty_xml = pretty_xml + '\n' + (space_or_tab_count * tab_count)
            pretty_xml = pretty_xml + line
            last_tag = XmlTag.closing
        elif line.__contains__(XmlTag.self_closing_tag):
            # Print self closing on next line with one indentation from parent node
            pretty_xml = pretty_xml + '\n' + (space_or_tab_count * (tab_count+1)) + line
            last_tag = XmlTag.self_closing
        elif line.__contains__(XmlTag.opening_tag):
            # For consecutive opening tags increase the indentation
            if last_tag == XmlTag.opening:
                tab_count = tab_count + 1
            # Move opening element to next line
            if last_tag == XmlTag.opening or last_tag == XmlTag.closing:
                pretty_xml = pretty_xml + '\n' + (space_or_tab_count * tab_count)
            pretty_xml = pretty_xml + line
            last_tag = XmlTag.opening
    return pretty_xml

pretty_xml = to_pretty_xml("simple.xml")
with open("pretty.xml", 'w') as f:
    f.write(pretty_xml)
This gives me nice XML on Python 3.6, haven't tried on Windows:
dom = xml.dom.minidom.parseString(xml_string)
pretty_xml_as_string = dom.toprettyxml(newl='').replace("\n\n", "\n")
Are you viewing the resulting file on Windows? If so, try using toprettyxml(newl='\r\n').
