correctly tokenize English contractions with unicode apostrophes - python

How do you modify the default spaCy (v3.0.5) tokenizer to correctly split English contractions when unicode apostrophes (not ') are used?
import spacy
nlp = spacy.load('en_core_web_sm')
apostrophes = ["'",'\u02B9', '\u02BB', '\u02BC', '\u02BD', '\u02C8', '\u02CA', '\u02CB', '\u0060', '\u00B4']
for apo in apostrophes:
text = f"don{apo}t"
print([t for t in nlp(text)])
>>>
[do, n't]
[donʹt]
[donʻt]
[donʼt]
[donʽt]
[donˈt]
[donˊt]
[donˋt]
[don`t]
[don´t]
The desired output for all examples is [do, n't]
My best guess was to extend the default tokenizer_exceptions with all possible apostrophe variations. But this does not work, as tokenizer special cases are not allowed to modify the text.
import spacy
from spacy.util import compile_prefix_regex, compile_suffix_regex, compile_infix_regex
nlp = spacy.load('en_core_web_sm')
apostrophes = ['\u02B9', '\u02BB', '\u02BC', '\u02BD', '\u02C8', '\u02CA', '\u02CB', '\u0060', '\u00B4']
default_rules = nlp.Defaults.tokenizer_exceptions
extended_rules = default_rules.copy()
for key, val in default_rules.items():
if "'" in key:
for apo in apostrophes:
extended_rules[key.replace("'", apo)] = val
rules = nlp.Defaults.tokenizer_exceptions
infix_re = compile_infix_regex(nlp.Defaults.infixes)
prefix_re = compile_prefix_regex(nlp.Defaults.prefixes)
suffix_re = compile_suffix_regex(nlp.Defaults.suffixes)
nlp.tokenizer = spacy.tokenizer.Tokenizer(
    nlp.vocab,
    rules=extended_rules,
    prefix_search=prefix_re.search,
    suffix_search=suffix_re.search,
    infix_finditer=infix_re.finditer,
)
apostrophes = ["'",'\u02B9', '\u02BB', '\u02BC', '\u02BD', '\u02C8', '\u02CA', '\u02CB', '\u0060', '\u00B4']
for apo in apostrophes:
text = f"don{apo}t"
print([t for t in nlp(text)])
>>> ValueError: [E997] Tokenizer special cases are not allowed to modify the text. This would map ':`(' to ':'(' given token attributes '[{65: ":'("}]'.

You just need to add an exception without changing the text.
import spacy
nlp = spacy.load('en_core_web_sm')
from spacy.attrs import ORTH, NORM
case = [{ORTH: "do"}, {ORTH: "n`t", NORM: "not"}]
tokenizer = nlp.tokenizer
tokenizer.add_special_case("don`t", case)
doc = nlp("I don`t believe in bugs")
print(list(doc))
# => [I, do, n`t, believe, in, bugs]
If you want to change the text, you should do it as a preprocessing step.
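Building on that, here is a minimal, untested sketch (not part of the original answer) that adds variant special cases for every unicode apostrophe without changing the text. It assumes the default exception dicts are keyed by the attribute IDs from spacy.attrs, as the E997 error message above suggests; the ORTH values are rewritten so their concatenation still matches the surface text exactly.
import spacy
from spacy.attrs import ORTH
nlp = spacy.load('en_core_web_sm')
apostrophes = ['\u02B9', '\u02BB', '\u02BC', '\u02BD', '\u02C8', '\u02CA', '\u02CB', '\u0060', '\u00B4']
for orth_key, token_specs in nlp.Defaults.tokenizer_exceptions.items():
    if "'" not in orth_key:
        continue
    for apo in apostrophes:
        new_specs = []
        for spec in token_specs:
            spec = dict(spec)
            # keep ORTH identical to the surface text by swapping the apostrophe here too
            if ORTH in spec:
                spec[ORTH] = spec[ORTH].replace("'", apo)
            new_specs.append(spec)
        nlp.tokenizer.add_special_case(orth_key.replace("'", apo), new_specs)
print([t.text for t in nlp("don\u02BCt")])  # expected: ['do', "nʼt"]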

Related

Why is my word lemmatization not working as expected?

Hi stackoverflow community!
Long-time reader but first-time poster. I'm currently trying my hand at NLP, and after reading a few forum posts touching on this topic, I can't seem to get the lemmatizer to work properly (function pasted below). Comparing my original text vs. the preprocessed text, all the cleaning steps work as expected except the lemmatization. I've even tried specifying the part of speech 'v' so that words aren't treated as nouns by default and the base form of the verb is returned (ex: turned -> turn, are -> be, reading -> read)... however this doesn't seem to be working.
Appreciate another set of eyes and feedback - thanks!
# key imports
import pandas as pd
import numpy as np
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from string import punctuation
from nltk.stem import WordNetLemmatizer
import contractions
# cleaning functions
def to_lower(text):
    '''
    Convert text to lowercase
    '''
    return text.lower()

def remove_punct(text):
    return ''.join(c for c in text if c not in punctuation)

def remove_stopwords(text):
    '''
    Removes stop words which don't have meaning (ex: is, the, a, etc.)
    '''
    additional_stopwords = ['app']
    stop_words = set(stopwords.words('english')) - set(['not', 'out', 'in'])
    stop_words = stop_words.union(additional_stopwords)
    return ' '.join([w for w in word_tokenize(text) if w not in stop_words])

def fix_contractions(text):
    '''
    Expands contractions
    '''
    return contractions.fix(text)

# preprocessing pipeline
def preprocess(text):
    # convert to lower case
    lower_text = to_lower(text)
    sentence_tokens = sent_tokenize(lower_text)
    word_list = []
    for each_sent in sentence_tokens:
        # fix contractions
        clean_text = fix_contractions(each_sent)
        # remove punctuation
        clean_text = remove_punct(clean_text)
        # filter out stop words
        clean_text = remove_stopwords(clean_text)
        # get base form of word
        wnl = WordNetLemmatizer()
        for part_of_speech in ['v']:
            lemmatized_word = wnl.lemmatize(clean_text, part_of_speech)
        # split the sentence into word tokens
        word_tokens = word_tokenize(lemmatized_word)
        for i in word_tokens:
            word_list.append(i)
    return word_list
# lemmatize not properly working to get base form of word
# ex: 'turned' still remains 'turned' without returning base form 'turn'
# ex: 'running' still remains 'running' without getting base form 'run'
sample_data = posts_with_text['post_text'].head(5)
print(sample_data)
sample_data.apply(preprocess)
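Not part of the original post, but the behaviour described above is consistent with WordNetLemmatizer.lemmatize expecting a single word: passing a whole sentence string makes it look the entire string up as one "word", so it usually comes back unchanged. A quick illustration:
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
# a whole sentence is looked up as one "word" and typically comes back unchanged
print(wnl.lemmatize("not turned out running late", "v"))
# lemmatizing word by word gives the expected base forms: turned -> turn, running -> run
print([wnl.lemmatize(w, "v") for w in "not turned out running late".split()])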

Force spacy not to parse punctuation?

Is there a way to force spaCy not to parse punctuation as separate tokens?
nlp = spacy.load('en')
doc = nlp(u'the $O is in $R')
[ w for w in doc ]
: [the, $, O, is, in, $, R]
I want :
: [the, $O, is, in, $R]
Customize the prefix_search function for spaCy's Tokenizer class. Refer to the documentation. Something like:
import spacy
import re
from spacy.tokenizer import Tokenizer
# use any currency regex match as per your requirement
prefix_re = re.compile(r'''^\$[a-zA-Z0-9]''')
def custom_tokenizer(nlp):
    return Tokenizer(nlp.vocab, prefix_search=prefix_re.search)
nlp = spacy.load("en_core_web_sm")
nlp.tokenizer = custom_tokenizer(nlp)
doc = nlp(u'the $O is in $R')
print([t.text for t in doc])
# ['the', '$O', 'is', 'in', '$R']
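As an aside (not from the original answer): on a recent spaCy version where the tokenizer's prefix_search attribute is writable (v2.1+), you could instead keep all the default rules and just drop the currency-symbol prefixes, which is a smaller change than replacing the whole tokenizer. A rough sketch:
import spacy
from spacy.util import compile_prefix_regex
nlp = spacy.load("en_core_web_sm")
# remove prefix rules containing '$' so it is no longer split off the front of a token
prefixes = [p for p in nlp.Defaults.prefixes if "$" not in p]
nlp.tokenizer.prefix_search = compile_prefix_regex(prefixes).search
doc = nlp("the $O is in $R")
print([t.text for t in doc])  # expected: ['the', '$O', 'is', 'in', '$R']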
Yes, there is. For example,
import spacy
import regex as re
from spacy.tokenizer import Tokenizer
prefix_re = re.compile(r'''^[\[\+\("']''')
suffix_re = re.compile(r'''[\]\)"']$''')
infix_re = re.compile(r'''[\(\-\)\#\.\:\$]''') #you need to change the infix tokenization rules
simple_url_re = re.compile(r'''^https?://''')
def custom_tokenizer(nlp):
    return Tokenizer(nlp.vocab, prefix_search=prefix_re.search,
                     suffix_search=suffix_re.search,
                     infix_finditer=infix_re.finditer,
                     token_match=simple_url_re.match)
nlp = spacy.load('en_core_web_sm')
nlp.tokenizer = custom_tokenizer(nlp)
doc = nlp(u'the $O is in $R')
print([w for w in doc])  # prints:
[the, $O, is, in, $R]
You just need to add the '$' character to the infix regex (with an escape character '\', obviously).
Aside: I have included the prefix and suffix regexes to showcase the flexibility of spaCy's tokenizer. In your case just the infix regex will suffice.

spacy tokenization merges the wrong tokens

I would like to use spacy for tokenizing Wikipedia scrapes. Ideally it would work like this:
text = 'procedure that arbitrates competing models or hypotheses.[2][3] Researchers also use experimentation to test existing theories or new hypotheses to support or disprove them.[3][4]'
# run spacy
spacy_en = spacy.load("en")
doc = spacy_en(text, disable=['tagger', 'ner'])
tokens = [tok.text.lower() for tok in doc]
# desired output
# tokens = [..., 'models', 'or', 'hypotheses', '.', '[2][3]', 'Researchers', ...]
# actual output
# tokens = [..., 'models', 'or', 'hypotheses.[2][3', ']', 'Researchers', ...]
The problem is that the 'hypotheses.[2][3]' is glued together into one token.
How can I prevent spacy from connecting this '[2][3]' to the previous token?
As long as it is split from the word hypotheses and the point at the end of the sentence, I don't care how it is handled. But individual words and grammar should stay apart from syntactical noise.
So for example, any of the following would be a desirable output:
'hypotheses', '.', '[2][', '3]'
'hypotheses', '.', '[2', '][3]'
I think you could try playing around with the infix rules:
import re
import spacy
from spacy.tokenizer import Tokenizer
infix_re = re.compile(r'''[.]''')
def custom_tokenizer(nlp):
    return Tokenizer(nlp.vocab, infix_finditer=infix_re.finditer)
nlp = spacy.load('en')
nlp.tokenizer = custom_tokenizer(nlp)
doc = nlp(u"hello-world! I am hypothesis.[2][3]")
print([t.text for t in doc])
More on this: https://spacy.io/usage/linguistic-features#native-tokenizers
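A variation on that idea (not from the original answer): instead of replacing all infix rules with just [.], keep the defaults and add one pattern so a period followed by a citation bracket becomes an infix split point. A rough sketch, assuming spaCy v2/v3 defaults:
import spacy
from spacy.util import compile_infix_regex
nlp = spacy.load("en_core_web_sm")
# add an infix rule for a period that sits between a word and a '[' citation bracket
infixes = list(nlp.Defaults.infixes) + [r"(?<=[a-zA-Z])\.(?=\[)"]
nlp.tokenizer.infix_finditer = compile_infix_regex(infixes).finditer
doc = nlp("competing models or hypotheses.[2][3] Researchers also use experimentation")
# 'hypotheses', '.' and the bracketed citations should now come out as separate tokens
print([t.text for t in doc])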

How to apply a custom stemmer before passing the training corpus to TfidfVectorizer in sklearn?

Here is my code: I have a sentence and I want to tokenize and stem it before passing it to TfidfVectorizer to finally get a tf-idf representation of the sentence:
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.snowball import SnowballStemmer
stemmer_ita = SnowballStemmer("italian")
def tokenizer_stemmer_ita(text):
    return [stemmer_ita.stem(word) for word in text.split()]

def sentence_tokenizer_stemmer(text):
    return " ".join([stemmer_ita.stem(word) for word in text.split()])
X_train = ['il libro è sul tavolo']
X_train = [sentence_tokenizer_stemmer(text) for text in X_train]
tfidf = TfidfVectorizer(preprocessor=None, tokenizer=None, use_idf=True, stop_words=None, ngram_range=(1,2))
X_train = tfidf.fit_transform(X_train)
# let's see the features
print (tfidf.get_feature_names())
I get as output:
['il', 'il libr', 'libr', 'libr sul', 'sul', 'sul tavol', 'tavol']
if I change the parameter
tokenizer=None
to:
tokenizer=tokenizer_stemmer_ita
and I comment this line:
X_train = [sentence_tokenizer_stemmer(text) for text in X_train]
I expect to get the same result but the result is different:
['il', 'il libr', 'libr', 'libr è', 'sul', 'sul tavol', 'tavol', 'è', 'è sul']
Why? Am I implementing the external stemmer correctly? It seems, at least, that stopwords ("è") are removed in the first run, even though stop_words=None.
[edit]
As suggested by Vivek, the problem seems to be the default token pattern, which is applied anyway when tokenizer=None. So if I add these two lines at the beginning of tokenizer_stemmer_ita:
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
text = " ".join( token_pattern.findall(text) )
I should get the correct behaviour, and in fact I get it for the above simple example, but for a different example:
X_train = ['0.05%.\n\nVedete?']
I don't, the two outputs are different:
['05', '05 ved', 'ved']
and
['05', '05 vedete', 'vedete']
Why? In this case the question mark seems to be the problem; without it the outputs are identical.
[edit2]
It seems I have to stem first and then apply the regex; in this case the two outputs are identical.
That's because of the default token_pattern used by TfidfVectorizer:
token_pattern : string
Regular expression denoting what constitutes a “token”, only used if analyzer == 'word'. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator).
So the character è is not selected.
import re
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
print(token_pattern.findall('il libro è sul tavolo'))
# Output
# ['il', 'libro', 'sul', 'tavolo']
This default token_pattern is used when tokenizer is None, as you are experiencing.
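To make both routes agree, a rough sketch (not part of the original answer) is to have the custom tokenizer apply the same default token_pattern after stemming, in line with [edit2] above:
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.snowball import SnowballStemmer
stemmer_ita = SnowballStemmer("italian")
token_pattern = re.compile(r"(?u)\b\w\w+\b")
def tokenizer_stemmer_ita(text):
    # stem first, then apply the same regex TfidfVectorizer uses by default
    stemmed = " ".join(stemmer_ita.stem(word) for word in text.split())
    return token_pattern.findall(stemmed)
tfidf = TfidfVectorizer(tokenizer=tokenizer_stemmer_ita, use_idf=True, ngram_range=(1, 2))
X_train = tfidf.fit_transform(['il libro è sul tavolo'])
print(tfidf.get_feature_names())  # get_feature_names_out() on newer scikit-learn; should reproduce the first feature list above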

How could spacy tokenize hashtag as a whole?

In a sentence containing hashtags, such as a tweet, spacy's tokenizer splits hashtags into two tokens:
import spacy
nlp = spacy.load('en')
doc = nlp(u'This is a #sentence.')
[t for t in doc]
output:
[This, is, a, #, sentence, .]
I'd like to have hashtags tokenized as follows, is that possible?
[This, is, a, #sentence, .]
I also tried several ways to prevent spaCy from splitting hashtags or words with hyphens like "cutting-edge". My experience is that merging tokens afterwards can be problematic, because the POS tagger and dependency parser have already used the wrong tokens for their decisions. Touching the infix, prefix, or suffix regexes is somewhat error-prone and complex, because you don't want your changes to produce side effects.
The simplest way is indeed, as pointed out before, to modify the token_match function of the tokenizer. This is a re.match-style function identifying regular expressions that will not be split. Instead of importing the specific URL pattern, I'd rather extend whatever spaCy's default is.
import re
import spacy
from spacy.tokenizer import _get_regex_pattern
nlp = spacy.load('en')
# get default pattern for tokens that don't get split
re_token_match = _get_regex_pattern(nlp.Defaults.token_match)
# add your patterns (here: hashtags and in-word hyphens)
re_token_match = f"({re_token_match}|#\w+|\w+-\w+)"
# overwrite token_match function of the tokenizer
nlp.tokenizer.token_match = re.compile(re_token_match).match
text = "#Pete: choose low-carb #food #eatsmart ;-) 😋👍"
doc = nlp(text)
This yields:
['#Pete', ':', 'choose', 'low-carb', '#food', '#eatsmart', ';-)', '😋', '👍']
This is more of an add-on to the great answer by @DhruvPathak, and a shameless copy from the GitHub thread linked below (and the even better answer there by @csvance). spaCy features (since v2.0) the add_pipe method, meaning you can define @DhruvPathak's great answer in a function and add the step (conveniently) into your nlp processing pipeline, as below.
Citation starts here:
def hashtag_pipe(doc):
    merged_hashtag = False
    while True:
        for token_index, token in enumerate(doc):
            if token.text == '#':
                if token.head is not None:
                    start_index = token.idx
                    end_index = start_index + len(token.head.text) + 1
                    if doc.merge(start_index, end_index) is not None:
                        merged_hashtag = True
                        break
        if not merged_hashtag:
            break
        merged_hashtag = False
    return doc

nlp = spacy.load('en')
nlp.add_pipe(hashtag_pipe)

doc = nlp("twitter #hashtag")
assert len(doc) == 2
assert doc[0].text == 'twitter'
assert doc[1].text == '#hashtag'
Citation ends here; check out "how to add hashtags to the part of speech tagger" (#503) for the full thread.
PS It's clear when reading the code, but for the copy&pasters, don't disable the parser :)
You can do some pre- and post-processing string manipulation, which lets you bypass '#'-based tokenization and is easy to implement. E.g.:
>>> import re
>>> import spacy
>>> nlp = spacy.load('en')
>>> sentence = u'This is my twitter update #MyTopic'
>>> parsed = nlp(sentence)
>>> [token.text for token in parsed]
[u'This', u'is', u'my', u'twitter', u'update', u'#', u'MyTopic']
>>> new_sentence = re.sub(r'#(\w+)', r'ZZZPLACEHOLDERZZZ\1', sentence)
>>> new_sentence
u'This is my twitter update ZZZPLACEHOLDERZZZMyTopic'
>>> parsed = nlp(new_sentence)
>>> [token.text for token in parsed]
[u'This', u'is', u'my', u'twitter', u'update', u'ZZZPLACEHOLDERZZZMyTopic']
>>> [x.replace(u'ZZZPLACEHOLDERZZZ', '#') for x in [token.text for token in parsed]]
[u'This', u'is', u'my', u'twitter', u'update', u'#MyTopic']
You can try setting custom separators in spaCy's tokenizer. I am not aware of methods to do that.
UPDATE: You can use a regex to find the spans of tokens you want to keep as single tokens, and retokenize using the span.merge method as mentioned here: https://spacy.io/docs/api/span#merge
Merge example:
>>> import spacy
>>> import re
>>> nlp = spacy.load('en')
>>> my_str = u'Tweet hashtags #MyHashOne #MyHashTwo'
>>> parsed = nlp(my_str)
>>> [(x.text,x.pos_) for x in parsed]
[(u'Tweet', u'PROPN'), (u'hashtags', u'NOUN'), (u'#', u'NOUN'), (u'MyHashOne', u'NOUN'), (u'#', u'NOUN'), (u'MyHashTwo', u'PROPN')]
>>> indexes = [m.span() for m in re.finditer('#\w+',my_str,flags=re.IGNORECASE)]
>>> indexes
[(15, 25), (26, 36)]
>>> for start,end in indexes:
... parsed.merge(start_idx=start,end_idx=end)
...
#MyHashOne
#MyHashTwo
>>> [(x.text,x.pos_) for x in parsed]
[(u'Tweet', u'PROPN'), (u'hashtags', u'NOUN'), (u'#MyHashOne', u'NOUN'), (u'#MyHashTwo', u'PROPN')]
>>>
I found this on github, which uses spaCy's Matcher:
from spacy.matcher import Matcher
matcher = Matcher(nlp.vocab)
matcher.add('HASHTAG', None, [{'ORTH': '#'}, {'IS_ASCII': True}])
doc = nlp('This is a #sentence. Here is another #hashtag. #The #End.')
matches = matcher(doc)
hashtags = []
for match_id, start, end in matches:
    hashtags.append(doc[start:end])
for span in hashtags:
    span.merge()
print([t.text for t in doc])
outputs:
['This', 'is', 'a', '#sentence', '.', 'Here', 'is', 'another', '#hashtag', '.', '#The', '#End', '.']
A list of hashtags is also available in the hashtags list:
print(hashtags)
output:
[#sentence, #hashtag, #The, #End]
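Note (not part of the original answer): Span.merge() and the two-argument Matcher.add() signature were removed in spaCy v3, so the same idea there would use the retokenizer context manager, roughly like this:
import spacy
from spacy.matcher import Matcher
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add('HASHTAG', [[{'ORTH': '#'}, {'IS_ASCII': True}]])  # v3 add() takes a list of patterns
doc = nlp('This is a #sentence. Here is another #hashtag. #The #End.')
with doc.retokenize() as retokenizer:
    # merge each '#' + following-token match into a single token
    for match_id, start, end in matcher(doc):
        retokenizer.merge(doc[start:end])
print([t.text for t in doc])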
I spent quite a bit of time on this, so I'll share what I came up with:
Subclassing the Tokenizer and adding the regex for hashtags to the default URL_PATTERN was the easiest solution for me, additionally adding a custom extension to match on hashtags to identify them:
import re
import spacy
from spacy.language import Language
from spacy.tokenizer import Tokenizer
from spacy.tokens import Token
nlp = spacy.load('en_core_web_sm')
def create_tokenizer(nlp):
    # contains the regex to match all sorts of urls:
    from spacy.lang.tokenizer_exceptions import URL_PATTERN

    # spacy defaults: when the standard behaviour is required, they
    # need to be included when subclassing the tokenizer
    prefix_re = spacy.util.compile_prefix_regex(Language.Defaults.prefixes)
    infix_re = spacy.util.compile_infix_regex(Language.Defaults.infixes)
    suffix_re = spacy.util.compile_suffix_regex(Language.Defaults.suffixes)

    # extending the default url regex with regex for hashtags with "or" = |
    hashtag_pattern = r'''|^(#[\w_-]+)$'''
    url_and_hashtag = URL_PATTERN + hashtag_pattern
    url_and_hashtag_re = re.compile(url_and_hashtag)

    # set a custom extension to match if token is a hashtag
    hashtag_getter = lambda token: token.text.startswith('#')
    Token.set_extension('is_hashtag', getter=hashtag_getter)

    return Tokenizer(nlp.vocab, prefix_search=prefix_re.search,
                     suffix_search=suffix_re.search,
                     infix_finditer=infix_re.finditer,
                     token_match=url_and_hashtag_re.match)
nlp.tokenizer = create_tokenizer(nlp)
doc = nlp("#spreadhappiness #smilemore so_great#good.com https://www.somedomain.com/foo")
for token in doc:
    print(token.text)
    if token._.is_hashtag:
        print("-> matches hashtag")
# returns: "#spreadhappiness -> matches hashtag #smilemore -> matches hashtag so_great#good.com https://www.somedomain.com/foo"
