I have a DataFrame of tweets about the Russia-Ukraine conflict. After cleaning, I POS-tagged the tweets and now want to lemmatize the POS-tagged column. My code returns only the first POS-tagged word as a lemma. How can I lemmatize the POS-tagged column correctly?
Function to POS-tag the cleaned tweets:
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk import pos_tag
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('wordnet')
from nltk.corpus import wordnet
# POS tagger dictionary
pos_dict = {'J':wordnet.ADJ, 'V':wordnet.VERB, 'N':wordnet.NOUN, 'R':wordnet.ADV}
def token_stop_pos(text):
    tags = pos_tag(word_tokenize(text))
    newlist = []
    for word, tag in tags:
        if word.lower() not in set(stopwords.words('english')):
            newlist.append(tuple([word, pos_dict.get(tag[0])]))
    return newlist
df['POS_tagged'] = df['cleanedTweets'].apply(token_stop_pos)
Function to lemmatize df['POS_tagged'] column:
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
def lemmatize(pos_data):
    lemma_rew = " "
    for word, pos in pos_data:
        if not pos:
            lemma = word
            lemma_rew = lemma_rew + " " + lemma
        else:
            lemma = wordnet_lemmatizer.lemmatize(word, pos=pos)
            lemma_rew = lemma_rew + " " + lemma
        return lemma_rew
df['Lemma'] = df['POS_tagged'].apply(lemmatize)
Output: (screenshot of the DataFrame after lemmatizing)
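Based on the symptom (only the first word comes back), the return almost certainly sits inside the for loop. A minimal sketch of the assumed fix is to build the string for every pair and return only after the loop:

def lemmatize(pos_data):
    lemma_rew = ""
    for word, pos in pos_data:
        if not pos:
            lemma = word
        else:
            lemma = wordnet_lemmatizer.lemmatize(word, pos=pos)
        lemma_rew = lemma_rew + " " + lemma
    # return after the loop so every (word, pos) pair contributes a lemma
    return lemma_rew.strip()

df['Lemma'] = df['POS_tagged'].apply(lemmatize)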
I'm trying to get the full (coarse-grained) tag name from nltk pos_tag, but I can't find a simple way to do it using nltk, for example something like tagset='universal'.
import nltk
from nltk.tokenize import word_tokenize

def nltk_pos(text):
    token = word_tokenize(text)
    return nltk.pos_tag(token)[0][1]
nltk_pos('home')
output: 'NN'
expected output: 'NOUN'
I had the same problem when doing NLP analysis for a paper I wrote. I had to use a mapping function like this:
import nltk
from nltk.tokenize import word_tokenize

def get_full_tag_pos(pos_tag):
    tag_dict = {"J": "ADJ",
                "N": "NOUN",
                "V": "VERB",
                "R": "ADV"}
    # assuming pos_tag comes in as capital letters, i.e. 'JJR' or 'NN'
    return tag_dict.get(pos_tag[0], 'NOUN')

# example
text = "NLTK is a leading platform for building Python programs"  # any sample sentence
words = word_tokenize(text)
words_pos = nltk.pos_tag(words)
full_tag_words_pos = [word_pos[0] + "/" + get_full_tag_pos(word_pos[1]) for word_pos in words_pos]
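Note that recent NLTK versions can also produce these coarse tags directly via the tagset argument of pos_tag; a minimal sketch, assuming the universal_tagset resource has been downloaded:

import nltk
from nltk.tokenize import word_tokenize

nltk.download('universal_tagset')  # mapping used by tagset='universal'
print(nltk.pos_tag(word_tokenize('home'), tagset='universal'))
# e.g. [('home', 'NOUN')]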
I wanted to use the WordNet lemmatizer in Python, and I have learnt that the default POS tag is NOUN and that it does not output the correct lemma for a verb unless the POS tag is explicitly specified as VERB.
My question is: what is the best way to perform the above lemmatization accurately?
I did the POS tagging using nltk.pos_tag, and I am lost in integrating the Treebank POS tags with WordNet-compatible POS tags. Please help.
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
lmtzr = WordNetLemmatizer()
tagged = nltk.pos_tag(tokens)
I get output tags like NN, JJ, VB, RB. How do I change these to WordNet-compatible tags?
Also, do I have to train nltk.pos_tag() with a tagged corpus, or can I use it directly on my data?
First of all, you can use nltk.pos_tag() directly without training it.
The function loads a pretrained tagger from a file; in older NLTK versions you could see the file name with nltk.tag._POS_TAGGER (recent releases default to an averaged perceptron tagger instead, but it produces the same Treebank tags):
nltk.tag._POS_TAGGER
>>> 'taggers/maxent_treebank_pos_tagger/english.pickle'
As it was trained with the Treebank corpus, it also uses the Treebank tag set.
The following function would map the treebank tags to WordNet part of speech names:
from nltk.corpus import wordnet
def get_wordnet_pos(treebank_tag):
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return wordnet.VERB
    elif treebank_tag.startswith('N'):
        return wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return wordnet.ADV
    else:
        return ''
You can then use the return value with the lemmatizer:
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
lemmatizer.lemmatize('going', wordnet.VERB)
>>> 'go'
Check the return value before passing it to the lemmatizer, because an empty string would raise a KeyError.
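A minimal sketch of that check, reusing get_wordnet_pos and lemmatizer from above (the sample sentence is arbitrary):

from nltk import pos_tag, word_tokenize

for word, tag in pos_tag(word_tokenize("The dogs were running home")):
    wn_tag = get_wordnet_pos(tag)
    # only pass a POS when the mapping produced one; an empty string would raise KeyError
    lemma = lemmatizer.lemmatize(word, wn_tag) if wn_tag else lemmatizer.lemmatize(word)
    print(word, lemma)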
Steps to convert: Document -> Sentences -> Tokens -> POS -> Lemmas
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
# example text
text = 'What can I say about this place. The staff of these restaurants is nice and the eggplant is not bad'
class Splitter(object):
    """
    Split the document into sentences and tokenize each sentence.
    """
    def __init__(self):
        self.splitter = nltk.data.load('tokenizers/punkt/english.pickle')
        self.tokenizer = nltk.tokenize.TreebankWordTokenizer()

    def split(self, text):
        """
        out: [['What', 'can', 'I', 'say', 'about', 'this', 'place', '.'], ...]
        """
        # split into single sentences
        sentences = self.splitter.tokenize(text)
        # tokenize each sentence
        tokens = [self.tokenizer.tokenize(sent) for sent in sentences]
        return tokens
class LemmatizationWithPOSTagger(object):
    def __init__(self):
        pass

    def get_wordnet_pos(self, treebank_tag):
        """
        Map a Treebank tag to the WordNet POS constants (a, n, r, v) used by the lemmatizer.
        """
        if treebank_tag.startswith('J'):
            return wordnet.ADJ
        elif treebank_tag.startswith('V'):
            return wordnet.VERB
        elif treebank_tag.startswith('N'):
            return wordnet.NOUN
        elif treebank_tag.startswith('R'):
            return wordnet.ADV
        else:
            # the default POS in lemmatization is noun
            return wordnet.NOUN

    def pos_tag(self, tokens):
        # find the POS tag for each token: [[('What', 'WP'), ('can', 'MD'), ('I', 'PRP'), ...], ...]
        pos_tokens = [nltk.pos_tag(token) for token in tokens]
        # lemmatize using the POS tag and build triples of
        # [original word, lemmatized word, [POS tag]], e.g. ('What', 'What', ['WP'])
        pos_tokens = [[(word, lemmatizer.lemmatize(word, self.get_wordnet_pos(pos_tag)), [pos_tag])
                       for (word, pos_tag) in pos] for pos in pos_tokens]
        return pos_tokens
lemmatizer = WordNetLemmatizer()
splitter = Splitter()
lemmatization_using_pos_tagger = LemmatizationWithPOSTagger()
#step 1 split document into sentence followed by tokenization
tokens = splitter.split(text)
#step 2 lemmatization using pos tagger
lemma_pos_token = lemmatization_using_pos_tagger.pos_tag(tokens)
print(lemma_pos_token)
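If you only need the lemmas rather than the (word, lemma, [tag]) triples, the nested result can be flattened; a small sketch based on the structure returned above:

# lemma_pos_token is a list of sentences, each a list of (word, lemma, [tag]) triples
lemmas = [lemma for sentence in lemma_pos_token for (word, lemma, tag) in sentence]
print(lemmas)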
As shown in the source code of nltk.corpus.reader.wordnet (http://www.nltk.org/_modules/nltk/corpus/reader/wordnet.html):
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
POS_LIST = [NOUN, VERB, ADJ, ADV]
You can create a map using Python's defaultdict and take advantage of the fact that the lemmatizer's default tag is noun.
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import word_tokenize, pos_tag
from collections import defaultdict
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
text = "Another way of achieving this task"
tokens = word_tokenize(text)
lmtzr = WordNetLemmatizer()
for token, tag in pos_tag(tokens):
    lemma = lmtzr.lemmatize(token, tag_map[tag[0]])
    print(token, "=>", lemma)
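With that input the loop prints something along these lines (the exact tags depend on the tagger model):

Another => Another
way => way
of => of
achieving => achieve
this => this
task => task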
The approach from @Suzana_K works, but there are some cases that result in a KeyError, as @Clock Slave mentioned.
Convert the Treebank tags to WordNet tags:
from nltk.corpus import wordnet
def get_wordnet_pos(treebank_tag):
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return wordnet.VERB
    elif treebank_tag.startswith('N'):
        return wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return wordnet.ADV
    else:
        return None  # makes the if-statement below straightforward
Now we pass a pos to the lemmatize function only when we have a WordNet tag:
import nltk
from nltk.stem.wordnet import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
tagged = nltk.pos_tag(tokens)
for word, tag in tagged:
    wntag = get_wordnet_pos(tag)
    if wntag is None:
        # don't supply a tag, so the lemmatizer falls back to its noun default
        lemma = lemmatizer.lemmatize(word)
    else:
        lemma = lemmatizer.lemmatize(word, pos=wntag)
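If you want to collect the results instead of just computing them in the loop, the same logic fits in a list comprehension (a sketch, assuming tokens is your token list):

lemmas = [lemmatizer.lemmatize(word, pos=get_wordnet_pos(tag)) if get_wordnet_pos(tag)
          else lemmatizer.lemmatize(word)
          for word, tag in nltk.pos_tag(tokens)]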
You can do it as follows:
import nltk
from nltk.corpus import wordnet
wordnet_map = {
"N": wordnet.NOUN,
"V": wordnet.VERB,
"J": wordnet.ADJ,
"R": wordnet.ADV
}
def pos_tag_wordnet(text):
    """
    Create pos_tag output in WordNet format.
    """
    pos_tagged_text = nltk.pos_tag(text)
    # map the POS tagging output to WordNet tags, defaulting to noun
    pos_tagged_text = [
        (word, wordnet_map.get(pos_tag[0])) if pos_tag[0] in wordnet_map.keys()
        else (word, wordnet.NOUN)
        for (word, pos_tag) in pos_tagged_text
    ]
    return pos_tagged_text
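A usage sketch (note that the function expects a list of tokens, not a raw string; the sample sentence is arbitrary):

from nltk.tokenize import word_tokenize

tokens = word_tokenize("The striped bats are hanging on their feet")
print(pos_tag_wordnet(tokens))
# a list of (word, wordnet_pos) pairs, e.g. ('bats', 'n')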
You can do this in one line:
wnpos = lambda e: ('a' if e[0].lower() == 'j' else e[0].lower()) if e[0].lower() in ['n', 'r', 'v'] else 'n'
Then use wnpos(nltk_pos) to get the POS to give to .lemmatize(). In your case, lmtzr.lemmatize(word=tagged[0][0], pos=wnpos(tagged[0][1])).
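Applied to every token rather than just the first, this becomes (a sketch reusing lmtzr, tagged, and wnpos from above):

lemmas = [lmtzr.lemmatize(word, pos=wnpos(tag)) for word, tag in tagged]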
After searching the internet, I found this solution: go from a sentence to a "bag of words" by splitting, POS tagging, lemmatizing, and cleaning (removing punctuation and stop words).
Here's my code:
import nltk
from nltk.corpus import wordnet as wn
from nltk.wsd import lesk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

punctuation = u",.?!()-_\"\'\\\n\r\t;:+*<>##§^$%&|/"
stop_words_eng = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
tag_dict = {"J": wn.ADJ,
            "N": wn.NOUN,
            "V": wn.VERB,
            "R": wn.ADV}

def extract_wnpostag_from_postag(tag):
    # take the first letter of the tag;
    # the second argument of get() is the default returned when the key is missing
    return tag_dict.get(tag[0].upper(), None)
def lemmatize_tupla_word_postag(tupla):
    """
    Given a tuple of the form (wordString, posTagString), like ('guitar', 'NN'), return the lemmatized word.
    """
    tag = extract_wnpostag_from_postag(tupla[1])
    return lemmatizer.lemmatize(tupla[0], tag) if tag is not None else tupla[0]
def bag_of_words(sentence, stop_words=None):
    if stop_words is None:
        stop_words = stop_words_eng
    original_words = word_tokenize(sentence)
    tagged_words = nltk.pos_tag(original_words)  # list of tuples (word, tagString), e.g. ('And', 'CC')
    original_words = None
    lemmatized_words = [lemmatize_tupla_word_postag(ow) for ow in tagged_words]
    tagged_words = None
    cleaned_words = [w for w in lemmatized_words if (w not in punctuation) and (w not in stop_words)]
    lemmatized_words = None
    return cleaned_words
sentence = "Two electric guitar rocks players, and also a better bass player, are standing off to two sides reading corpora while walking"
print(sentence, "\n\n bag of words:\n", bag_of_words(sentence) )
Thanks to the code by "alvas" from here (Named Entity Recognition with Regular Expression: NLTK), here is an example:
from nltk import ne_chunk, pos_tag
from nltk.tokenize import word_tokenize
from nltk.tree import Tree
def get_continuous_chunks(text):
    chunked = ne_chunk(pos_tag(word_tokenize(text)))
    prev = None
    continuous_chunk = []
    current_chunk = []
    for i in chunked:
        if type(i) == Tree:
            current_chunk.append(" ".join([token for token, pos in i.leaves()]))
        elif current_chunk:
            named_entity = " ".join(current_chunk)
            if named_entity not in continuous_chunk:
                continuous_chunk.append(named_entity)
            current_chunk = []
        else:
            continue
    return continuous_chunk
txt = 'The new GOP era in Washington got off to a messy start Tuesday as House Republicans,under pressure from President-elect Donald Trump.'
print (get_continuous_chunks(txt))
The output is:
['GOP', 'Washington', 'House Republicans', 'Donald Trump']
I replaced this text with txt = df['content'][38] from my dataset, and I get this result:
['Ina', 'Tori K.', 'Martin Cuilla', 'Phillip K', 'John J Lavorato']
The dataset has many rows and one column named 'content'. My question is: how can I use this code to extract names from this column for each row, and store those names in another column in the corresponding rows?
import os
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from nltk.tree import Tree
st = StanfordNERTagger(stanford_classifier, stanford_ner_path, encoding='utf-8')
text = df['content']
tokenized_text = word_tokenize(text)
classified_text = st.tag(tokenized_text)
print (classified_text)
Try apply:
df['ne'] = df['content'].apply(get_continuous_chunks)
For the code in your second example, create a function and apply it the same way:
def my_st(text):
    tokenized_text = word_tokenize(text)
    return st.tag(tokenized_text)

df['st'] = df['content'].apply(my_st)
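A minimal sketch of the first suggestion on a toy DataFrame (the column name and sample rows are made up for illustration, and the relevant NLTK models are assumed to be installed):

import pandas as pd

toy = pd.DataFrame({'content': [
    'Donald Trump met House Republicans in Washington.',
    'Martin Cuilla wrote to John J Lavorato.',
]})
toy['ne'] = toy['content'].apply(get_continuous_chunks)
print(toy)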
I have written some code to find the term frequency and document frequency of the words contained in files stored at location path. Each file goes through the function cleanDoc() to extract words from the text files, and I want the term frequencies tabulated so that all words from all documents are considered in the count. Can anybody tell me how I should implement it? I am only using NLTK.
import collections
import os.path
import glob
import nltk
wdict = set()
path = "C://Python27//Corpus Files//*.*"
#this function cleans up a doc (removes stopwords etc)
def cleanDoc(doc):
    stopset = set(nltk.corpus.stopwords.words('english'))
    stemmer = nltk.PorterStemmer()
    tokens = nltk.WordPunctTokenizer().tokenize(doc)
    clean = [token.lower() for token in tokens if token.lower() not in stopset and len(token) > 3 and token.isalpha()]
    final = [stemmer.stem(word) for word in clean]
    return final

for text in glob.glob(path):
    f = open(text)
    data = f.read()
    words = cleanDoc(data)
    wdict.update(words)
You can use the FreqDist object from nltk.probability to count these words. Later, you can navigate it through a dict-like key-value interface and methods (like freq.items() or freq['word']), or even plot the results using matplotlib.
import collections
import os.path
import glob
import nltk
from nltk.probability import FreqDist

term_frequency = {}
path = "C://Python27//Corpus Files//*.*"

# this function cleans up a doc (removes stopwords etc.)
def cleanDoc(doc):
    stopset = set(nltk.corpus.stopwords.words('english'))
    stemmer = nltk.PorterStemmer()
    tokens = nltk.WordPunctTokenizer().tokenize(doc)
    clean = [token.lower() for token in tokens if token.lower() not in stopset and len(token) > 3 and token.isalpha()]
    final = [stemmer.stem(word) for word in clean]
    return final

for text in glob.glob(path):
    f = open(text)
    data = f.read()
    words = cleanDoc(data)
    numbers_of_words = len(words)
    freq = FreqDist(words)  # frequency distribution of this document's cleaned words
    # term_frequency is a dict whose structure is like:
    # {
    #     'path_to_file':  {'term': 13.4, 'another_term': 15},
    #     'another_file':  {'term2': 12, 'foo': 15}
    # }
    term_frequency[text] = {}
    for term in freq.keys():
        # float() so the division is not integer division on Python 2
        term_frequency[text][term] = freq[term] / float(numbers_of_words)
Reference: https://nltk.googlecode.com/svn/trunk/doc/api/nltk.probability.FreqDist-class.html
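Since the question asks for counts over all words from all documents, a corpus-wide table can also be built by pooling the cleaned words of every file; a small sketch under the same setup (path and cleanDoc as defined above):

from collections import Counter

corpus_counts = Counter()
for text in glob.glob(path):
    with open(text) as f:
        corpus_counts.update(cleanDoc(f.read()))

# most common terms across every document
print(corpus_counts.most_common(20))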