I'm working with some of the corpus materials from NLPP. I'm trying to improve my unscrambling score in the code below... at the moment I'm hitting 91.250%.
The point of the exercise is to alter the represent_word function to improve the score.
The function consumes a word as a string; the word is either scrambled or unscrambled. The function produces a "representation" of the word, which is a tuple containing the following information (illustrated just after this list):
word length
number of vowels
number of consonants
first and last letter of the word (these are always unscrambled)
a tuple of the most commonly used words from the corpus whose characters are all members of the given input word.
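For example (illustrative only; the final element depends on which corpus words make the top list):
represent_word("taht")
# -> ('t', 't', 4, 3, 1, ('that', 'at'))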
I have also tried analysing anagrams of prefixes and suffixes, but they contribute nothing to the score beyond what the most-common-words tuple already provides.
I'm not sure why I can't improve the score. I've even tried increasing the dictionary size by importing words from another corpus.
The only section that can be altered here is the represent_word function and the definitions just above it. However, I'm including the entire source in case it yields insightful information for someone.
import nltk
import re
def word_counts(corpus, wordcounts = {}):
    """ Function that counts all the words in the corpus."""
    for word in corpus:
        wordcounts.setdefault(word.lower(), 0)
        wordcounts[word.lower()] += 1
    return wordcounts
JA_list = filter(lambda x: x.isalpha(), map(lambda x:x.lower(),
nltk.corpus.gutenberg.words('austen-persuasion.txt')))
JA_freqdist=nltk.FreqDist(JA_list)
JA_toplist=sorted(JA_freqdist.items(),key=lambda x: x[1], reverse=True)[:7]
JA_topwords=[]
for i in JA_toplist:
    JA_topwords.append(i[0])
PP_list = filter(lambda x: x.isalpha(),map(lambda x:x.lower(),
open("Pride and Prejudice.txt").read().split()))
PP_freqdist=nltk.FreqDist(PP_list)
PP_toplist=sorted(PP_freqdist.items(),key=lambda x: x[1], reverse=True)[:7]
PP_topwords=[]
for i in PP_toplist:
    PP_topwords.append(i[0])
uniquewords=[]
for i in JA_topwords:
    if i not in PP_topwords:
        uniquewords.append(i)
uniquewords.extend(PP_topwords)
def represent_word(word):
    def common_word(word):
        # Collect the top words whose letters all appear in the given word.
        dictionary = uniquewords
        findings = []
        for string in dictionary:
            if all(letter in word for letter in string):
                findings.append(string)
        if not findings:
            return None
        return tuple(findings)
    vowels = list("aeiouy")
    consonants = list("bcdfghjklmnpqrstvwxz")
    number_of_consonants = sum(word.count(i) for i in consonants)
    number_of_vowels = sum(word.count(i) for i in vowels)
    split_word = list(word)
    common_words = common_word(word)
    return (split_word[0], split_word[-1], len(split_word),
            number_of_consonants, number_of_vowels, common_words)
def create_mapping(words, mapping = {}):
    """ Returns a mapping of representations of words to the most common word for that representation. """
    for word in words:
        representation = represent_word(word)
        mapping.setdefault(representation, ("", 0))
        if mapping[representation][1] < words[word]:
            mapping[representation] = (word, words[word])
    return mapping
if __name__ == '__main__':
    # Create a mapping of representations of the words in Persuasion by Jane Austen to use as a corpus
    words = JA_freqdist
    mapping = create_mapping(words)
    # Load the words in the scrambled file
    with open("Pdrie and Puicejdre.txt") as scrambled_file:
        scrambled_lines = [line.split() for line in scrambled_file if len(line.strip()) > 0]
    scrambled_words = [word.lower() for line in scrambled_lines for word in line]
    # Descramble the words using the best mapping
    descrambled_words = []
    for scrambled_word in scrambled_words:
        representation = represent_word(scrambled_word)
        if representation in mapping:
            descrambled_word = mapping[representation][0]
        else:
            descrambled_word = scrambled_word
        descrambled_words.append(descrambled_word)
    # Load the original words
    with open("Pride and Prejudice.txt") as original_file:
        original_lines = [line.split() for line in original_file if len(line.strip()) > 0]
    original_words = [word.lower() for line in original_lines for word in line]
    # Make a list of word pairs from descrambled_words and original words
    word_pairs = zip(descrambled_words, original_words)
    # See if the words are the same
    judgements = [descrambled_word == original_word for (descrambled_word, original_word) in word_pairs]
    # Print the results
    print "Correct: {0:.3%}".format(float(judgements.count(True))/len(judgements))
I'm trying to find wherever one of some specific words is used in a TXT file and then report that word's position (its word number) in the file. My code returns the number for some but not all of the words, and I have no idea why.
My code currently goes through the file word by word with a counter and writes out the number whenever the word matches one of the words I'm looking for.
import os

def wordnumber(file, filewrite, word1, word2, word3):
    wordlist = [word1, word2, word3]
    infile = open(file, 'r')
    g = open(filewrite, 'w')
    g.write("start")
    g.write(os.linesep)
    lines = infile.read().splitlines()
    infile.close()
    wordsString = ' '.join(lines)
    words = wordsString.split()
    n = 1
    for w in words:
        if w in wordlist:
            g.write(str(n))
            g.write(os.linesep)
        n = n + 1
This works sometimes, but for some text files it only returns some of the numbers and leaves others blank.
If you want to find the first occurrence of the word in your words, just use
wordIndex = words.index(w) if w in words else None
and for all occurrences use
wordIndexes = [i for i,x in enumerate(words) if x==word]
(taken from Python: Find in list)
But beware: if your text is "cat, dog, mouse", your code wouldn't find the index of "cat" or "dog", because "cat, dog, mouse".split() returns ['cat,', 'dog,', 'mouse'], and 'cat,' is not 'cat'.
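If punctuation is the culprit, a minimal sketch of a fix (using the standard string.punctuation set) is to strip it before comparing:
import string

words = "cat, dog, mouse".split()
cleaned = [w.strip(string.punctuation) for w in words]
print(cleaned)               # ['cat', 'dog', 'mouse']
print(cleaned.index('cat'))  # 0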
At the moment this code takes a string from the user and compares it to a text file in which many words are stored. It then outputs all the words that are an exact anagram of the string (e.g. "otp" = opt, top, pot). Currently, when I input the string, it only matches words with the EXACT same letters in a rearranged order.
My question is: how do I go about being able to type in excess letters but still output all the contained words? For example: type in "orkignwer" and the program will output "working" even though there are extra letters.
words = []
def isAnAnagram(word, user):
    wordList = list(word)
    wordList.sort()
    inputList = list(user)
    inputList.sort()
    return (wordList == inputList)

def getAnagrams(user):
    lister = [word for word in words if len(word) == len(user)]
    for item in lister:
        if isAnAnagram(item, user):
            yield item
with open('Dictionary.txt', 'r') as f:
    allwords = f.readlines()
for x in allwords:
    x = x.rstrip()
    words.append(x)
inp = 1
while inp != "99":
    inp = input("enter word:")
    result = getAnagrams(inp)
    print(list(result))
You have to edit the isAnAnagram and the getAnagrams functions. First, getAnagrams should be edited so that the lister list also includes words shorter than the user's string (since the input may carry excess letters):
def getAnagrams(user):
    lister = [word for word in words if len(word) <= len(user)]
    for item in lister:
        if isAnAnagram(item, user):
            yield item
Then you would need to edit the isAnAnagram function. As Alexander Huszagh pointed out, you can use the Counter from the collections package:
from collections import Counter

def isAnAnagram(word, user):
    word_counter = Counter(word)
    input_counter = Counter(user)
    return all(count <= input_counter[key] for key, count in word_counter.items())
The all(count <= input_counter[key] for key, count in word_counter.items()) checks that every letter of word appears in user at least as many times as it appears in word.
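For example (a quick check, substituting a small inline list for Dictionary.txt):
>>> words = ["working", "ring", "zebra"]
>>> list(getAnagrams("orkignwer"))
['working', 'ring']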
P.S. If you want a more optimized solution, you might want to check out tries (e.g. MARISA-trie, python-trie or PyTrie).
This is what I have so far, but I'm stuck. I'm using nltk for the word list and trying to find all the words with the letters in "sand". From this list I want to find all the words I can make from the remaining letters.
import nltk
from nltk.corpus import words
wordlist = words.words()

pwordlist = []
for w in wordlist:
    if 's' in w and 'a' in w and 'n' in w and 'd' in w:
        pwordlist.append(w)
In this case I have to use all the letters to find the words possible.
I think this will work for finding the possible words with the remaining letters, but I can't figure out how to remove only 1 instance of the letters in 'sand'.
puzzle_letters = nltk.FreqDist(remaining_letters)  # remaining_letters: the word minus one each of 's', 'a', 'n', 'd'
[w for w in wordlist if len(w) == len(remaining_letters) and nltk.FreqDist(w) == puzzle_letters]
I would separate the logic into four sections:
A function contains(word, letters), which we'll use to detect whether a word contains "sand"
A function subtract(word, letters), which we'll use to remove "sand" from the word.
A function get_anagrams(word), which finds all of the anagrams of a word.
The main algorithm that combines all of the above to find words that are anagrams of other words once you remove "sand".
from collections import Counter
words = ??? #todo: somehow get a list of every English word.
def contains(word, letters):
    return not Counter(letters) - Counter(word)

def subtract(word, letters):
    remaining = Counter(word) - Counter(letters)
    return "".join(remaining.elements())
anagrams = {}
for word in words:
    base = "".join(sorted(word))
    anagrams.setdefault(base, []).append(word)

def get_anagrams(word):
    return anagrams.get("".join(sorted(word)), [])
for word in words:
    if contains(word, "sand"):
        reduced_word = subtract(word, "sand")
        matches = get_anagrams(reduced_word)
        if matches:
            print word, matches
Running the above code on the Words With Friends dictionary, I get a lot of results, including:
...
cowhands ['chow']
credentials ['reticle', 'tiercel']
cyanids ['icy']
daftness ['efts', 'fest', 'fets']
dahoons ['oho', 'ooh']
daikons ['koi']
daintiness ['seniti']
daintinesses ['sienites']
dalapons ['opal']
dalesman ['alme', 'lame', 'male', 'meal']
...
Program:
from nltk.corpus import words
from collections import defaultdict
def norm(word):
    # A word's "normal form": its letters in sorted order.
    return ''.join(sorted(word))

# Map the normal form of word + 'sand' back to word.
completers = defaultdict(list)
for word in words.words():
    completers[norm(word + 'sand')].append(word)

# Any word whose own normal form appears in the map is an anagram of some word + 'sand'.
for word in words.words():
    comps = completers[norm(word)]
    if comps:
        print(word, comps)
Output:
...
admirableness ['miserable']
adnascent ['enact']
adroitness ['sorite', 'sortie', 'triose']
adscendent ['cedent', 'decent']
adsorption ['portio']
adventuress ['vesture']
adversant ['avert', 'tarve', 'taver', 'trave']
...
Let's answer your question instead of spoiling the fun by doing the whole exercise for you: To remove just one instance of the letter, specify a replacement and give a limit to how many times it should apply:
>>> "Frodo".replace("o", "", 1)
'Frdo'
Or if you need to apply a regexp just once (though in this case you don't need a regexp):
>>> import re
>>> re.sub(r"[od]", "", "Frodo", 1)
'Frdo'
Now if you have a string whose letters (s, a, n, d) you want to remove from a word word, you can simply loop over the string:
>>> for letter in "sand":
...     word = word.replace(letter, "", 1)
I'll leave it to you to embed this in a loop that goes over all words in your wordlist, and to utilize the remaining letters.
My program opens a file and counts the words contained in it, but I want to create a dictionary consisting of all the unique words in the text.
For example, if the word 'computer' appears three times, I want that to count as one unique word.
def main():
    file = input('Enter the name of the input file: ')
    infile = open(file, 'r')
    file_contents = infile.read()
    infile.close()
    words = file_contents.split()
    number_of_words = len(words)
    print("There are", number_of_words, "words contained in this paragraph")

main()
Use a set. This will only include unique words:
words = set(words)
If you don't care about case, you can do this:
words = set(word.lower() for word in words)
This assumes there is no punctuation. If there is, you will need to strip the punctuation.
import string
words = set(word.lower().strip(string.punctuation) for word in words)
If you need to keep track of how many of each word you have, just replace set with Counter in the examples above:
import string
from collections import Counter
words = Counter(word.lower().strip(string.punctuation) for word in words)
This will give you a dictionary-like object that tells you how many of each word there is.
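For example:
>>> Counter("the cat and the hat".split())
Counter({'the': 2, 'cat': 1, 'and': 1, 'hat': 1})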
You can also get the number of unique words from this (although it is slower if that is all you care about):
import string
from collections import Counter
words = Counter(word.lower().strip(string.punctuation) for word in words)
nword = len(words)
@TheBlackCat's solution works, but it only gives you how many unique words are in the string/file. This solution also shows you how many times each word occurs.
dictionaryName = {}
for word in words:
    if word not in dictionaryName:
        dictionaryName[word] = 1
    else:
        dictionaryName[word] += 1
print dictionaryName
tested with:
words = "Foo", "Bar", "Baz", "Baz"
output: {'Foo': 1, 'Bar': 1, 'Baz': 2}
A cleaner and quicker solution:
words_dict = {}
for word in words:
    word_count = words_dict.get(word, 0)
    words_dict[word] = word_count + 1
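For what it's worth, this loop is exactly the pattern that collections.Counter (used in the earlier answer) packages up:
from collections import Counter

words_dict = Counter(words)  # same counts as the loop above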
I have some code that gives me a list of words with the frequencies at which they occur in the text. I'm looking to make the code automatically convert the top 10 words into an ARFF file with
#RELATION wordfrequencies
#ATTRIBUTE word string
#ATTRIBUTE frequency numeric
and the top 10 as data with their frequency.
I'm struggling with how to do this with my current code
import re
import nltk
# Quran subset
filename = 'subsetQuran.txt'
# create list of lower case words
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
word_list2 = [w.strip() for w in word_list if w.strip() not in nltk.corpus.stopwords.words('english')]
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation and numbers to be removed
punctuation = re.compile(r'[-.?!,":;()|0-9]')
for word in word_list2:
    # remove punctuation marks
    word = punctuation.sub("", word)
    # form dictionary
    try:
        freq_dic[word] += 1
    except KeyError:
        freq_dic[word] = 1
print '-'*30
print "sorted by highest frequency first:"
# create list of (val, key) tuple pairs
freq_list2 = [(val, key) for key, val in freq_dic.items()]
# sort by val or frequency
freq_list2.sort(reverse=True)
freq_list3 = list(freq_list2)
# display result
for freq, word in freq_list2:
    print word, freq
f = open("wordfreq.txt", "w")
f.write( str(freq_list3) )
f.close()
Any help with this is appreciated; working out a way to do this is really racking my brain!
I hope you don't mind the slight rewrite:
import re
import nltk
from collections import defaultdict
# Quran subset
filename = 'subsetQuran.txt'
# create list of lower case words
word_list = open(filename).read().lower().split()
print 'Words in text:', len(word_list)
# remove stopwords
word_list = [w for w in word_list if w not in nltk.corpus.stopwords.words('english')]
# create dictionary of word:frequency pairs
freq_dic = defaultdict(int)
# punctuation and numbers to be removed
punctuation = re.compile(r'[-.?!,":;()|0-9]')
for word in word_list:
    # remove punctuation marks
    word = punctuation.sub("", word)
    # increment count for word
    freq_dic[word] += 1
print '-' * 30
print "sorted by highest frequency first:"
# create list of (frequency, word) tuple pairs
freq_list = [(freq, word) for word, freq in freq_dic.items()]
# sort by descending frequency
freq_list.sort(reverse=True)
# display result
for freq, word in freq_list:
    print word, freq
# write ARFF file for 10 most common words
f = open("wordfreq.txt", "w")
f.write("#RELATION wordfrequencies\n")
f.write("#ATTRIBUTE word string\n")
f.write("#ATTRIBUTE frequency numeric\n")
f.write("#DATA\n")
for freq, word in freq_list[:10]:
    f.write("'%s',%d\n" % (word, freq))
f.close()
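With that, wordfreq.txt comes out looking something like this (hypothetical words and counts, just to show the shape):
#RELATION wordfrequencies
#ATTRIBUTE word string
#ATTRIBUTE frequency numeric
#DATA
'foo',123
'bar',99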