Fastest way to increment counters in dictionary - python

What would be the fastest way to increment counters stored in a dictionary?
Because I have to do this same operation hundreds of thousands of times, I'm looking for something more efficient than what I have below:
def funcA(a):
    keys = [x for x in range(1, 51)]
    adict = {key: 0 for key in keys}
    for k in adict.keys():  # this is the code I would like to improve
        if k <= a:
            adict[k] += 1
        else:
            break
import timeit

number = 100000
t1 = timeit.timeit(
    'funcA(5)',
    setup="from __main__ import funcA", number=number)
print(t1)
>>> 0.42629639082588255
Trying to use a list comprehension instead seems to slow down everything a bit, maybe because it's lacking the break statement?
def funcB(a):
    # not working: the dictionary is never actually updated (see below)
    keys = [x for x in range(1, 51)]
    adict = {key: 0 for key in keys}
    def _inc(x):
        x += 1
        return x
    [_inc(adict[k]) for k in adict.keys() if k <= a]
# Timing: 0.5831785711925477
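The comprehension above actually runs without a syntax error, but it never changes adict: _inc receives the plain int value and increments a local copy, since ints are immutable. A version that builds the updated dictionary instead (a sketch, not necessarily faster) would be:
def funcB_fixed(a):
    adict = {key: 0 for key in range(1, 51)}
    # bools are ints in Python, so (k <= a) contributes 1 or 0
    return {k: v + (k <= a) for k, v in adict.items()}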
Note: initially I had if float(k) <= float(a): but since I'm only expecting numbers (integers or floats), removing the float() conversion improved the code. Is this assumption reasonable?
Note2: as noted in several comments, the break statement can give unexpected results in the resulting dictionary, so it's better to just do:
def funcA(a):
    keys = [x for x in range(1, 51)]
    adict = {key: 0 for key in keys}
    for k in adict:
        if k <= a:
            adict[k] += 1
# Timing: 0.5132114209700376

In your case you could just use the fact that booleans (the result of the comparison) can be simply converted to integers. It may not be the fastest but it's definitely short and "relatively" fast:
def funcA(a):
    adict = {key: int(key <= a) for key in range(1, 51)}
    return adict
This is assuming that the second function is actually what you want, because the first one could give different results due to the break: dictionaries are unordered, so the loop might stop before incrementing some keys that are smaller than or equal to a. Also, it doesn't increment the values, it just sets them to 1 or 0, because you don't actually need addition in this case.
However, that's not necessarily the fastest way, because it has to do a lot of function calls and int lookups. So I'll present some more equivalent operations, in order of performance (fastest to slowest):
def cached_version():
    range_cache = range(1, 51)
    cache = dict.fromkeys(range_cache, 0)
    def inner(a):
        adict = cache.copy()
        for key in range_cache[:a]:  # requires a to be an integer!
            adict[key] = 1
        return adict
    return inner

func1 = cached_version()  # initialize cache
def func2(a):
    keys = range(1, 51)
    adict = dict.fromkeys(keys[:a], 1)  # requires a to be an integer!
    for key in keys[a:]:
        adict[key] = 0
    return adict
def func3(a):
    adict = {}
    for key in range(1, 51):
        if key <= a:
            adict[key] = 1
        else:
            adict[key] = 0
    return adict
def func4(a):
    return {key: 1 if key <= a else 0 for key in range(1, 51)}
def func5(a):
    keys = range(1, 51)
    adict = dict.fromkeys(keys[:a], 1)  # requires a to be an integer!
    adict.update(dict.fromkeys(keys[a:], 0))
    return adict
def func6(a):
    # 50 keys need a ones and 50-a zeros
    return dict(zip(range(1, 51), [1]*a + [0]*(50-a)))  # requires a to be an integer!
from itertools import chain

def func7(a):
    return dict(zip(range(1, 51), chain([1]*a, [0]*(50-a))))  # requires a to be an integer!
def func8(a):  # the one I originally mentioned
    adict = {key: int(key <= a) for key in range(1, 51)}
    return adict
The timings were done on Python 3.5 on Windows 10; there could be differences on other machines and other Python versions. Also note that the performance could be totally different if you had more keys instead of just range(1, 51).
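For completeness, the comparisons above were made with a timeit harness along these lines (a sketch assuming the functions are defined in __main__, mirroring the question's setup):
import timeit

for name in ['func1', 'func2', 'func3', 'func4',
             'func5', 'func6', 'func7', 'func8']:
    t = timeit.timeit('%s(5)' % name,
                      setup='from __main__ import %s' % name,
                      number=100000)
    print(name, t)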


Sort string by character frequency, breaking ties alphabetically

I want to solve this problem:
Given an input string, sort the characters in decreasing order of frequency. If more than one character has the same frequency count, sort those characters in increasing lexicographical order. Examples:
bdca -> abcd
bdcda -> ddabc
abba -> aabb
bacbdc -> bbccad
My solution involves creating the frequencies in a hash map, then sorting the hash map's items by frequency using sorted() and a lambda function. Then, for the items with the same frequency (I need to write a subroutine for this), I do another sorted() with a lambda function.
def string_sort(s):
    hmap = {}
    for char in s:
        if char not in hmap:
            hmap[char] = 1
        else:
            hmap[char] += 1
    freqs = sorted(hmap.items(), key=lambda x: x[1], reverse=True)
    num_occur: list = find_num_occur(freqs)
    sorted_freqs = []
    start_ind = 0
    for f in num_occur:
        tmp_freqs = sorted(freqs[start_ind : start_ind + f], key=lambda x: x[0])
        sorted_freqs.extend(tmp_freqs)
        start_ind = len(sorted_freqs)
    out = []
    for item in sorted_freqs:
        out.extend([item[0]] * item[1])
    return "".join(out)
def find_num_occur(freqs):
    count = 1
    out = []
    for i in range(len(freqs) - 1):
        if freqs[i][1] == freqs[i + 1][1]:
            count += 1
        else:
            out.append(count)
            count = 1
    out.append(count)
    return out
The solution is not elegant. I was told it could be solved more easily using comparators, but I don't know how to use comparators in Python. Any suggestions? Or any other, more elegant solution?
Thanks.
You don't need to use comparators; you can use a key-function just fine. There's a lot of unnecessary complexity in your solution; all you need is something to the effect of:
>>> from collections import Counter
>>> def transmogrify(s):
... counts = Counter(s)
... return ''.join(sorted(s, key=lambda c: (-counts[c], c)))
...
>>> transmogrify('bdca')
'abcd'
>>> transmogrify('bdcda')
'ddabc'
>>> transmogrify('abba')
'aabb'
>>> transmogrify('bacbdc')
'bbccad'
Note, a collections.Counter is just a dict that is specialized for counting, since it is a common enough pattern.
>>> Counter('bacbdc')
Counter({'b': 2, 'c': 2, 'a': 1, 'd': 1})
Counter & __lt__ Method
I think your task can be divided into 2 subtasks:
1. The counting task: this is implemented easily with the Counter class, but Counter cannot guarantee the order of its keys, so you need task 2.
2. The sorting task (comparators, like in C++): you need a custom sort, so you can create a new class and implement the __lt__ method (see the docs).
from collections import Counter

class Item:
    def __init__(self, ch, times):
        self.ch = ch
        self.times = times

    def __lt__(self, other):
        if self.times > other.times:
            return True
        if self.times == other.times:
            return self.ch < other.ch
        return False
# my restruct
def restruct(inp):
    c = Counter(inp)
    data = sorted([Item(ch, times) for ch, times in c.items()])
    return ''.join([item.ch * item.times for item in data])
The following restruct is a very elegant implementation (with the same behavior) by @juanpa.arrivillaga; thanks to him:
def restruct(inp):
    c = Counter(inp)
    return ''.join(sorted(inp, key=lambda ch: Item(ch, c[ch])))
Then restruct('bacbdc') gives 'bbccad'. Bingo!
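Incidentally, on the OP's question about "comparators in Python": the direct analogue of a C++-style comparator is functools.cmp_to_key, which adapts an old-style comparison function into a key function. A minimal sketch of the same sort using it:
from collections import Counter
from functools import cmp_to_key

def string_sort_cmp(s):
    counts = Counter(s)
    def compare(a, b):
        # higher frequency first...
        if counts[a] != counts[b]:
            return counts[b] - counts[a]
        # ...then increasing lexicographical order
        return (a > b) - (a < b)
    return ''.join(sorted(s, key=cmp_to_key(compare)))

# string_sort_cmp('bacbdc') -> 'bbccad'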

Generate a dictionary of all possible Kakuro solutions

I'm just starting out with Python and had an idea to try to generate a dictionary of all the possible solutions for a Kakuro puzzle. There are a few posts out there about these puzzles, but none that show how to generate said dictionary. What I'm after is a dictionary that has keys from 3-45, with their values being tuples of the integers which sum to the key (so for example mydict[6] = ([1,5],[2,4],[1,2,3])). It is essentially a Subset Sum Problem - https://mathworld.wolfram.com/SubsetSumProblem.html
I've had a go at this myself and have it working for tuples up to three digits long. My method requires a loop for each additional integer in the tuple, so it would require me to write some very repetitive code! Is there a better way to do this? I feel like I want to loop the creation of loops, if that is a thing?
def kakuro():
    L = [i for i in range(1, 10)]
    mydict = {}
    for i in L:
        L1 = L[i:]
        for j in L1:
            if i + j in mydict:
                mydict[i + j].append((i, j))
            else:
                mydict[i + j] = [(i, j)]
            L2 = L[j:]
            for k in L2:
                if i + j + k in mydict:
                    mydict[i + j + k].append((i, j, k))
                else:
                    mydict[i + j + k] = [(i, j, k)]
    for i in sorted(mydict.keys()):
        print(i, mydict[i])
    return
my attempt round 2 - getting better!
def kakurodict():
    from itertools import combinations as combs
    L = [i for i in range(1, 10)]
    mydict = {}
    mydict2 = {}
    for i in L[1:]:
        mydict[i] = list(combs(L, i))
        for j in combs(L, i):
            val = sum(j)
            if val in mydict2:
                mydict2[val].append(j)
            else:
                mydict2[val] = [j]
    return mydict2
So this is written with the following assumptions:
dict[n] cannot have a list with the value [n].
Each element in the subset has to be unique.
I hope there is a better solution offered by someone else, because when we generate all subsets for values 3-45 it takes quite some time. I believe the time complexity of subset-sum generation is O(2^n), so if n is 45 it's not ideal (see the sketch after the code below for a way to exploit the Kakuro digit range).
import itertools

def subsetsums(max):  # note: `max` shadows the builtin here
    if max < 45:
        numbers = [x for x in range(1, max)]
    else:
        numbers = [x for x in range(1, 45)]
    result = [list(seq)
              for i in range(len(numbers), 0, -1)
              for seq in itertools.combinations(numbers, i)
              if sum(seq) == max]
    return result

mydict = {}
for i in range(3, 46):
    mydict[i] = subsetsums(i)
print(mydict)
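Since Kakuro entries only use the digits 1-9 without repetition, every achievable sum (3-45) comes from a subset of {1, ..., 9}, and there are only 2**9 = 512 of those. So a single pass over itertools.combinations of the digits builds the whole dictionary almost instantly; a minimal sketch along the lines of the OP's round 2:
from itertools import combinations

def kakuro_all():
    sums = {}
    for r in range(2, 10):  # Kakuro runs span 2 to 9 cells
        for combo in combinations(range(1, 10), r):
            sums.setdefault(sum(combo), []).append(combo)
    return sums

# kakuro_all()[6] -> [(1, 5), (2, 4), (1, 2, 3)]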

get minimum and maximum values from a 'for in' loop

First post & I've probably got no business being here, but here goes...
How do I find the maximum and minimum values from the output of a 'for in' loop?
I've tried min() and max() and get the following error...
TypeError: 'int' object is not iterable
here's my code...
import urllib2
import json

def printResults(data):
    # Use the json module to load the string data into a dictionary
    theJSON = json.loads(data)
    # test bed for accessing the data
    for i in theJSON["features"]:
        t = i["properties"]["time"]
        print t

def main():
    # define a variable to hold the source URL
    urlData = "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
    # Open the URL and read the data
    webUrl = urllib2.urlopen(urlData)
    #print webUrl.getcode()
    if (webUrl.getcode() == 200):
        data = webUrl.read()
        # print out our customized results
        printResults(data)
    else:
        print "Received an error from server, cannot retrieve results " + str(webUrl.getcode())

if __name__ == "__main__":
    main()
Any pointers will be greatly appreciated!
You can use min and max on iterables. Since you are looping through theJSON["features"], you can use:
print min(e["properties"]["time"] for e in theJSON["features"])
print max(e["properties"]["time"] for e in theJSON["features"])
You can also store the result in a variable, so you can use it later:
my_min = min(...)
my_max = max(...)
As @Sabyasachi commented, you can also use:
print min(theJSON["features"], key = lambda x:x["properties"]["time"])
Here is an example of how you can manually keep track of a min and max.
minVal = float('inf')   # sentinels: any real value will replace these
maxVal = float('-inf')
for i in yourJsonThingy:
    if i < minVal:
        minVal = i
    if i > maxVal:
        maxVal = i
You can't do this:
for i in yourJsonThingy:
    maxVal = max(i)
because i is just an integer and doesn't have a max.
But you can perform those operations on a list of ints:
maxVal = max(yourJsonThingy)
minVal = min(yourJsonThingy)
In the case that you only want to go through your iterable once (say it's an expensive operation to do, and really that's the only reason you should do this instead of calling max and min separately), the following is still a performance improvement over calling both separately; see the numbers below:
def max_min(iterable, key=None):
    '''
    returns a tuple of the max, min of iterable, optional function key
    tuple items are None if iterable is of length 0
    '''
    it = iter(iterable)
    _max = _min = next(it, None)
    if key is None:
        for i in it:
            if i > _max:
                _max = i
            elif i < _min:
                _min = i
    else:
        _max_key = _min_key = key(_max)
        for i in it:
            key_i = key(i)
            if key_i > _max_key:
                _max, _max_key = i, key_i
            elif key_i < _min_key:
                _min, _min_key = i, key_i
    return _max, _min
usage:
>>> max_min(range(100))
(99, 0)
>>> max_min(range(100), key=lambda x: -x)
(0, 99)
To check performance:
>>> timeit.timeit('max(range(1000)), min(range(1000))', setup=setup)
70.95577674100059
>>> timeit.timeit('max_min(range(1000))', setup=setup)
65.00369232000958
Which is about a 9% improvement on calling both builtins, max and min, without a lambda, separately. With a lambda:
>>> timeit.timeit('max(range(1000), key=lambda x: -x),min(range(1000), key=lambda x: -x)', setup=setup)
294.17539755300095
>>> timeit.timeit('max_min(range(1000), key=lambda x: -x)', setup=setup)
208.95339999899443
Which is a more than 40% improvement on calling each separately with lambdas.
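(The setup string used in these timings isn't shown; presumably it just imports the function under test, e.g. something like the following, though that's an assumption:)
# assumed setup for the timeit calls above, not the original:
setup = 'from __main__ import max_min'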

Python: Check the occurrences in a list against a value

lst = [1,2,3,4,1]
I want to know that 1 occurs twice in this list. Is there any efficient way to do this?
lst.count(1) would return the number of times it occurs. If you're going to be counting items in a list, O(n) is what you're going to get.
The general method on lists is list.count(x); it returns the number of times x occurs in the list.
Are you asking whether every item in the list is unique?
len(set(lst)) == len(lst)
Whether 1 occurs more than once?
lst.count(1) > 1
Note that the above is not maximally efficient, because it won't short-circuit -- even if 1 occurs twice, it will still count the rest of the occurrences. If you want it to short-circuit you will have to write something a little more complicated.
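For example, a short-circuiting test might look like this (a minimal sketch; it stops scanning as soon as the required number of occurrences is found):
def occurs_at_least(lst, value, times=2):
    count = 0
    for item in lst:
        if item == value:
            count += 1
            if count >= times:
                return True  # short-circuit: stop scanning early
    return False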
Whether the first element occurs more than once?
lst[0] in lst[1:]
How often each element occurs?
import collections
collections.Counter(lst)
Something else?
For multiple occurrences, this gives you the index of each occurrence:
>>> lst=[1,2,3,4,5,1]
>>> tgt=1
>>> found=[]
>>> for index, suspect in enumerate(lst):
...     if(tgt==suspect):
...         found.append(index)
...
>>> print len(found), "found at index:",", ".join(map(str,found))
2 found at index: 0, 5
If you want the count of each item in the list:
>>> lst=[1,2,3,4,5,2,2,1,5,5,5,5,6]
>>> count={}
>>> for item in lst:
...     count[item]=lst.count(item)
...
>>> count
{1: 2, 2: 3, 3: 1, 4: 1, 5: 5, 6: 1}
def valCount(lst):
    res = {}
    for v in lst:
        try:
            res[v] += 1
        except KeyError:
            res[v] = 1
    return res

u = [x for x, y in valCount(lst).iteritems() if y > 1]
u is now a list of all values which appear more than once.
Edit:
@katrielalex: thank you for pointing out collections.Counter, of which I was not previously aware. It can also be written more concisely using a collections.defaultdict, as demonstrated in the following tests. All three methods are roughly O(n) and reasonably close in run-time performance (using collections.defaultdict is in fact slightly faster than collections.Counter).
My intention was to give an easy-to-understand response to what seemed a relatively unsophisticated request. Given that, are there any other senses in which you consider it "bad code" or "done poorly"?
import collections
import random
import time

def test1(lst):
    res = {}
    for v in lst:
        try:
            res[v] += 1
        except KeyError:
            res[v] = 1
    return res

def test2(lst):
    res = collections.defaultdict(lambda: 0)
    for v in lst:
        res[v] += 1
    return res

def test3(lst):
    return collections.Counter(lst)

def rndLst(lstLen):
    r = random.randint
    return [r(0, lstLen) for i in xrange(lstLen)]

def timeFn(fn, *args):
    st = time.clock()
    res = fn(*args)
    return time.clock() - st

def main():
    reps = 5000
    res = []
    tests = [test1, test2, test3]
    for t in xrange(reps):
        lstLen = random.randint(10, 50000)
        lst = rndLst(lstLen)
        res.append([lstLen] + [timeFn(fn, lst) for fn in tests])
    res.sort()
    return res
And the results, for random lists containing up to 50,000 items, are as follows:
[Chart: vertical axis is time in seconds, horizontal axis is number of items in the list]
Another way to get all items that occur more than once:
lst = [1, 2, 3, 4, 1]
d = {}
for x in lst:
    d[x] = x in d
print d[1]  # True
print d[2]  # False
print [x for x in d if d[x]]  # [1]
You could also sort the list, which is O(n*log(n)), then check the adjacent elements for equality, which is O(n); the result is O(n*log(n)) overall. This has the disadvantage of requiring the entire list to be sorted before possibly bailing out when a duplicate is found.
For a large list with relatively rare duplicates, this could be about the best you can do. The best way to approach this really does depend on the size of the data involved and its nature.
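A minimal sketch of that sort-then-scan approach:
def has_duplicates(lst):
    s = sorted(lst)  # O(n*log(n))
    # equal adjacent elements in the sorted copy mean a duplicate: O(n)
    return any(a == b for a, b in zip(s, s[1:]))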

Find the most common element in a list

What is an efficient way to find the most common element in a Python list?
My list items may not be hashable, so I can't use a dictionary.
Also in case of draws the item with the lowest index should be returned. Example:
>>> most_common(['duck', 'duck', 'goose'])
'duck'
>>> most_common(['goose', 'duck', 'duck', 'goose'])
'goose'
A simpler one-liner:
def most_common(lst):
    return max(set(lst), key=lst.count)
Borrowing from here, this can be used with Python 2.7:
from collections import Counter

def Most_Common(lst):
    data = Counter(lst)
    return data.most_common(1)[0][0]
Works around 4-6 times faster than Alex's solutions, and is 50 times faster than the one-liner proposed by newacct.
On CPython 3.6+ (any Python 3.7+) the above will select the first seen element in case of ties. If you're running on older Python, to retrieve the element that occurs first in the list in case of ties you need to do two passes to preserve order:
# Only needed pre-3.6!
def most_common(lst):
    data = Counter(lst)
    return max(lst, key=data.get)
With so many solutions proposed, I'm amazed nobody's proposed what I'd consider an obvious one (for non-hashable but comparable elements) -- itertools.groupby. itertools offers fast, reusable functionality, and lets you delegate some tricky logic to well-tested standard library components. Consider for example:
import itertools
import operator

def most_common(L):
    # get an iterable of (item, index) pairs
    SL = sorted((x, i) for i, x in enumerate(L))
    # print 'SL:', SL
    groups = itertools.groupby(SL, key=operator.itemgetter(0))
    # auxiliary function to get "quality" for an item
    def _auxfun(g):
        item, iterable = g
        count = 0
        min_index = len(L)
        for _, where in iterable:
            count += 1
            min_index = min(min_index, where)
        # print 'item %r, count %r, minind %r' % (item, count, min_index)
        return count, -min_index
    # pick the highest-count/earliest item
    return max(groups, key=_auxfun)[0]
This could be written more concisely, of course, but I'm aiming for maximal clarity. The two print statements can be uncommented to better see the machinery in action; for example, with prints uncommented:
print most_common(['goose', 'duck', 'duck', 'goose'])
emits:
SL: [('duck', 1), ('duck', 2), ('goose', 0), ('goose', 3)]
item 'duck', count 2, minind 1
item 'goose', count 2, minind 0
goose
As you see, SL is a list of pairs, each pair an item followed by the item's index in the original list (to implement the key condition that, if the "most common" items with the same highest count are > 1, the result must be the earliest-occurring one).
groupby groups by the item only (via operator.itemgetter). The auxiliary function, called once per grouping during the max computation, receives and internally unpacks a group - a tuple with two items (item, iterable), where the iterable's items are also two-item tuples, (item, original index) (the items of SL).
Then the auxiliary function uses a loop to determine both the count of entries in the group's iterable, and the minimum original index; it returns those as combined "quality key", with the min index sign-changed so the max operation will consider "better" those items that occurred earlier in the original list.
This code could be much simpler if it worried a little less about big-O issues in time and space, e.g....:
def most_common(L):
    groups = itertools.groupby(sorted(L))
    def _auxfun((item, iterable)):
        return len(list(iterable)), -L.index(item)
    return max(groups, key=_auxfun)[0]
same basic idea, just expressed more simply and compactly... but, alas, an extra O(N) auxiliary space (to embody the groups' iterables to lists) and O(N squared) time (to get the L.index of every item). While premature optimization is the root of all evil in programming, deliberately picking an O(N squared) approach when an O(N log N) one is available just goes too much against the grain of scalability!-)
Finally, for those who prefer "oneliners" to clarity and performance, a bonus 1-liner version with suitably mangled names:-).
from itertools import groupby as g

def most_common_oneliner(L):
    return max(g(sorted(L)), key=lambda (x, v): (len(list(v)), -L.index(x)))[0]
What you want is known in statistics as mode, and Python of course has a built-in function to do exactly that for you:
>>> from statistics import mode
>>> mode([1, 2, 2, 3, 3, 3, 3, 3, 4, 5, 6, 6, 6])
3
Note that if there is no "most common element", such as cases where the top two are tied, this will raise StatisticsError on Python <= 3.7; from 3.8 onwards it will return the first one encountered.
Without the requirement about the lowest index, you can use collections.Counter for this:
from collections import Counter

a = [1936, 2401, 2916, 4761, 9216, 9216, 9604, 9801]
c = Counter(a)
print(c.most_common(1))  # the one most common element... 2 would mean the 2 most common
[(9216, 2)]  # a list with one (element, count) tuple: 9216 occurs twice in 'a'
If they are not hashable, you can sort them and do a single loop over the result counting the items (identical items will be next to each other). But it might be faster to make them hashable and use a dict.
def most_common(lst):
    cur_length = 0
    max_length = 0
    cur_i = 0
    max_i = 0
    cur_item = None
    max_item = None
    for i, item in sorted(enumerate(lst), key=lambda x: x[1]):
        if cur_item is None or cur_item != item:
            if cur_length > max_length or (cur_length == max_length and cur_i < max_i):
                max_length = cur_length
                max_i = cur_i
                max_item = cur_item
            cur_length = 1
            cur_i = i
            cur_item = item
        else:
            cur_length += 1
    if cur_length > max_length or (cur_length == max_length and cur_i < max_i):
        return cur_item
    return max_item
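As for the "make them hashable" alternative mentioned at the top of this answer, a minimal sketch, assuming the unhashable items are lists:
from collections import Counter

def most_common_hashable(lst):
    # convert each unhashable item to a hashable stand-in (here: a tuple)
    counts = Counter(tuple(x) for x in lst)
    # max() returns the first maximal key; on CPython 3.6+ Counter keeps
    # first-insertion order, so ties go to the earliest-seen element
    best = max(counts, key=counts.get)
    return list(best)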
This is an O(n) solution.
mydict = {}
cnt, itm = 0, ''
for item in reversed(lst):
    mydict[item] = mydict.get(item, 0) + 1
    if mydict[item] >= cnt:
        cnt, itm = mydict[item], item
print itm
(reversed is used to make sure that it returns the lowest index item)
Sort a copy of the list and find the longest run. You can decorate the list before sorting it with the index of each element, and then choose the run that starts with the lowest index in the case of a tie.
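A minimal sketch of that approach:
def most_common_runs(lst):
    # decorate each element with its original index, then sort a copy
    decorated = sorted((x, i) for i, x in enumerate(lst))
    best = (0, len(lst), None)  # (count, first index, item)
    run = (0, len(lst), None)
    for item, idx in decorated:
        if item != run[2]:
            run = (1, idx, item)
        else:
            run = (run[0] + 1, run[1], item)
        # a longer run wins; on a tie, the lower first index wins
        if run[0] > best[0] or (run[0] == best[0] and run[1] < best[1]):
            best = run
    return best[2]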
A one-liner:
def most_common(lst):
    return max(((item, lst.count(item)) for item in set(lst)), key=lambda a: a[1])[0]
I am doing this using the scipy.stats module and a lambda:
import scipy.stats

lst = [1, 2, 3, 4, 5, 6, 7, 5]
most_freq_val = lambda x: scipy.stats.mode(x)[0][0]
print(most_freq_val(lst))
Result:
5
# use Decorate, Sort, Undecorate to solve the problem
def most_common(iterable):
    # Make a list with tuples: (item, index)
    # The index will be used later to break ties for most common item.
    lst = [(x, i) for i, x in enumerate(iterable)]
    lst.sort()
    # lst_final will also be a list of tuples: (count, index, item)
    # Sorting on this list will find us the most common item, and the index
    # will break ties so the one listed first wins. Count is negative so
    # largest count will have lowest value and sort first.
    lst_final = []
    # Get an iterator for our new list...
    itr = iter(lst)
    # ...and pop the first tuple off. Setup current state vars for loop.
    count = 1
    tup = next(itr)
    x_cur, i_cur = tup
    # Loop over sorted list of tuples, counting occurrences of item.
    for tup in itr:
        # Same item again?
        if x_cur == tup[0]:
            # Yes, same item; increment count
            count += 1
        else:
            # No, new item, so write previous current item to lst_final...
            t = (-count, i_cur, x_cur)
            lst_final.append(t)
            # ...and reset current state vars for loop.
            x_cur, i_cur = tup
            count = 1
    # Write final item after loop ends
    t = (-count, i_cur, x_cur)
    lst_final.append(t)
    lst_final.sort()
    answer = lst_final[0][2]
    return answer

print most_common(['x', 'e', 'a', 'e', 'a', 'e', 'e'])  # prints 'e'
print most_common(['goose', 'duck', 'duck', 'goose'])  # prints 'goose'
Building on Luiz's answer, but satisfying the "in case of draws the item with the lowest index should be returned" condition:
from statistics import mode, StatisticsError

def most_common(l):
    try:
        return mode(l)
    except StatisticsError as e:
        # will only return the first element if no unique mode found
        if 'no unique mode' in e.args[0]:
            return l[0]
        # this is for "StatisticsError: no mode for empty data"
        # after calling mode([])
        raise
Example:
>>> most_common(['a', 'b', 'b'])
'b'
>>> most_common([1, 2])
1
>>> most_common([])
StatisticsError: no mode for empty data
Simple one-line solution:
moc = max([(lst.count(chr), chr) for chr in set(lst)])  # note: chr shadows the builtin
It will return the most frequent element with its frequency, as a (count, element) tuple.
You probably don't need this anymore, but this is what I did for a similar problem. (It looks longer than it is because of the comments.)
itemList = ['hi', 'hi', 'hello', 'bye']
counter = {}
maxItemCount = 0
for item in itemList:
    try:
        # Referencing this will cause a KeyError exception
        # if it doesn't already exist
        counter[item]
        # ... meaning if we get this far it didn't happen so
        # we'll increment
        counter[item] += 1
    except KeyError:
        # If we got a KeyError we need to create the
        # dictionary key
        counter[item] = 1
    # Keep overwriting maxItemCount with the latest number,
    # if it's higher than the existing itemCount
    if counter[item] > maxItemCount:
        maxItemCount = counter[item]
        mostPopularItem = item
print mostPopularItem
ans = [1, 1, 0, 0, 1, 1]
all_ans = {ans.count(ans[i]): ans[i] for i in range(len(ans))}
print(all_ans)  # {4: 1, 2: 0}
max_key = max(all_ans.keys())  # 4
print(all_ans[max_key])  # 1
# This will return the list sorted by frequency:
import numpy as np

def orderByFrequency(list):
    listUniqueValues = np.unique(list)
    listQty = []
    listOrderedByFrequency = []
    for i in range(len(listUniqueValues)):
        listQty.append(list.count(listUniqueValues[i]))
    for i in range(len(listQty)):
        index_bigger = np.argmax(listQty)
        for j in range(listQty[index_bigger]):
            listOrderedByFrequency.append(listUniqueValues[index_bigger])
        listQty[index_bigger] = -1
    return listOrderedByFrequency

# And this will return a list with the most frequent values in a list:
def getMostFrequentValues(list):
    if len(list) <= 1:
        return list
    list_most_frequent = []
    list_ordered_by_frequency = orderByFrequency(list)
    list_most_frequent.append(list_ordered_by_frequency[0])
    frequency = list_ordered_by_frequency.count(list_ordered_by_frequency[0])
    index = 0
    while index < len(list_ordered_by_frequency):
        index = index + frequency
        if index < len(list_ordered_by_frequency):
            testValue = list_ordered_by_frequency[index]
            testValueFrequency = list_ordered_by_frequency.count(testValue)
            if testValueFrequency == frequency:
                list_most_frequent.append(testValue)
            else:
                break
    return list_most_frequent

# tests:
print(getMostFrequentValues([]))
print(getMostFrequentValues([1]))
print(getMostFrequentValues([1, 1]))
print(getMostFrequentValues([2, 1]))
print(getMostFrequentValues([2, 2, 1]))
print(getMostFrequentValues([1, 2, 1, 2]))
print(getMostFrequentValues([1, 2, 1, 2, 2]))
print(getMostFrequentValues([3, 2, 3, 5, 6, 3, 2, 2]))
print(getMostFrequentValues([1, 2, 2, 60, 50, 3, 3, 50, 3, 4, 50, 4, 4, 60, 60]))
Results:
[]
[1]
[1]
[1, 2]
[2]
[1, 2]
[2]
[2, 3]
[3, 4, 50, 60]
Here:
def most_common(l):
    max = 0
    maxitem = None
    for x in set(l):
        count = l.count(x)
        if count > max:
            max = count
            maxitem = x
    return maxitem
I have a vague feeling there is a method somewhere in the standard library that will give you the count of each element, but I can't find it.
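(The standard-library helper being alluded to is collections.Counter, covered in earlier answers; for example:)
from collections import Counter

counts = Counter(['duck', 'duck', 'goose'])
print(counts.most_common(1))  # [('duck', 2)]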
This is the obvious slow solution (O(n^2)) if neither sorting nor hashing is feasible, but equality comparison (==) is available:
def most_common(items):
    if not items:
        raise ValueError
    fitems = []  # entries: [item, count, insertion order]
    best_idx = 0
    for item in items:
        item_missing = True
        i = 0
        for fitem in fitems:
            if fitem[0] == item:
                fitem[1] += 1
                d = fitem[1] - fitems[best_idx][1]
                if d > 0 or (d == 0 and fitems[best_idx][2] > fitem[2]):
                    best_idx = i
                item_missing = False
                break
            i += 1
        if item_missing:
            fitems.append([item, 1, i])
    return fitems[best_idx][0]
But making your items hashable or sortable (as recommended by other answers) would almost always make finding the most common element faster if the length of your list (n) is large. O(n) on average with hashing, and O(n*log(n)) at worst for sorting.
>>> li = ['goose', 'duck', 'duck']
>>> def foo(li):
...     st = set(li)
...     mx = -1
...     for each in st:
...         temp = li.count(each)
...         if mx < temp:
...             mx = temp
...             h = each
...     return h
...
>>> foo(li)
'duck'
I needed to do this in a recent program. I'll admit it, I couldn't understand Alex's answer, so this is what I ended up with.
def mostPopular(l):
    mpEl = None
    mpIndex = 0
    mpCount = 0
    curEl = None
    curCount = 0
    for i, el in sorted(enumerate(l), key=lambda x: (x[1], x[0]), reverse=True):
        curCount = curCount + 1 if el == curEl else 1
        curEl = el
        if curCount > mpCount or (curCount == mpCount and i < mpIndex):
            mpEl = curEl
            mpIndex = i
            mpCount = curCount
    return mpEl, mpCount, mpIndex
I timed it against Alex's solution and it's about 10-15% faster for short lists, but once you go over 100 elements or more (tested up to 200000) it's about 20% slower.
def most_frequent(List):
    counter = 0
    num = List[0]
    for i in List:
        curr_frequency = List.count(i)
        if curr_frequency > counter:
            counter = curr_frequency
            num = i
    return num

List = [2, 1, 2, 2, 1, 3]
print(most_frequent(List))
Hi, this is a very simple solution (though note that calling list.count inside the loop makes it quadratic, not linear, in time):
L = ['goose', 'duck', 'duck']

def most_common(L):
    current_winner = 0
    max_repeated = None
    for i in L:
        amount_times = L.count(i)
        if amount_times > current_winner:
            current_winner = amount_times
            max_repeated = i
    return max_repeated

print(most_common(L))
"duck"
Where number, is the element in the list that repeats most of the time
numbers = [1, 3, 7, 4, 3, 0, 3, 6, 3]
max_repeat_num = max(numbers, key=numbers.count)  # which number occurs most frequently
max_repeat = numbers.count(max_repeat_num)        # how many times
print(f"the number {max_repeat_num} is repeated {max_repeat} times")
def mostCommonElement(list):
    count = {}     # dict holder
    max = 0        # keep track of the count by key
    result = None  # holder when count is greater than max
    for i in list:
        if i not in count:
            count[i] = 1
        else:
            count[i] += 1
        if count[i] > max:
            max = count[i]
            result = i
    return result

mostCommonElement(["a", "b", "a", "c"])  # -> "a"
A majority element is one appearing more than N/2 times in the array, where N is len(array); the function returns -1 if no such element exists. The technique below does it in O(n) time, with the Counter using auxiliary space proportional to the number of distinct elements:
from collections import Counter

def majorityElement(arr):
    majority_elem = Counter(arr)
    size = len(arr)
    for key, val in majority_elem.items():
        if val > size / 2:
            return key
    return -1
def most_common(lst):
    if max([lst.count(i) for i in lst]) == 1:
        return False
    else:
        return max(set(lst), key=lst.count)
def popular(L):
    C = {}
    for a in L:
        C[a] = L.count(a)
    for b in C.keys():
        if C[b] == max(C.values()):
            return b

L = [2, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 467, 4, 7, 4]
print popular(L)
