Why is this Python code taking so long?

Alright, I have this Python code that compares merge sort and selection sort, but it is taking forever. Sorting a single list of up to 90,000 elements only takes about 3 seconds. By that logic, the whole experiment should take roughly 10 * 3 * 9 = 270 seconds (runs per size * duration * number of size increments [we start with 10,000, then do 20,000, then 30,000, etc.]). However, it takes far longer than that.
import time
import random

# Selection Sort Code #
def maxIndex(J):
    return J.index(max(J))

def swap(LCopy, i, j):
    temp = LCopy[i]
    LCopy[i] = LCopy[j]
    LCopy[j] = temp

# Implementation of selection sort
def selectionSort(L):
    for i in range(len(L)-1, 1, -1):
        j = maxIndex(L[0:i+1])
        swap(L, i, j)

# Merge Sort Code #

# Assumes that L[first:mid+1] is sorted and also
# that L[mid:last+1] is sorted. Returns L with L[first:last+1] sorted
def merge(L, first, mid, last):
    i = first    # index into the first half
    j = mid + 1  # index into the second half
    tempList = []
    # This loop goes on as long as BOTH i and j stay within their
    # respective sorted blocks
    while (i <= mid) and (j <= last):
        if L[i] <= L[j]:
            tempList.append(L[i])
            #print L[i], "from the first block"
            i += 1
        else:
            tempList.append(L[j])
            #print L[j], "from the second block"
            j += 1
    # If i goes beyond the first block, there may be some elements
    # in the second block that need to be copied into tempList.
    # Similarly, if j goes beyond the second block, there may be some
    # elements in the first block that need to be copied into tempList
    if i == mid + 1:
        tempList.extend(L[j:last+1])
        #print L[j:last+1], "some elements in second block are left over"
    elif j == last + 1:
        tempList.extend(L[i:mid+1])
        #print L[i:mid+1], "some elements from first block are left over"
    L[first:last+1] = tempList
    #print tempList

# The merge sort function; sorts the sublist L[first:last+1]
def generalMergeSort(L, first, last):
    # Base case: if first == last then it is already sorted
    # Recursive case: L[first:last+1] has size 2 or more
    if first < last:
        # divide step
        mid = (first + last)/2
        # conquer step
        generalMergeSort(L, first, mid)
        generalMergeSort(L, mid+1, last)
        # combine step
        merge(L, first, mid, last)

# Wrapper function
def mergeSort(L):
    generalMergeSort(L, 0, len(L)-1)

m = 10
n = 100000
n_increments = 9

baseList = [ random.randint(0, 100) for r in range(n) ]

i = 0
while i < n_increments:
    j = 0
    sel_time = 0
    mer_time = 0
    while j < m:
        # Do a Selection Sort #
        x = time.clock()
        selectionSort(baseList)
        y = time.clock()
        sel_time += (y - x)
        random.shuffle(baseList)

        # Do a Merge Sort #
        x = time.clock()
        mergeSort(baseList)
        y = time.clock()
        mer_time += (y - x)
        random.shuffle(baseList)
        j += 1

    print "average select sort time for a list of", n, "size:", sel_time / m
    print "average merge sort time for a list of", n, "size:", mer_time / m
    j = 0
    i += 1
    n += 10000

Because you are using an O(n^2) sorting algorithm (selection sort). This means that every time you double n, the running time quadruples, so the per-run cost is not the constant 3 seconds your estimate assumes. Note also that you are starting at n = 100,000, not 10,000.
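To see the quadratic growth directly, something like this (a rough sketch, reusing the selectionSort from the question and Python 3's time.perf_counter) times the sort at two sizes and prints the ratio:

import random
import time

def time_sort(sort, n):
    # Time a single sort of a fresh random list of length n.
    data = [random.randint(0, 100) for _ in range(n)]
    t0 = time.perf_counter()
    sort(data)
    return time.perf_counter() - t0

t1 = time_sort(selectionSort, 10000)
t2 = time_sort(selectionSort, 20000)
print(t2 / t1)  # close to 4 for an O(n^2) sort, not 2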

Related

Making the complexity smaller (better)

I have an algorithm that counts the good pairs in a list of numbers. A good pair is a pair of indices i < j with arr[i] < arr[j]. It currently has a complexity of O(n^2), but I want to make it O(n log n) using divide and conquer. How can I go about doing that?
Here's the algorithm:
def goodPairs(nums):
    count = 0
    for i in range(0, len(nums)):
        for j in range(i+1, len(nums)):
            if i < j and nums[i] < nums[j]:
                count += 1
            j += 1
        j += 1
    return count
Here's my attempt at making it but it just returns 0:
def goodPairs(arr):
    count = 0
    if len(arr) > 1:
        # Finding the mid of the array
        mid = len(arr)//2
        # Dividing the array elements
        left_side = arr[:mid]
        # into 2 halves
        right_side = arr[mid:]
        # Sorting the first half
        goodPairs(left_side)
        # Sorting the second half
        goodPairs(right_side)
        for i in left_side:
            for j in right_side:
                if i < j:
                    count += 1
    return count
The previously accepted answer by Fire Assassin doesn't really answer the question, which asks for better complexity. It's still quadratic, and about as fast as a much simpler quadratic solution. Benchmark with 2000 shuffled ints:
387.5 ms original
108.3 ms pythonic
104.6 ms divide_and_conquer_quadratic
4.1 ms divide_and_conquer_nlogn
4.6 ms divide_and_conquer_nlogn_2
Code (Try it online!):
def original(nums):
    count = 0
    for i in range(0, len(nums)):
        for j in range(i+1, len(nums)):
            if i < j and nums[i] < nums[j]:
                count += 1
            j += 1
        j += 1
    return count

def pythonic(nums):
    count = 0
    for i, a in enumerate(nums, 1):
        for b in nums[i:]:
            if a < b:
                count += 1
    return count

def divide_and_conquer_quadratic(arr):
    count = 0
    left_count = 0
    right_count = 0
    if len(arr) > 1:
        mid = len(arr) // 2
        left_side = arr[:mid]
        right_side = arr[mid:]
        left_count = divide_and_conquer_quadratic(left_side)
        right_count = divide_and_conquer_quadratic(right_side)
        for i in left_side:
            for j in right_side:
                if i < j:
                    count += 1
    return count + left_count + right_count
def divide_and_conquer_nlogn(arr):
    mid = len(arr) // 2
    if not mid:
        return 0
    left = arr[:mid]
    right = arr[mid:]
    count = divide_and_conquer_nlogn(left)
    count += divide_and_conquer_nlogn(right)
    i = 0
    for r in right:
        while i < mid and left[i] < r:
            i += 1
        count += i
    arr[:] = left + right
    arr.sort()  # linear, as Timsort takes advantage of the two sorted runs
    return count

def divide_and_conquer_nlogn_2(arr):
    mid = len(arr) // 2
    if not mid:
        return 0
    left = arr[:mid]
    right = arr[mid:]
    count = divide_and_conquer_nlogn_2(left)
    count += divide_and_conquer_nlogn_2(right)
    i = 0
    arr.clear()
    append = arr.append
    for r in right:
        while i < mid and left[i] < r:
            append(left[i])
            i += 1
        append(r)
        count += i
    arr += left[i:]
    return count
from timeit import timeit
from random import shuffle

arr = list(range(2000))
shuffle(arr)

funcs = [
    original,
    pythonic,
    divide_and_conquer_quadratic,
    divide_and_conquer_nlogn,
    divide_and_conquer_nlogn_2,
]

for func in funcs:
    print(func(arr[:]))

for _ in range(3):
    print()
    for func in funcs:
        arr2 = arr[:]
        t = timeit(lambda: func(arr2), number=1)
        print('%5.1f ms ' % (t * 1e3), func.__name__)
One of the most well-known divide-and-conquer algorithms is merge sort. And merge sort is actually a really good foundation for this algorithm.
The idea is that when comparing two numbers from two different 'partitions', you already have a lot of information about the remaining parts of these partitions, as the partitions are sorted at every step.
Let's take an example!
Consider the following partitions, which have already been sorted individually and whose internal "good pairs" have been counted.
Partition x: [1, 3, 6, 9].
Partition y: [4, 5, 7, 8].
It is important to note that the numbers from partition x are located further to the left in the original list than those of partition y. In particular, for every element in x, its index i is smaller than the index j of every element in y.
We will start off by comparing 1 and 4. Obviously 1 is smaller than 4. But since 4 is the smallest element in partition y, 1 must also be smaller than the rest of the elements in y. Consequently, we can conclude that there are 4 additional good pairs, since the index of 1 is also smaller than the index of every element of y.
The exact same thing happens with 3, and we can add 4 new good pairs to the sum.
For 6 we conclude that there are two new good pairs: the comparisons between 6 and 4 and between 6 and 5 did not yield good pairs, but those with 7 and 8 do.
You might now see how these additional good pairs are counted: whenever the element from x is less than the element from y, add the number of elements remaining in y (including the current one) to the sum. Rinse and repeat.
Since merge sort is an O(n log n) algorithm, and the additional counting work per comparison is constant, this algorithm is also O(n log n).
I will leave the actual programming as an exercise for you.
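If you want to check your understanding in code first, here is a minimal sketch (an equivalent two-pointer formulation of the counting step, not a full solution) applied to the example partitions above:

x = [1, 3, 6, 9]
y = [4, 5, 7, 8]

# For each element of x, advance j past the elements of y that are
# <= it; everything from y[j] onward then forms a good pair with it.
count = 0
j = 0
for value in x:
    while j < len(y) and y[j] <= value:
        j += 1
    count += len(y) - j
print(count)  # 4 + 4 + 2 + 0 = 10 cross-partition good pairs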
@niklasaa has added an explanation for the merge sort analogy, but your implementation still has two issues:
1. You haven't actually sorted either half, so when you're comparing their elements, your two-pointer approach isn't correct.
2. You haven't used their results in the final computation. That's why you're getting an incorrect answer.
For point #1, you should look at merge sort, especially the merge() function. That logic is what will give you the correct pair count without having O(N^2) iteration.
For point #2, store the result for either half first:
# Sorting the first half
leftCount = goodPairs(left_side)
# Sorting the second half
rightCount = goodPairs(right_side)
While returning the final count, add these two results as well.
return count + leftCount + rightCount
Like @Abhinav Mathur stated, you have most of the code down; your problem is with these lines:
# Sorting the first half
goodPairs(left_side)
# Sorting the second half
goodPairs(right_side)
You want to store these in variables that should be declared before the if statement. Here's an updated version of your code:
def goodPairs(arr):
    count = 0
    left_count = 0
    right_count = 0
    if len(arr) > 1:
        mid = len(arr) // 2
        left_side = arr[:mid]
        right_side = arr[mid:]
        left_count = goodPairs(left_side)
        right_count = goodPairs(right_side)
        for i in left_side:
            for j in right_side:
                if i < j:
                    count += 1
    return count + left_count + right_count
Recursion can be difficult at times; look into the idea of merge sort and quick sort to get a better sense of how divide-and-conquer algorithms work.
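As a quick sanity check (made-up values), the fixed function agrees with a brute-force count:

from itertools import combinations

nums = [2, 1, 3, 5, 4]
brute = sum(a < b for a, b in combinations(nums, 2))
print(goodPairs(nums), brute)  # both print 8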

Grab 'n' numbers from a given list of numbers with minimum difference between them

I put up a similar question a few hours ago, albeit with a few mistakes and, admittedly, a poor understanding.
So the question is: from a given list of numbers of arbitrary length, I'm supposed to take an input from the user, say 3, and grab 3 numbers wherein the numbers have the least difference between them.
def findMinDiff(arr):
    # Initialize difference as infinite
    diff = 10**20
    n = len(arr)
    # Find the min diff by comparing difference
    # of all possible pairs in given array
    for i in range(n-1):
        for j in range(i+1, n):
            if abs(arr[i]-arr[j]) < diff:
                diff = abs(arr[i] - arr[j])
    # Return min diff
    return diff
def findDiffArray(arr):
    diff = 10**20
    arr_diff = []
    n = len(arr)
    for i in range(n-1):
        arr_diff.append(abs(arr[i]-arr[i+1]))
    return arr_diff
def choosingElements(arr, arr_diff):
    arr_choose = []
    least_index = 0
    least = arr_diff[0]
    least_index_array = []
    flag = 0
    flag2 = 0
    for z in range(0, 3):
        for i in range(0, len(arr_diff)-1):
            if arr_diff[i] < least:
                if flag > 0:
                    if i == least_index:
                        continue
                least = arr_diff[i]
                least_index = i
                least_index_array.append(i)
                arr_choose.append(arr[i])
                flag += 1
                arr_choose.append(arr[i+1])
                flag += 1
        print("least index is", least_index)
    return arr_choose
# Driver code
arr = [1, 5, 3, 19, 18, 25]
arr_diff = findDiffArray(arr)
arr_diff2 = arr_diff.copy()
item_number = int(input("Enter the number of gifts"))
arr_choose = choosingElements(arr, arr_diff2)
print("Minimum difference is " + str(findMinDiff(arr)))
print("Difference array")
print(*arr_diff, sep = "\n")
print("Numbers with least difference for specified items are", arr_choose)
This is as far as I've gotten: I thought to find the differences between adjacent numbers and keep picking the ones with the least difference between them, but I've realised that my approach is probably wrong.
Can anybody kindly help me out? Thanks!
Now, I'm sure the time complexity on this isn't great, and it might be hard to understand, but how about this:
arr = [1, 18, 5, 19, 25, 3]

# calculates length of the overall path
def calc_path_difference(arr, i1, i2, i3):
    return abs(arr[i1] - arr[i2]) + abs(arr[i2] - arr[i3])

# returns dictionary with differences to other numbers in arr from each number
def differences_dict(arr):
    return {
        current: [
            abs(number - current) if abs(number - current) != 0 else float("inf")
            for number in arr
        ]
        for current in arr
    }

differences = differences_dict(arr)

# Just to give some starting point, take the first three elements of arr
current_path = [calc_path_difference(arr, 0, 1, 2), 0, 1, 2]

# Loop 1
for i, num in enumerate(arr):
    # Save some time by skipping numbers whose path
    # already exceeds the min path we currently have
    if not min(differences[num]) < current_path[0]:
        continue
    # Loop 2
    for j, num2 in enumerate(arr):
        # So you can't get 2 of the same index
        if j == i:
            continue
        # some code for making indices i and j of differences
        # infinite so they can't be the smallest, but not sure if
        # this is needed without more tests
        # diff_arr_copy = differences[num2].copy()
        # diff_arr_copy[i], diff_arr_copy[j] = float("inf"), float("inf")

        # Get index of number in arr with smallest difference to num2
        min_index = differences[num2].index(min(differences[num2]))
        # So you can't get 2 of the same index again
        if min_index == i or min_index == j:
            continue
        # Total of current path
        path_total = calc_path_difference(arr, i, j, min_index)
        # Change current path if this one is shorter
        if path_total < current_path[0]:
            current_path = [path_total, i, j, min_index]
Does this work for you? I played around with the order of the elements in the array and it seemed to give the correct output each time but I would have liked to have another example to test it on.
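For what it's worth, the result can be read back out of current_path like this (a small usage sketch, not part of the original code):

best_total, i1, i2, i3 = current_path
print("smallest total difference:", best_total)
print("numbers:", arr[i1], arr[i2], arr[i3])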

How to sum elements of the rows of a lattice periodically

Suppose I have a lattice
import numpy as np

a = np.array([[1, 1, 1, 1],
              [2, 2, 2, 2],
              [3, 3, 3, 3],
              [4, 4, 4, 4]])
I'd like to make a function func(lattice, start, end) that takes in 3 inputs, where start and end are the index of rows for which the function would sum the elements. For example, for func(a,1,3) it'll sum all the elements of those rows such that func(a,1,3) = 2+2+2+2+3+3+3+3+4+4+4+4 = 36.
Now I know this can be done easily with slicing and np.sum() whatever. But crucially what I want func to do is to also have the ability to wrap around. Namely func(a,2,4) should return 3+3+3+3+4+4+4+4+1+1+1+1.
Couple more examples would be
func(a,3,4) = 4+4+4+4+1+1+1+1
func(a,3,5) = 4+4+4+4+1+1+1+1+2+2+2+2
func(a,0,1) = 1+1+1+1+2+2+2+2
In my situation I'm never gonna get to a point where it'll sum the whole thing again i.e.
func(a,3,6) = sum of all elements
Update:
For my algorithm
for i in range(MC_STEPS_NODE):
sweep(lattice, prob, start_index_master, end_index_master,
rows_for_master)
# calculate the energy
Ene = subhamiltonian(lattice, start_index_master, end_index_master)
# calculate the magnetisation
Mag = mag(lattice, start_index_master, end_index_master)
E1 += Ene
M1 += Mag
E2 += Ene*Ene
M2 += Mag*Mag
if i % sites_for_master == 0:
comm.Send([lattice[start_index_master:start_index_master+1], L, MPI.INT],
dest=(rank-1)%size, tag=4)
comm.Recv([lattice[end_index_master:end_index_master+1], L, MPI.INT],
source = (rank+1)%size, tag=4)
start_index_master = (start_index_master + 1)
end_index_master = (end_index_master + 1)
if start_index_master > 100:
start_index_master = start_index_master % L
if end_index_master > 100:
end_index_master = end_index_master % L
The function I want is the mag() function, which calculates the magnetisation of a sublattice, i.e. the sum of all its elements. Imagine an LxL lattice split into two sublattices, one belonging to the master and the other to the worker. Each sweep sweeps the corresponding sublattice of lattice, with start_index_master and end_index_master determining the start and end rows of the sublattice. Every time i % sites_for_master == 0, the indices move down by one, and once they exceed 100 they are taken mod L to stop them growing without bound in mpi4py. So while the sublattice sits in the middle of the main lattice, start_index_master < end_index_master; eventually the sublattice moves down far enough that it wraps around the bottom edge, so that, for example, with start_index_master = 10 on a lattice with L = 10, the bottom row of the sublattice is the first row ([0]) of the main lattice.
Energy function:
def subhamiltonian(lattice: np.ndarray, col_len_start: int,
                   col_len_end: int) -> float:
    energy = 0
    for i in range(col_len_start, col_len_end+1):
        for j in range(len(lattice)):
            spin = lattice[i%L, j]
            nb_sum = lattice[(i%L+1) % L, j] + lattice[i%L, (j+1) % L] + \
                     lattice[(i%L-1) % L, j] + lattice[i%L, (j-1) % L]
            energy += -nb_sum*spin
    return energy/4.
This is my function for computing the energy of the sublattice.
You could use np.arange to create the indexes to be summed.
>>> def func(lattice, start, end):
... rows = lattice.shape[0]
... return lattice[np.arange(start, end+1) % rows].sum()
...
>>> func(a,3,4)
20
>>> func(a,3,5)
28
>>> func(a,0,1)
12
You can check whether the stop index wraps around and, if it does, add the sum from the beginning of the array to the result. This is efficient because it relies on slice indexing and only does extra work if necessary.
def func(a, start, stop):
    stop += 1
    result = np.sum(a[start:stop])
    if stop > len(a):
        result += np.sum(a[:stop % len(a)])
    return result
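A quick check against the examples from the question (assuming the 4x4 array a defined there):

print(func(a, 3, 4))  # 20: rows 3 and 0
print(func(a, 3, 5))  # 28: rows 3, 0 and 1
print(func(a, 0, 1))  # 12: rows 0 and 1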
The above version works for stop - start < len(a), i.e. no more than one full wrap-around. For an arbitrary number of wrap-arounds (i.e. arbitrary values for start and stop) the following version can be used:
def multi_wraps(a, start, stop):
    result = 0
    # Adjust both indices in case the start index wrapped around.
    stop -= (start // len(a)) * len(a)
    start %= len(a)
    stop += 1  # Include the element pointed to by the stop index.
    n_wraps = (stop - start) // len(a)
    if n_wraps > 0:
        result += n_wraps * a.sum()
        stop = start + (stop - start) % len(a)
    result += np.sum(a[start:stop])
    if stop > len(a):
        result += np.sum(a[:stop % len(a)])
    return result
In case n_wraps > 0 some parts of the array will be summed twice, which is unnecessarily inefficient, so we can instead compute the sums of the various array parts only as needed. The following version sums every array element at most once:
def multi_wraps_efficient(a, start, stop):
    # Adjust both indices in case the start index wrapped around.
    stop -= (start // len(a)) * len(a)
    start %= len(a)
    stop += 1  # Include the element pointed to by the stop index.
    n_wraps = (stop - start) // len(a)
    stop = start + (stop - start) % len(a)  # Eliminate the wraps since they will be accounted for separately.
    tail_sum = a[start:stop].sum()
    if stop > len(a):
        head_sum = a[:stop % len(a)].sum()
        if n_wraps > 0:
            remaining_sum = a[stop % len(a):start].sum()
    elif n_wraps > 0:
        head_sum = a[:start].sum()
        remaining_sum = a[stop:].sum()
    result = tail_sum
    if stop > len(a):
        result += head_sum
    if n_wraps > 0:
        result += n_wraps * (head_sum + tail_sum + remaining_sum)
    return result
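As a quick consistency check (made-up values; both functions only index along the first axis, so a 1-D array works too):

b = np.arange(12)
for start, stop in [(3, 7), (3, 15), (3, 40)]:
    assert multi_wraps(b, start, stop) == multi_wraps_efficient(b, start, stop)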
The following plot shows a performance comparison between using index arrays and the two multi-wrap methods presented above. The tests are run on a (1_000, 1_000) lattice. One can observe that for the multi_wraps method there is an increase in runtime when going from 1 to 2 wrap-arounds, since it unnecessarily sums the array twice. The multi_wraps_efficient method has the same performance regardless of the number of wrap-arounds, since it sums every array element no more than once.
The performance plot was generated using the perfplot package:
import perfplot

perfplot.show(
    setup=lambda n: (np.ones(shape=(1_000, 1_000), dtype=int), 400, n*1_000 + 200),
    kernels=[
        lambda x: index_arrays(*x),
        lambda x: multi_wraps(*x),
        lambda x: multi_wraps_efficient(*x),
    ],
    labels=['index_arrays', 'multi_wraps', 'multi_wraps_efficient'],
    n_range=range(1, 11),
    xlabel="Number of wrap-arounds",
    equality_check=lambda x, y: x == y,
)

count inversions in mergesort python

I want to count how many inversions there are in a list while sorting the list using mergesort. This is my code so far, where x counts the number of inversions while the rest sorts the list:
import sys

x = 0

def merge_sort(A):
    merge_sort2(A, 0, len(A) - 1)

def merge_sort2(A, first, last):
    if first < last:
        middle = (first + last) // 2
        merge_sort2(A, first, middle)
        merge_sort2(A, middle + 1, last)
        merge(A, first, middle, last)

def merge(A, first, middle, last):
    global x
    L = A[first:middle + 1]
    R = A[middle + 1:last + 1]
    L.append(sys.maxsize)
    R.append(sys.maxsize)
    i = j = 0
    for k in range(first, last + 1):
        if L[i] <= R[j]:
            A[k] = L[i]
            i += 1
        else:
            A[k] = R[j]
            j += 1
            x += 1
            x += len(L[first + 1:])
When I call merge sort on a list, the variable x is supposed to give the number of inversions in the list. So if the list was [4,3,2,1], x would be 6; if the list was [1,2,3], x would be 0. I increase x in merge() whenever an element is taken from the right half; however, the number always gets way too big. What am I doing wrong?
Check my work, but I think instead of:

x += 1
x += len(L[first + 1:])

you want:

x += middle + 1 + j - k

Basically, you want to add the difference between where item k is actually coming from and where it would have come from if everything were already sorted. To see why this counts what you want: at any point in the merge, k - first = i + j, so when the next element comes from R, i = k - first - j elements of L have already been used, and the new element jumps over the remaining (middle - first + 1) - i = middle + 1 + j - k elements of L.
Your merge step is a little hard for me to understand. I'm not sure why you are doing this (maybe it's just another way to merge?):
L.append(sys.maxsize)
R.append(sys.maxsize)
but I couldn't get everything to work out with the extra elements added to the partitions, and I think you end up counting the extra element in L as an inversion with each merge move from R.
I think that's causing some of the problems. But you also have two other issues:
Your last line isn't quite the right logic:
x += len(L[first + 1:])
the number of inversions is the number of elements in L that you jump over. You're counting almost every element of L each time. Something like this works better:
x += len(L[i:])
and then at the end, you may have elements left over whose inversions you haven't counted yet. Maybe that's not an issue with your extra elements but in a more traditional merge it is. Here's the way I would count the inversions:
def merge(A, first, middle, last):
    global x
    L = A[first:middle+1]
    R = A[middle+1:last+1]
    i = j = 0
    k = first
    print(L, R)
    while i < len(L) and j < len(R):
        if L[i] <= R[j]:
            A[k] = L[i]
            i += 1
        else:
            A[k] = R[j]
            j += 1
            # count how many left in L
            x += len(L[i:])
        k += 1
    # take care of any leftovers in L or R
    while i < len(L):
        A[k] = L[i]
        i += 1
        k += 1
    while j < len(R):
        A[k] = R[j]
        j += 1
        k += 1
    x += len(L[i:])
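A quick driver (not part of the original answer) to check the counter against the examples from the question; note that this merge() also prints each L, R pair as it goes:

A = [4, 3, 2, 1]
x = 0
merge_sort(A)
print(A, x)  # [1, 2, 3, 4] 6

A = [1, 2, 3]
x = 0
merge_sort(A)
print(A, x)  # [1, 2, 3] 0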

How to efficiently get all combinations where the sum is 10 or below in Python

Imagine you're trying to allocate some fixed resources (e.g. n=10) over some number of territories (e.g. t=5). I am trying to find an efficient way to get all the combinations where the sum is n or below.
E.g. 10,0,0,0,0 is good, as well as 0,0,5,5,0 etc., while 3,3,3,3,3 is obviously wrong (its sum exceeds 10).
I got this far:
import itertools

t = 5
n = 10
r = [range(n+1)] * t
for x in itertools.product(*r):
    if sum(x) <= n:
        print x
This brute force approach is incredibly slow though; there must be a better way?
Timings (1000 iterations):
Default (itertools.product) --- time: 40.90 s
falsetru recursion --- time: 3.63 s
Aaron Williams Algorithm (impl, Tony) --- time: 0.37 s
A possible approach follows. Definitely use it with caution (it's hardly been tested at all, but the results for n=10 and t=5 look reasonable).
The approach involves no recursion. The algorithm to generate partitions of a number n (10 in your example) having m elements (5 in your example) comes from Knuth's 4th volume. Each partition is then zero-extended if necessary, and all the distinct permutations are generated using an algorithm from Aaron Williams which I have seen referred to elsewhere. Both algorithms had to be translated to Python, and that increases the chance that errors have crept in. The Williams algorithm wanted a linked list, which I had to fake with a 2D array to avoid writing a linked-list class.
There goes an afternoon!
Code (note your n is my maxn and your t is my p):
import itertools

def visit(a, m):
    """ Utility function to add partition to the list"""
    x.append(a[1:m+1])

def parts(a, n, m):
    """ Knuth Algorithm H, Combinatorial Algorithms, Pre-Fascicle 3B
        Finds all partitions of n having exactly m elements.
        An upper bound on running time is (3 x number of
        partitions found) + m. Not recursive!
    """
    while (1):
        visit(a, m)
        while a[2] < a[1]-1:
            a[1] -= 1
            a[2] += 1
            visit(a, m)
        j = 3
        s = a[1]+a[2]-1
        while a[j] >= a[1]-1:
            s += a[j]
            j += 1
        if j > m:
            break
        x = a[j] + 1
        a[j] = x
        j -= 1
        while j > 1:
            a[j] = x
            s -= x
            j -= 1
        a[1] = s

def distinct_perms(partition):
    """ Aaron Williams Algorithm 1, "Loopless Generation of Multiset
        Permutations by Prefix Shifts". Finds all distinct permutations
        of a list with repeated items. I don't follow the paper all that
        well, but it _possibly_ has a running time which is proportional
        to the number of permutations (with 3 shift operations for each
        permutation on average). Not recursive!
    """
    perms = []
    val = 0
    nxt = 1
    l1 = [[partition[i], i+1] for i in range(len(partition))]
    l1[-1][nxt] = None
    #print(l1)
    head = 0
    i = len(l1)-2
    afteri = i+1
    tmp = []
    tmp += [l1[head][val]]
    c = head
    while l1[c][nxt] != None:
        tmp += [l1[l1[c][nxt]][val]]
        c = l1[c][nxt]
    perms.extend([tmp])
    while (l1[afteri][nxt] != None) or (l1[afteri][val] < l1[head][val]):
        if (l1[afteri][nxt] != None) and (l1[i][val] >= l1[l1[afteri][nxt]][val]):
            beforek = afteri
        else:
            beforek = i
        k = l1[beforek][nxt]
        l1[beforek][nxt] = l1[k][nxt]
        l1[k][nxt] = head
        if l1[k][val] < l1[head][val]:
            i = k
        afteri = l1[i][nxt]
        head = k
        tmp = []
        tmp += [l1[head][val]]
        c = head
        while l1[c][nxt] != None:
            tmp += [l1[l1[c][nxt]][val]]
            c = l1[c][nxt]
        perms.extend([tmp])
    return perms

maxn = 10  # max integer to find partitions of
p = 5      # max number of items in each partition

# Find all partitions of length p or less adding up
# to maxn or less

# Special cases (Knuth's algorithm requires n and m >= 2)
x = [[i] for i in range(maxn+1)]
# Main cases: runs parts fn (maxn^2+maxn)/2 times
for i in range(2, maxn+1):
    for j in range(2, min(p+1, i+1)):
        m = j
        n = i
        a = [0, n-m+1] + [1] * (m-1) + [-1] + [0] * (n-m-1)
        parts(a, n, m)
y = []
# For each partition, add zeros if necessary and then find
# distinct permutations. Runs distinct_perms function once
# for each partition.
for part in x:
    if len(part) < p:
        y += distinct_perms(part + [0] * (p - len(part)))
    else:
        y += distinct_perms(part)
print(y)
print(len(y))
Make your own recursive function which does not recurse into an element unless it's still possible to make a sum <= n.
def f(r, n, t, acc=[]):
    if t == 0:
        if n >= 0:
            yield acc
        return
    for x in r:
        if x > n:  # <---- do not recurse if sum is larger than `n`
            break
        for lst in f(r, n-x, t-1, acc + [x]):
            yield lst

t = 5
n = 10
for xs in f(range(n+1), n, 5):
    print xs
You can create all the permutations with itertools, and parse the results with numpy.
>>> import numpy as np
>>> from itertools import product
>>> t = 5
>>> n = 10
>>> r = range(n+1)
# Create the product numpy array
>>> prod = np.fromiter(product(r, repeat=t), np.dtype('u1,' * t))
>>> prod = prod.view('u1').reshape(-1, t)
# Extract only permutations that satisfy a condition
>>> prod[prod.sum(axis=1) <= n]
Timeit:
>>> %%timeit
prod = np.fromiter(product(r, repeat=t), np.dtype('u1,' * t))
prod = prod.view('u1').reshape(-1, t)
prod[prod.sum(axis=1) <= n]
10 loops, best of 3: 41.6 ms per loop
You could even speed up the product computation by populating combinations directly in numpy.
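For instance, something along these lines (a sketch of that idea, not from the original answer) builds the whole grid of t-tuples with np.indices and no Python-level loop:

import numpy as np

t, n = 5, 10
# Rows of `grid` are all (n+1)**t tuples of values in 0..n
grid = np.indices((n + 1,) * t).reshape(t, -1).T
result = grid[grid.sum(axis=1) <= n]
print(len(result))  # 3003 tuples of 5 values summing to 10 or less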
You could optimize the algorithm using Dynamic Programming.
Basically, have an array a, where a[i][j] means "can I get a sum of j using the elements up to, and including, the i-th element?" (this assumes your candidate elements are in an array t, not the number you mentioned).
Then, after initialising a to all False, you can fill the array doing:
a = [[False] * (n+1) for _ in range(len(t))]  # DP table, all False to start

a[0][t[0]] = True
for i in range(1, len(t)):
    a[i][t[i]] = True
    for j in range(t[i]+1, n+1):
        for k in range(0, i):
            if a[k][j-t[i]]:
                a[i][j] = True
Then, using this info, you could backtrack the solution :)
def backtrack(j = len(t)-1, goal = n):
    print j, goal
    all_solutions = []
    if j == -1:
        return []
    if goal == t[j]:
        all_solutions.append([j])
    for i in range(j-1, -1, -1):
        if goal - t[j] >= 0 and a[i][goal-t[j]]:  # guard against negative sums
            r = backtrack(i, goal - t[j])
            for l in r:
                print l
                l.append(j)
                all_solutions.append(l)
    all_solutions.extend(backtrack(j-1, goal))
    return all_solutions

backtrack() # is the answer
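For example, with made-up values t = [2, 3, 5, 7] and n = 10, building the table as above and calling backtrack() returns [[1, 3], [0, 1, 2]], i.e. the index sets of the subsets {3, 7} and {2, 3, 5}, each of which sums to exactly 10.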
