Creating 'n' combinations randomly from multiple lists - python

import itertools
import numpy as np

def models():
    default = [0.6, 0.67, 2.4e-2, 1e-2, 2e-5, 1.2e-3, 2e-5]
    lower = [np.log10(i/10) for i in default]
    upper = [np.log10(i*10) for i in default]
    n = 5
    a = np.logspace(lower[0], upper[0], n)
    b = np.logspace(lower[1], upper[1], n)
    c = np.logspace(lower[2], upper[2], n)
    d = np.logspace(lower[3], upper[3], n)
    e = np.logspace(lower[4], upper[4], n)
    f = np.logspace(lower[5], upper[5], n)
    g = np.logspace(lower[6], upper[6], n)
    combs = itertools.product(a, b, c, d, e, f, g)
    list1 = []
    for x in combs:
        x = list(x)
        list1.append(x)
    return list1
The code above returns a list of 5^7 = 78,125 lists. Is there a way I can combine the items in a, b, c, d, e, f, g, possibly randomly, to create a list of, say, 10,000 lists?

You could take random samples of each array and combine them, especially if you don't need to guarantee that specific combinations don't occur more than once:
import itertools  # used by the repeat-avoiding versions below
import random
import numpy as np

def random_models(num_values):
    n = 5
    default = [0.6, 0.67, 2.4e-2, 1e-2, 2e-5, 1.2e-3, 2e-5]
    ranges = zip((np.log10(i/10) for i in default),
                 (np.log10(i*10) for i in default))
    data_arrays = []
    for lower, upper in ranges:
        data_arrays.append(np.logspace(lower, upper, n))
    results = []
    for i in range(num_values):
        results.append([random.choice(arr) for arr in data_arrays])
    return results

l = random_models(10000)
print(len(l))
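If you are already working in numpy, a compact alternative sketch (not part of the original answer; random_models_np and the seed argument are illustrative) draws each parameter column independently with numpy's random generator:
import numpy as np

def random_models_np(num_values, n=5, seed=None):
    rng = np.random.default_rng(seed)
    default = [0.6, 0.67, 2.4e-2, 1e-2, 2e-5, 1.2e-3, 2e-5]
    # Same per-parameter grids as above: n log-spaced points over default/10 .. default*10.
    grids = [np.logspace(np.log10(v/10), np.log10(v*10), n) for v in default]
    # Draw num_values entries (with replacement) from each grid, one column per parameter.
    columns = [rng.choice(grid, size=num_values) for grid in grids]
    return np.column_stack(columns)  # shape: (num_values, 7)

samples = random_models_np(10000, seed=0)
print(samples.shape)  # (10000, 7)
Like random.choice above, this samples with replacement, so individual combinations may repeat.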
Here's a version that will avoid repeats up until you request more data than can be given without repeating:
def random_models_avoid_repeats(num_values):
    n = 5
    default = [0.6, 0.67, 2.4e-2, 1e-2, 2e-5, 1.2e-3, 2e-5]
    # Build the range data (tuples of (lower, upper) range)
    ranges = zip((np.log10(i/10) for i in default),
                 (np.log10(i*10) for i in default))
    # Create the data arrays to sample from
    data_arrays = []
    for lower, upper in ranges:
        data_arrays.append(np.logspace(lower, upper, n))
    sequence_data = []
    for entry in itertools.product(*data_arrays):
        sequence_data.append(entry)
    results = []
    # Holds the current choices to choose from. The data will come from
    # sequence_data above, but randomly shuffled. Values are popped off the
    # end to keep things efficient. It's possible to ask for more data than
    # the samples can give without repeats. In that case, we'll reload
    # temp_data, randomly shuffle again, and start the process over until we've
    # delivered the number of desired results.
    temp_data = []
    # Build the lists
    for i in range(num_values):
        if len(temp_data) == 0:
            temp_data = sequence_data[:]
            random.shuffle(temp_data)
        results.append(temp_data.pop())
    return results
Also note that we can avoid building a results list by turning this into a generator with yield. However, you'd then want to consume the results with a for statement as well:
def random_models_avoid_repeats_generator(num_values):
    n = 5
    default = [0.6, 0.67, 2.4e-2, 1e-2, 2e-5, 1.2e-3, 2e-5]
    # Build the range data (tuples of (lower, upper) range)
    ranges = zip((np.log10(i/10) for i in default),
                 (np.log10(i*10) for i in default))
    # Create the data arrays to sample from
    data_arrays = []
    for lower, upper in ranges:
        data_arrays.append(np.logspace(lower, upper, n))
    sequence_data = []
    for entry in itertools.product(*data_arrays):
        sequence_data.append(entry)
    # Holds the current choices to choose from. The data will come from
    # sequence_data above, but randomly shuffled. Values are popped off the
    # end to keep things efficient. It's possible to ask for more data than
    # the samples can give without repeats. In that case, we'll reload
    # temp_data, randomly shuffle again, and start the process over until we've
    # delivered the number of desired results.
    temp_data = []
    # Build the lists
    for i in range(num_values):
        if len(temp_data) == 0:
            temp_data = sequence_data[:]
            random.shuffle(temp_data)
        yield temp_data.pop()
You'd have to use it like this:
for entry in random_models_avoid_repeats_generator(10000):
    # Do stuff...
    pass
Or manually iterate over it using next().
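For example, a minimal sketch of manual iteration (names are illustrative):
gen = random_models_avoid_repeats_generator(3)
first = next(gen)   # one 7-element parameter combination
second = next(gen)  # the next one; StopIteration is raised once the 3 values are exhausted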

Related

Finding similar numbers in a list and getting the average

I currently have a list of numbers. How would you go about adding together similar numbers (within 850 of each other) and taking their average, to make the list smaller?
For example, given the list
l = [2000, 2200, 5000, 2350]
I want to find the numbers that are similar, i.e. within n + 500 of each other.
The numbers 2000, 2200 and 2350 qualify, so they should be added together and divided by their count (3) to find the mean. That mean then replaces the three numbers, so the list becomes l = [2183, 5000].
For my actual list of numbers, I would like the values within n + 850 of each other to all be selected and their mean found in the same way.
It seems that you are looking for a clustering algorithm, something like K-means.
This algorithm is implemented in the scikit-learn package.
After you find your K means, you can count how many of your data points were clustered with each mean and make your computations.
However, it's not clear in your case what K is. You can try running the algorithm for several values of K until you satisfy your constraint (the n + 500 distance between the means).
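A minimal sketch of that idea, assuming scikit-learn is installed and picking K = 2 purely for illustration:
import numpy as np
from sklearn.cluster import KMeans

l = np.array([2000, 2200, 5000, 2350], dtype=float)

# Illustrative K; in practice you would increase K until every point sits
# within your distance constraint of its cluster mean.
kmeans = KMeans(n_clusters=2, n_init=10, random_state=0).fit(l.reshape(-1, 1))
means = [int(l[kmeans.labels_ == k].mean()) for k in range(kmeans.n_clusters)]
print(means)  # e.g. [2183, 5000] (cluster order may vary)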
You can use:
import numpy as np
l = np.array([2000,2200,5000,2350])
# group the numbers into 500-wide bins (integer division)
similar = l // 500
# for each similar group get the average and convert it to integer (as in the desired output)
new_list = [np.average(l[similar == num]).astype(int) for num in np.unique(similar)]
print(new_list)
Output:
[2183, 5000]
Step 1:
lst = [5620.77978515625,
7388.43017578125,
7683.580078125,
8296.6513671875,
8320.82421875,
8557.51953125,
8743.5,
9163.220703125,
9804.7939453125,
9913.86328125,
9940.1396484375,
9951.74609375,
10074.23828125,
10947.0419921875,
11048.662109375,
11704.099609375,
11958.5,
11964.8232421875,
12335.70703125,
13103.0,
13129.529296875,
16463.177734375,
16930.900390625,
17712.400390625,
18353.400390625,
19390.96484375,
20089.0,
34592.15625,
36542.109375,
39478.953125,
40782.078125,
41295.26953125,
42541.6796875,
42893.58203125,
44578.27734375,
45077.578125,
48022.2890625,
52535.13671875,
58330.5703125,
61597.91796875,
62757.12890625,
64242.79296875,
64863.09765625,
66930.390625]
Step 2:
import numpy as np

seen = []      # to log used index pairs
diff_dic = {}  # to record index pairs and their difference
for i, a in enumerate(lst):
    for j, b in enumerate(lst):
        if i != j and (j, i) not in seen:
            seen.append((i, j))
            diff_dic[(i, j)] = abs(a - b)

keys = []
for ind, diff in diff_dic.items():
    if diff <= 850:
        keys.append(ind)

uniques_k = []  # to record unique indices
for pair in keys:
    for key in pair:
        if key not in uniques_k:
            uniques_k.append(key)

list_arr = np.array(lst)
nearest_avg = np.mean(list_arr[uniques_k])
list_arr = np.delete(list_arr, uniques_k)
list_arr = np.append(list_arr, nearest_avg)
list_arr
output:
array([ 5620.77978516, 34592.15625, 36542.109375, 39478.953125, 48022.2890625, 52535.13671875, 58330.5703125 , 61597.91796875, 62757.12890625, 66930.390625 , 20566.00205365])
You just need a conditional list comprehension like this:
l = [2000, 2200, 5000, 2350]
n = 2000
a = [x for x in l if (n - 250) < x < (n + 250)]
Then you can average with
np.mean(a)
or whatever method you prefer.
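Putting the selection and the averaging together (an illustrative sketch using the same ±250 tolerance as above):
import numpy as np

l = [2000, 2200, 5000, 2350]
n = 2000
tol = 250  # same tolerance as the comprehension above

close = [x for x in l if abs(x - n) < tol]   # [2000, 2200]
rest = [x for x in l if abs(x - n) >= tol]   # [5000, 2350]
if close:
    reduced = rest + [int(np.mean(close))]
else:
    reduced = rest
print(reduced)  # [5000, 2350, 2100]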

Split a nested list in equally sized parts

I would like to separate a list of 80 sets of coordinates into buckets of 8 sets each.
This is what I tried: I found the indexes where the buckets start, sliced the list of coordinates between one index and the next, and finally used an if statement to create the last bucket, since there is no 'next' index for the last one. Any ideas to improve this approach?
Thank you.
nested_lst = [[0.5, 11.3, 5.1]] * 80
indexes = list(range(len(nested_lst)))[::8]
buckets = []
for i in range(len(indexes)):
    if i != len(indexes) - 1:
        bucket = nested_lst[indexes[i]:indexes[i + 1]]
    else:
        bucket = nested_lst[indexes[i]:]
    buckets.append(bucket)
The nested list can be iterated over directly with a simple loop (here coordinates is just a copy of nested_lst):
coordinates = nested_lst[:]
buckets = []
i = 0
while i < len(coordinates):
    l = []
    for _ in range(8):
        l.append(coordinates[i])
        i += 1
    buckets.append(l)
Could this work for you? Reference answer here
def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

nested_lst = [[0.5, 11.3, 5.1]] * 80
buckets = list(batch(nested_lst, n=8))
print(buckets)
The results match yours, but this might be a more efficient and better-looking way to do it.
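For reference, a similar chunker can also be written with itertools.islice, which works even for iterables that have no len() (a sketch, not from the referenced answer; batched is an illustrative name):
from itertools import islice

def batched(iterable, n):
    # Yield successive lists of up to n items from any iterable.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk

nested_lst = [[0.5, 11.3, 5.1]] * 80
buckets = list(batched(nested_lst, 8))
print(len(buckets), len(buckets[0]))  # 10 8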
Use numpy! It'll be much faster and simpler
import numpy as np
coordinates = np.array([0.5, 11.3, 5.1]*80)
# Number of buckets
chunks = 8
# It is a must to have equally sized buckets; reshape will also fail if this is not the case
assert coordinates.size % chunks == 0
# Size of each bucket
chunksize = coordinates.size // chunks
# There you go, 8 buckets with the same size
res = coordinates.reshape((chunks, chunksize))
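If the goal is the question's exact layout (10 buckets of 8 coordinate triples), a hedged sketch that keeps the triples intact might look like this:
import numpy as np

nested_lst = [[0.5, 11.3, 5.1]] * 80
arr = np.array(nested_lst)  # shape (80, 3)

bucket_size = 8
assert arr.shape[0] % bucket_size == 0  # only equally sized buckets work here
buckets = arr.reshape(-1, bucket_size, arr.shape[1])
print(buckets.shape)  # (10, 8, 3)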

Realign indexes to a changed python collection

I have a collection of data and a variable containing indexes to some of them.
A filtering operation is applied on the data that eliminates a subset of the data.
I want to shift the indexes so that they refer to the updated collection of data (eliminating indexes to deleted instances).
I'm using the implementation in the function below. I'm also posting the code I used to validate that it works.
Is there a quick & fast way to do the index realignment via the core libraries or a better way in general?
import random
def align_index(wanted_idx, mask):
    """
    Function to align a set of indexes to a collection after deletions,
    indicated with a mask.

    Arguments:
        wanted_idx: List of desired integer indexes prior to deletion
        mask: Binary mask, where 1's indicate elements that survive deletion
    Returns:
        List of integer indexes to (surviving) desired elements, post-deletion
    """
    # rebuild indexes: remove dangling
    new_idx = [idx for (i, idx) in enumerate(wanted_idx) if mask[idx]]
    # mark deleted
    not_mask = [int(not m) for m in mask]
    # cumsum deleted regions
    realigned_idx = [k - sum(not_mask[:k + 1]) for k in new_idx]
    return realigned_idx
# data
data = [random.randint(0,500) for _ in range(1000)]
rng = list(range(len(data)))
for _ in range(1000):
    # random data deletion / request
    wanted_idx = random.sample(rng, random.randint(5, 100))
    del_index = random.sample(rng, random.randint(5, 100))
    # apply deletion
    mask = [int(i not in del_index) for i in range(len(data))]
    filtered_data = [data[i] for (i, m) in enumerate(mask) if m]
    realigned_index = align_index(wanted_idx, mask)
    # verify
    new_idx = [idx for (i, idx) in enumerate(wanted_idx) if mask[idx]]
    l1 = [data[k] for k in new_idx]
    l2 = [filtered_data[k] for k in realigned_index]
    assert l1 == l2
If you use numpy it's quite trivial:
import numpy as np
mask = np.asarray(mask, dtype=bool)
new_idx = np.cumsum(mask) - 1  # new position of each surviving element
new_idx[~mask] = -1            # deleted elements get no valid new index
You shouldn't need to recompute new_idx unless more elements get deleted.
Then you can get the remapped index for old index i just by looking new_idx[i]. Or a whole array at once:
wanted_idx = np.array(wanted_idx, dtype=np.int64)
remapped_idx = new_idx[wanted_idx]
Note that deleted indices get assigned value -1. You can filter these out if you want:
remapped_idx = remapped_idx[remapped_idx >= 0]
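As a quick sanity check of the remapping (illustrative values):
import numpy as np

# Elements at old indices 1 and 4 are deleted.
mask = np.array([1, 0, 1, 1, 0, 1], dtype=bool)
new_idx = np.cumsum(mask) - 1
new_idx[~mask] = -1
print(new_idx)  # [ 0 -1  1  2 -1  3]

wanted_idx = np.array([0, 1, 3, 5])
remapped_idx = new_idx[wanted_idx]
print(remapped_idx[remapped_idx >= 0])  # [0 2 3]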

Python sample without replacement and change population

If you have a list of 100 values, which you want to subset into 3 in the ratio 2:1:1, what's the easiest way to do this in Python?
My current solution is to take a sample of the indices for each subset then remove these values from the original list, i.e.
import random

my_list = [....]
num_A = 50
subset_A = []
num_B = 25
subset_B = []
num_C = 25
subset_C = []
a_indices = random.sample(range(len(my_list)), num_A)
for i in sorted(a_indices, reverse=True):  # Otherwise can get index out of range
    subset_A.append(my_list.pop(i))
b_indices = random.sample(range(len(my_list)), num_B)
for i in sorted(b_indices, reverse=True):  # Otherwise can get index out of range
    subset_B.append(my_list.pop(i))
subset_C = my_list[:]
assert len(subset_C) == num_C
However I'm sure there's a much more elegant solution than this.
There's a much easier way. You can just shuffle the array and take parts.
xs = [...]
random.shuffle(xs)
print(xs[:50], xs[50:75], xs[75:])
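A hedged generalisation of the same idea for arbitrary ratios (split_by_ratio is an illustrative helper, not from the original answer):
import random

def split_by_ratio(xs, ratios=(2, 1, 1), seed=None):
    # Shuffle xs and cut it into len(ratios) parts proportional to ratios.
    xs = list(xs)
    rng = random.Random(seed)
    rng.shuffle(xs)
    total = sum(ratios)
    parts, start = [], 0
    for r in ratios[:-1]:
        stop = start + len(xs) * r // total
        parts.append(xs[start:stop])
        start = stop
    parts.append(xs[start:])  # any rounding remainder goes to the last part
    return parts

subset_A, subset_B, subset_C = split_by_ratio(range(100), (2, 1, 1), seed=0)
print(len(subset_A), len(subset_B), len(subset_C))  # 50 25 25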

Speed up comparison of floats between lists

I have a block of code which does the following:
take a float from a list, b_lst below, of index indx
check if this float is located between a float of index i and the next one (of index i+1) in list a_lst
if it is, then store indx in a sub-list of a third list (c_lst), where the index of that sub-list is the index of the left float in a_lst (i.e. i)
repeat for all floats in b_lst
Here's an MWE which shows what the code does:
import numpy as np
import timeit

def random_data(N):
    # Generate some random data.
    return np.random.uniform(0., 10., N).tolist()

# Data lists.
# Note that a_lst is sorted.
a_lst = np.sort(random_data(1000))
b_lst = random_data(5000)
# Fixed index value (int)
c = 25

def func():
    # Create empty list with as many sub-lists as elements present
    # in a_lst beyond the 'c' index.
    c_lst = [[] for _ in range(len(a_lst[c:]) - 1)]
    # For each element in b_lst.
    for indx, elem in enumerate(b_lst):
        # For elements in a_lst beyond the 'c' index.
        for i in range(len(a_lst[c:]) - 1):
            # Check if 'elem' is between this a_lst element
            # and the next.
            if a_lst[c + i] < elem <= a_lst[c + (i + 1)]:
                # If it is then store the index of 'elem' ('indx')
                # in the 'i' sub-list of c_lst.
                c_lst[i].append(indx)
    return c_lst

print(func())

# time function.
func_time = timeit.timeit(func, number=10)
print(func_time)
This code works as it should but I really need to improve its performance since it's slowing down the rest of my code.
Add
This is the optimized function based on the accepted answer. It's quite ugly but it gets the job done.
def func_opt():
    c_lst = [[] for _ in range(len(a_lst[c:]) - 1)]
    c_opt = np.searchsorted(a_lst[c:], b_lst, side='left')
    for elem in c_opt:
        if 0 < elem < len(a_lst[c:]):
            c_lst[elem - 1] = np.where(c_opt == elem)[0].tolist()
    return c_lst
In my tests this is ~7x faster than the original function.
Add 2
Much faster not using np.where:
def func_opt2():
    c_lst = [[] for _ in range(len(a_lst[c:]) - 1)]
    c_opt = np.searchsorted(a_lst[c:], b_lst, side='left')
    for indx, elem in enumerate(c_opt):
        if 0 < elem < len(a_lst[c:]):
            c_lst[elem - 1].append(indx)
    return c_lst
This is ~130x faster than the original function.
Add 3
Following jtaylor's advice I converted the result of np.searchsorted to a list with .tolist():
def func_opt3():
    c_lst = [[] for _ in range(len(a_lst[c:]) - 1)]
    c_opt = np.searchsorted(a_lst[c:], b_lst, side='left').tolist()
    for indx, elem in enumerate(c_opt):
        if 0 < elem < len(a_lst[c:]):
            c_lst[elem - 1].append(indx)
    return c_lst
This is ~470x faster than the original function.
You want to take a look at numpy's searchsorted. Calling
np.searchsorted(a_lst, b_lst, side='right')
will return an array of indices, the same length as b_lst, indicating for each element of b_lst the position in a_lst before which it would have to be inserted to preserve order. It will be very fast, since it uses binary search and the looping happens in C. You can then create your subarrays with fancy indexing, e.g.:
>>> a = np.arange(1, 10)
>>> b = np.random.rand(100) * 10
>>> c = np.searchsorted(a, b, side='right')
>>> b[c == 0]
array([ 0.54620226, 0.40043875, 0.62398925, 0.40097674, 0.58765603,
0.14045264, 0.16990249, 0.78264088, 0.51507254, 0.31808327,
0.03895417, 0.92130027])
>>> b[c == 1]
array([ 1.34599709, 1.42645778, 1.13025996, 1.20096723, 1.75724448,
1.87447058, 1.23422399, 1.37807553, 1.64118058, 1.53740299])
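If you want the positions in b_lst (as in the question) rather than the values, one hedged way to group them without a Python-level double loop is to sort the bin assignments and split on the boundaries (np.argsort, np.flatnonzero and np.split are standard numpy calls; the variable names are illustrative):
import numpy as np

a = np.sort(np.random.uniform(0., 10., 1000))
b = np.random.uniform(0., 10., 5000)

bins = np.searchsorted(a, b, side='right')  # bin index for every element of b
order = np.argsort(bins, kind='stable')     # positions in b, grouped by bin
split_at = np.flatnonzero(np.diff(bins[order])) + 1
groups = np.split(order, split_at)          # one array of b-indices per occupied bin
occupied = np.unique(bins)                  # the bin values, in the same order as groups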
