Calculate sum of vectors in numpy array based on dictionary values - python

I have an array like the following, but much larger:
array = np.random.randint(6, size=(5, 4))

array([[4, 3, 0, 2],
       [1, 4, 3, 1],
       [0, 3, 5, 2],
       [1, 0, 5, 3],
       [0, 5, 4, 4]])
I also have a dictionary which gives me the vector representation of each value in this array:
dict_ = {2: np.array([3.4, 2.6, -1.2]), 0: np.array([0, 0, 0]),
         1: np.array([3.9, 2.6, -1.2]), 3: np.array([3.8, 6.6, -1.9]),
         4: np.array([5.4, 2.6, -1.2]), 5: np.array([6.4, 2.6, -1.2])}
I want to calculate the average of the vector representations for each row of the array, but when a value is 0, ignore it when calculating the average (the dictionary maps 0 to the zero vector).
For example, for the first row, it should average [5.4, 2.6, -1.2], [3.8, 6.6, -1.9], and [3.4, 2.6, -1.2], and give [4.2, 3.93, -1.43] as the first row of the output.
I want an output which keeps the same row structure, and has 3 columns (each vector in the dictionary has 3 values).
How can this be done efficiently? My actual dictionary has over 100,000 entries and the array is 100,000 by 5,000.

For efficiency I would transform the dict to an array and then use advanced indexing for lookup:
>>> import numpy as np
>>>
>>> # create problem
>>> v = np.random.random((100_000, 3))
>>> dict_ = dict(enumerate(v))
>>> arr = np.random.randint(0, 100_000, (100_000, 100))
>>>
>>> # solve
>>> from operator import itemgetter
>>> lookup = np.array(itemgetter(*range(100_000))(dict_))
>>> lookup[0] = np.nan   # key 0 means "ignore": NaN entries drop out of nanmean
>>> result = np.nanmean(lookup[arr], axis=1)
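A note on the lookup construction: itemgetter(*range(100_000)) assumes the dictionary keys are exactly the integers 0 through 99,999, matching the enumerate above. Under that same assumption, an equivalent and perhaps more transparent way to build the table (a sketch, not part of the original timing) is:
>>> lookup = np.stack([dict_[k] for k in range(100_000)])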
Or applied to OP's example:
>>> arr = np.array([[4, 3, 0, 2],
...                 [1, 4, 3, 1],
...                 [0, 3, 5, 2],
...                 [1, 0, 5, 3],
...                 [0, 5, 4, 4]])
>>> dict_ = {2: np.array([3.4, 2.6, -1.2]), 0: np.array([0, 0, 0]),
...          1: np.array([3.9, 2.6, -1.2]), 3: np.array([3.8, 6.6, -1.9]),
...          4: np.array([5.4, 2.6, -1.2]), 5: np.array([6.4, 2.6, -1.2])}
>>>
>>> lookup = np.array(itemgetter(*range(6))(dict_))
>>> lookup[0] = np.nan
>>> result = np.nanmean(lookup[arr], axis=1)
>>> result
array([[ 4.2       ,  3.93333333, -1.43333333],
       [ 4.25      ,  3.6       , -1.375     ],
       [ 4.53333333,  3.93333333, -1.43333333],
       [ 4.7       ,  3.93333333, -1.43333333],
       [ 5.73333333,  2.6       , -1.2       ]])
Timings against @jpp's method:
pp: 0.8046 seconds
jpp: 10.3449 seconds
results equal: True
Code to produce timings:
import numpy as np

# create problem
v = np.random.random((100_000, 3))
dict_ = dict(enumerate(v))
arr = np.random.randint(0, 100_000, (100_000, 100))

# solve
from operator import itemgetter

def f_pp(arr, dict_):
    lookup = np.array(itemgetter(*range(100_000))(dict_))
    lookup[0] = np.nan
    return np.nanmean(lookup[arr], axis=1)

def f_jpp(arr, dict_):
    def averager(x):
        lst = [dict_[i] for i in x if i]
        return np.mean(lst, axis=0) if lst else np.array([0, 0, 0])
    return np.apply_along_axis(averager, -1, arr)

from time import perf_counter

t = perf_counter()
r_pp = f_pp(arr, dict_)
s = perf_counter()
print(f'pp:  {s-t:8.4f} seconds')

t = perf_counter()
r_jpp = f_jpp(arr, dict_)
s = perf_counter()
print(f'jpp: {s-t:8.4f} seconds')

print('results equal:', np.allclose(r_pp, r_jpp))

This is one solution using numpy.apply_along_axis.
You should test and benchmark to see if performance is adequate for your use case.
A = np.random.randint(6, size=(5, 4))

print(A)

[[3 5 2 4]
 [2 4 5 2]
 [0 3 1 1]
 [3 4 4 5]
 [2 5 0 2]]
# keys whose vector representation is all zeros ({0} for the question's dict_)
zeros = {k for k, v in dict_.items() if (v == 0).all()}

def averager(x):
    lst = [dict_[i] for i in x if i not in zeros]
    return np.mean(lst, axis=0) if lst else np.array([0, 0, 0])

res = np.apply_along_axis(averager, -1, A)
array([[ 4.75      ,  3.6       , -1.375     ],
       [ 4.65      ,  2.6       , -1.2       ],
       [ 3.86666667,  3.93333333, -1.43333333],
       [ 5.25      ,  3.6       , -1.375     ],
       [ 4.4       ,  2.6       , -1.2       ]])

Related

Add in-between steps into array of numbers (Python)

I am looking for some function that takes an input array of numbers and adds steps (a range) between these numbers. I need to be able to specify the length of the output array.
Example:
input_array = [1, 2, 5, 4]
output_array = do_something(input_array, output_length=10)
Result:
output_array => [1, 1.3, 1.6, 2, 3, 4, 5, 4.6, 4.3, 4]
len(output_array) => 10
Is there something like that, in Numpy for example?
I have a prototype of this function that divides the input array into pairs ([0,2], [2,5], [5,8]) and fills the "spaces" between them with np.linspace(), but it doesn't work well: https://onecompiler.com/python/3xwcy3y7d
def do_something(input_array, output_length):
    import math
    import numpy as np
    output = []
    in_between_steps = math.ceil(output_length/len(input_array))
    prev_num = None
    for num in input_array:
        if prev_num is not None:
            for in_num in np.linspace(start=prev_num, stop=num, num=in_between_steps, endpoint=False):
                output.append(in_num)
        prev_num = num
    output.append(input_array[len(input_array)-1])  # manually add last item
    return output
How it works:
input_array = [1, 2, 5, 4]
print(len(do_something(input_array, output_length=10))) # result: 10 OK
print(len(do_something(input_array, output_length=20))) # result: 16 NOT OK
print(len(do_something(input_array, output_length=200))) # result: 151 NOT OK
I have an array [1, 2, 5, 4] and I need to "expand" the number of items in it while preserving its "shape".
There is numpy.interp which might be what you are looking for.
import numpy as np

points = np.arange(4)            # x-coordinates of the known values
values = np.array([1, 2, 5, 4])
x = np.linspace(0, 3, num=10)    # 10 evenly spaced sample positions
np.interp(x, points, values)
output:
array([1.        , 1.33333333, 1.66666667, 2.        , 3.        ,
       4.        , 5.        , 4.66666667, 4.33333333, 4.        ])
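This can also be wrapped up as the do_something function from the question (a sketch reusing its name and signature; the endpoints land exactly on the first and last input values):

import numpy as np

def do_something(input_array, output_length):
    # sample the piecewise-linear interpolant at output_length evenly spaced points
    points = np.arange(len(input_array))
    x = np.linspace(0, len(input_array) - 1, num=output_length)
    return np.interp(x, points, input_array)

print(len(do_something([1, 2, 5, 4], output_length=10)))   # 10  OK
print(len(do_something([1, 2, 5, 4], output_length=200)))  # 200 OK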

python - numpy fancy broadcasting for special case riddle

I want to do some force calculations between vertices, and because the forces are symmetrical I have a list of vertex pairs that need those forces added. I am sure it's possible with fancy indexing, but I can only get it to work with a slow Python for-loop. For symmetry reasons, the right-hand side of the index array needs a negative sign when adding the forces.
Consider the vertex index array:
>>> I = np.array([[0,1],[1,2],[2,0]])
I = [[0 1]
     [1 2]
     [2 0]]
and the x,y forces array for each pair:
>>> F = np.array([[3,6],[4,7],[5,8]])
F = [[3 6]
     [4 7]
     [5 8]]
The wanted operation could be described as:
"vertex #0 sums the force vectors (3,6) and (-5,-8),
vertex #1 sums the force vectors (-3,-6) and (4,7),
vertex #2 sums the force vectors (-4,-7) and (5,8)"
Desired result:
        [ 3  6]   [ 0  0]   [-5 -8]   [-2 -2]   // resulting force, vertex #0
    A = [-3 -6] + [ 4  7] + [ 0  0] = [ 1  1]   // resulting force, vertex #1
        [ 0  0]   [-4 -7]   [ 5  8]   [ 1  1]   // resulting force, vertex #2
Edit: my ugly for-loop solution:

import numpy as np

I = np.array([[0,1],[1,2],[2,0]])
F = np.array([[3,6],[4,7],[5,8]])

A = np.zeros((3,2))
A_x = np.zeros((3,2))
A_y = np.zeros((3,2))
for row in range(len(F)):
    A_x[I[row][0], 0] = F[row][0]
    A_x[I[row][1], 1] = -F[row][0]
    A_y[I[row][0], 0] = F[row][1]
    A_y[I[row][1], 1] = -F[row][1]
A = np.hstack((np.sum(A_x, axis=1).reshape((3,1)), np.sum(A_y, axis=1).reshape((3,1))))
print(A)
A = [[-2. -2.]
     [ 1.  1.]
     [ 1.  1.]]
Your current "push-style" interpretation of I is:
For row-index k in I, take the forces from F[k] and add/subtract them to out[I[k], :]
I = np.array([[0,1],[1,2],[2,0]])
out = np.zeros_like(F)
for k, d in enumerate(I):
    out[d[0], :] += F[k]
    out[d[1], :] -= F[k]
out
# array([[-2, -2],
#        [ 1,  1],
#        [ 1,  1]])
However, you can also turn the meaning of I on its head and make it "pull-style", so that it says:
For row-index k in I, set vertex out[k] to be the difference of F[I[k]]
I = np.array([[0,2],[1,0],[2,1]])
out = np.zeros_like(F)
for k, d in enumerate(I):
    out[k, :] = F[d[0], :] - F[d[1], :]
out
# array([[-2, -2],
#        [ 1,  1],
#        [ 1,  1]])
In which case the operation simplifies quite easily to mere fancy indexing:
out = F[I[:, 0], :] - F[I[:, 1], :]
# array([[-2, -2],
#        [ 1,  1],
#        [ 1,  1]])
You can preallocate an array to hold the shuffled forces and then use the index like so:
>>> N = I.max() + 1
>>> out = np.zeros((N, 2, 2), F.dtype)
>>> out[I, [1, 0]] = F[:, None, :]   # stores F[k] at out[I[k,0], 1] and out[I[k,1], 0]
>>> np.diff(out, axis=1).squeeze()   # out[:, 1] - out[:, 0]
array([[-2, -2],
       [ 1,  1],
       [ 1,  1]])
or, equivalently,
>>> out = np.zeros((2, N, 2), F.dtype)
>>> out[[[1], [0]], I.T] = F         # stores F[k] at out[1, I[k,0]] and out[0, I[k,1]]
>>> np.diff(out, axis=0).squeeze()   # out[1] - out[0]
array([[-2, -2],
       [ 1,  1],
       [ 1,  1]])
The way I understand the question, the values in the I array represent the vertex number, i.e. the name of the vertex; they are not an actual positional index. Based on this thought, I have a different solution that uses the original I array. It does not quite come without loops, but should be OK for a reasonable number of vertices:
I = np.array([[0,1],[1,2],[2,0]])
F = np.array([[3,6],[4,7],[5,8]])

pos = I[:, 0]
neg = I[:, 1]
A = np.zeros_like(F)
unique = np.unique(I)
for i, vertex_number in enumerate(unique):
    A[i] = F[np.where(pos == vertex_number)] - F[np.where(neg == vertex_number)]

# produces the expected result
# [[-2 -2]
#  [ 1  1]
#  [ 1  1]]
Maybe this loop can also be replaced by some numpy magic.
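One possibility, as a minimal sketch: np.add.at performs an unbuffered scatter-add (repeated indices accumulate correctly), so the whole loop collapses to two calls, assuming the vertex numbers are the integers 0..N-1:

import numpy as np

I = np.array([[0, 1], [1, 2], [2, 0]])
F = np.array([[3, 6], [4, 7], [5, 8]])

A = np.zeros_like(F)
np.add.at(A, I[:, 0], F)    # add F[k] at the left-hand vertex of each pair
np.add.at(A, I[:, 1], -F)   # subtract F[k] at the right-hand vertex
# A == [[-2, -2],
#       [ 1,  1],
#       [ 1,  1]]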

Python: Mapping between two arrays with an index array

I have a numpy array
src = np.random.rand(320,240)
and another numpy array idx of size (2 x (320*240)). Each column of idx indexes an entry in a result array dst; e.g., idx[:, 20] = [3, 10] references row 3, column 10 of dst, and 20 corresponds to the flattened index of src. In other words, idx establishes a mapping between the entries of src and dst. Assuming dst is initialized with all zeros, how can I copy the entries of src to their destinations in dst without a loop?
Here is the canonical way of doing it:
>>> import numpy as np
>>>
>>> src = np.random.rand(4, 3)
>>> src
array([[0.0309325 , 0.72261479, 0.98373595],
       [0.06357406, 0.44763809, 0.45116039],
       [0.63992938, 0.6445605 , 0.01267776],
       [0.76084312, 0.61888759, 0.2138713 ]])
>>>
>>> idx = np.indices(src.shape).reshape(2, -1)
>>> np.random.shuffle(idx.T)
>>> idx
array([[3, 3, 0, 1, 0, 3, 1, 1, 2, 2, 2, 0],
       [1, 2, 2, 0, 1, 0, 1, 2, 2, 1, 0, 0]])
>>>
>>> dst = np.empty_like(src)
>>> dst[tuple(idx)] = src.ravel()
>>> dst
array([[0.2138713 , 0.44763809, 0.98373595],
       [0.06357406, 0.63992938, 0.6445605 ],
       [0.61888759, 0.76084312, 0.01267776],
       [0.45116039, 0.0309325 , 0.72261479]])
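As a quick check tying this back to the question's description (using the small example above): column k of idx holds the destination of the k-th flattened entry of src.
>>> k = 5   # any flat index into src
>>> dst[idx[0, k], idx[1, k]] == src.ravel()[k]
True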
If you can't be sure that idx is a proper shuffle, it is a bit safer to use np.full with a fill value that does not appear in src, instead of np.empty.
>>> dst = np.full_like(src, np.nan)
>>> dst[tuple(idx)] = src.ravel()
>>>
>>> dst
array([[0.27020869, 0.71216066,        nan],
       [0.63812283, 0.69151451, 0.65843901],
       [       nan, 0.02406174, 0.47543061],
       [0.05650845,        nan,        nan]])
If you spot the fill value in dst, something is wrong with idx.
You can try:
dst[idx[0, :], idx[1, :]] = src.flat
In [33]: src = np.random.randn(2, 3)

In [34]: src
Out[34]:
array([[ 0.68636938,  0.60275041,  1.26078727],
       [ 1.17937849, -1.0369404 ,  0.42847611]])

In [35]: dst = np.zeros_like(src)

In [37]: idx = np.array([[0, 1, 0, 1, 0, 0], [1, 2, 0, 1, 2, 0]])

In [38]: dst[idx[0, :], idx[1, :]] = src.flat

In [39]: dst
Out[39]:
array([[ 0.42847611,  0.68636938, -1.0369404 ],
       [ 0.        ,  1.17937849,  0.60275041]])
dst[0, 1] is src[0, 0], etc.

How do I select/format only the values from a dictionary into a list or numpy array?

How do I get it to print just a list of the averages? I need it to be in exactly the same format as my numpy arrays so I can compare them to see whether they are the same.
Code (Python 2):
import numpy as np
from pprint import pprint

centroids = np.array([[3,44],[4,15],[5,15]])
dataPoints = np.array([[2,4],[17,4],[45,2],[45,7],[16,32],[32,14],[20,56],[68,33]])

def size(vector):
    return np.sqrt(sum(x**2 for x in vector))

def distance(vector1, vector2):
    return size(vector1 - vector2)

def distances(array1, array2):
    lists = [[distance(vector1, vector2) for vector2 in array2] for vector1 in array1]
    #print lists.index(min, zip(*lists))
    smallest = [min(zip(l, range(len(l)))) for l in zip(*lists)]
    clusters = {}
    for j, (_, i) in enumerate(smallest):
        clusters.setdefault(i, []).append(dataPoints[j])
    pprint(clusters)
    print '\nAverage of Each Point'
    avgDict = {}
    for k, v in clusters.iteritems():
        avgDict[k] = sum(v) / len(v)
    avgList = np.asarray(avgDict)
    pprint(avgList)

distances(centroids, dataPoints)
Current Output:
{0: [array([16, 32]), array([20, 56])],
 1: [array([2, 4])],
 2: [array([17, 4]),
     array([45, 2]),
     array([45, 7]),
     array([32, 14]),
     array([68, 33])]}
Average of Each Point
array({0: array([18, 44]), 1: array([2, 4]), 2: array([41, 12])}, dtype=object)
Desired Output:
[[18,44],[2,4],[41,12]]
Or whatever format is best for comparing my arrays/lists. I am aware I should have just stuck with one data type.
Are you trying to cluster the dataPoints by the index of the nearest centroid, and to find the average position of the clustered points? If so, I advise using numpy's broadcasting rules to get the output you need.
Consider this,
np.linalg.norm(centroids[None, :, :] - dataPoints[:, None, :], axis=-1)
It creates a matrix showing all distances between dataPoints and centroids,
array([[ 40.01249805,  11.18033989,  11.40175425],
       [ 42.3792402 ,  17.02938637,  16.2788206 ],
       [ 59.39696962,  43.01162634,  42.05948169],
       [ 55.97320788,  41.77319715,  40.79215611],
       [ 17.69180601,  20.80865205,  20.24845673],
       [ 41.72529209,  28.01785145,  27.01851217],
       [ 20.80865205,  44.01136217,  43.65775991],
       [ 65.9241989 ,  66.48308055,  65.520989  ]])
And you can compute the indices of the nearest centroids with this trick (the computation is split into 3 lines for readability):
In: t0 = centroids[None, :, :] - dataPoints[:, None, :]
In: t1 = np.linalg.norm(t0, axis=-1)
In: t2 = np.argmin(t1, axis=-1)
Now t2 has the indices,
array([1, 2, 2, 2, 0, 2, 0, 2])
To find the first cluster, use the boolean mask t2 == 0:
In: dataPoints[t2 == 0]
Out: array([[16, 32],
            [20, 56]])

In: dataPoints[t2 == 1]
Out: array([[2, 4]])

In: dataPoints[t2 == 2]
Out: array([[17,  4],
            [45,  2],
            [45,  7],
            [32, 14],
            [68, 33]])
Or just calculate the average in your case:
In: np.mean(dataPoints[t2 == 0], axis=0)
Out: array([ 18.,  44.])

In: np.mean(dataPoints[t2 == 1], axis=0)
Out: array([ 2.,  4.])

In: np.mean(dataPoints[t2 == 2], axis=0)
Out: array([ 41.4,  12. ])
Of course, the latter blocks can be rewritten as a for-loop if you want, for instance as in the sketch below.
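A minimal sketch (assuming dataPoints, centroids, and t2 from above, and that every cluster is non-empty) that collects the per-cluster means into a single array in the desired [[18, 44], [2, 4], [41.4, 12]] layout:

# one row per centroid: the mean position of the points assigned to it
means = np.array([np.mean(dataPoints[t2 == k], axis=0)
                  for k in range(len(centroids))])
# array([[18. , 44. ],
#        [ 2. ,  4. ],
#        [41.4, 12. ]])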
In my opinion, it is good practice to formulate the solution using numpy's conventions.

How to make numpy.cumsum start after the first value

I have:
import numpy as np
position = np.array([4, 4.34, 4.69, 5.02, 5.3, 5.7, ..., 4])
x = (B/position**2)*dt
A = np.cumsum(x)
assert A[0] == 0 # I want this to be true.
where B and dt are scalar constants. This is for a numerical integration problem with the initial condition A[0] = 0. Is there a way to set A[0] = 0 and then do a cumsum for everything else?
I don't understand exactly what your problem is, but here are some things you can do to get A[0] = 0.
You can make A longer by one entry, with the zero as its first element:
# initialize example data
import numpy as np
B = 1
dt = 1
position = np.array([4, 4.34, 4.69, 5.02, 5.3, 5.7])
# do calculation
A = np.zeros(len(position) + 1)
A[1:] = np.cumsum((B/position**2)*dt)
Result:
A = [ 0.          0.0625      0.11559096  0.16105356  0.20073547  0.23633533  0.26711403]
len(A) == len(position) + 1
Alternatively, you can manipulate the calculation to subtract the first entry of the result:
# initialize example data
import numpy as np
B = 1
dt = 1
position = np.array([4, 4.34, 4.69, 5.02, 5.3, 5.7])
# do calculation
A = np.cumsum((B/position**2)*dt)
A = A - A[0]
Result:
[ 0.          0.05309096  0.09855356  0.13823547  0.17383533  0.20461403]
len(A) == len(position)
As you see, the results have different lengths. Is one of them what you expect?
1D cumsum
A wrapper around np.cumsum that sets the first element to 0:
import numpy as np

def cumsum(pmf):
    cdf = np.empty(len(pmf) + 1, dtype=pmf.dtype)
    cdf[0] = 0
    np.cumsum(pmf, out=cdf[1:])   # write the running sums after the leading 0
    return cdf
Example usage:
>>> np.arange(1, 11)
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
>>> cumsum(np.arange(1, 11))
array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
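Applied to the question's integration problem (a sketch; B and dt are placeholder constants here):

B, dt = 1.0, 1.0
position = np.array([4, 4.34, 4.69, 5.02, 5.3, 5.7])

A = cumsum((B / position**2) * dt)   # the wrapper above
assert A[0] == 0                     # holds by construction; len(A) == len(position) + 1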
N-D cumsum
A wrapper around np.cumsum that sets the first element to 0 and works with N-D arrays:
def cumsum(pmf, axis=None, dtype=None):
    if axis is None:
        pmf = pmf.reshape(-1)
        axis = 0
    if dtype is None:
        dtype = pmf.dtype
    idx = [slice(None)] * pmf.ndim
    # Create array with extra element along cumsummed axis.
    shape = list(pmf.shape)
    shape[axis] += 1
    cdf = np.empty(shape, dtype)
    # Set first element to 0.
    idx[axis] = 0
    cdf[tuple(idx)] = 0
    # Perform cumsum on remaining elements.
    idx[axis] = slice(1, None)
    np.cumsum(pmf, axis=axis, dtype=dtype, out=cdf[tuple(idx)])
    return cdf
Example usage:
>>> np.arange(1, 11).reshape(2, 5)
array([[ 1,  2,  3,  4,  5],
       [ 6,  7,  8,  9, 10]])
>>> cumsum(np.arange(1, 11).reshape(2, 5), axis=-1)
array([[ 0,  1,  3,  6, 10, 15],
       [ 0,  6, 13, 21, 30, 40]])
I totally understand your pain; I wonder why NumPy doesn't allow this with np.cumsum. Anyway, though I'm really late and there's already another good answer, I prefer this one a bit more:
np.cumsum(np.pad(array, (1, 0), "constant"))
where array in your case is (B/position**2)*dt. You can change the order of np.pad and np.cumsum as well. I'm just adding a zero to the start of the array and calling np.cumsum.
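For instance, a minimal demonstration with a small stand-in array, showing that both orders give the same result:

import numpy as np

arr = np.array([1.0, 2.0, 3.0])
print(np.cumsum(np.pad(arr, (1, 0), "constant")))   # [0. 1. 3. 6.]
print(np.pad(np.cumsum(arr), (1, 0), "constant"))   # [0. 1. 3. 6.]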
You can use np.roll (shift right by 1) and then set the first entry to zero, as in the sketch below.
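A minimal sketch of that idea (with placeholder values for B and dt; note the result keeps the length of position, so the final cumulative value is dropped):

import numpy as np

B, dt = 1.0, 1.0
position = np.array([4, 4.34, 4.69, 5.02, 5.3, 5.7])

x = (B / position**2) * dt
A = np.roll(np.cumsum(x), 1)   # shift right; the grand total wraps around to the front
A[0] = 0                       # overwrite the wrapped total with the initial condition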
