Why is numpy.var O(N) in space?

I have an array of ~13GB. I call numpy.var on it to compute the variance. However, it allocates another ~13GB to do this. Why does it need O(N) space? Or am I calling numpy.var the wrong way?
import numpy as np
# data = ...
print('Variance: ', np.var(data))

NumPy creates an intermediate array for abs(data - data.mean()) ** 2 when computing the variance, which is where the extra O(N) memory goes. You can write your own variance function as a loop and make it fast with Numba:
import numpy as np
import numba as nb
@nb.njit(parallel=True)
def var_nb(a, ddof=0):
    n = len(a)
    m = a.sum() / n  # the mean always divides by n; ddof only affects the final division
    v = 0
    for i in nb.prange(n):
        v += abs(a[i] - m) ** 2
    return v / (n - ddof)
np.random.seed(100)
a = np.random.rand(100_000)
print(np.var(a))
# 0.08349747560941487
print(var_nb(a))
# 0.08349747560941487
%timeit np.var(a)
# 143 µs ± 414 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
%timeit var_nb(a)
# 40.2 µs ± 530 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)

This is faster without parallelization:
import numpy as np
def var(a: np.ndarray, axis: int = 0):
    return np.sum(abs(a - (a.sum(axis=axis) / len(a))) ** 2, axis=axis) / len(a)
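If the goal is specifically to avoid the ~13 GB temporary rather than to maximize speed, a chunked two-pass computation is another option. This is a minimal sketch of my own (not from the answers above); var_chunked and chunk_size are arbitrary names, and chunk_size should be tuned to the available memory:
import numpy as np
def var_chunked(data, chunk_size=10_000_000, ddof=0):
    # Assumes data is a 1-D array (use data.ravel() otherwise).
    n = data.size
    # First pass: the mean, computed without a large temporary.
    mean = data.sum() / n
    # Second pass: accumulate squared deviations chunk by chunk,
    # so only a chunk-sized temporary is ever allocated.
    sq_dev = 0.0
    for start in range(0, n, chunk_size):
        chunk = data[start:start + chunk_size]
        sq_dev += np.sum((chunk - mean) ** 2)
    return sq_dev / (n - ddof)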

Related

How can I speed up iterating over a large list and summing values?

# Generate test data
import random
test = list(range(150))
groups = []
for _ in range(75_000):
    groups.append(random.sample(test, 6))
Set up the variables as numpy arrays:
# Best version
import numpy as np
import random
from numba import jit  # Kind of optional, see below
# Generate test data
test = list(range(150))
groups = np.array([random.sample(test, 6) for _ in range(75_000)])
# This will change every time, but leaving it the same for the example
scores_dict = {i: random.uniform(0, 120) for i in range(150)}
scores = np.array(list(scores_dict.items()))
Here's the vectorized version using numpy's sum and take:
def fun1(scores, groups):
    for _ in range(6250):
        c = np.sum(np.take(scores[:, 1], groups), axis=1)
    return c
%timeit fun1(scores, groups) # Takes ~2.5 mins to run
18.6 s ± 625 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
If you really want to go all out you can try using numba on top of numpy:
@jit(nopython=True)
def fun2(scores, groups):
    for _ in range(6250):
        c = np.sum(np.take(scores[:, 1], groups), axis=1)
    return c
%timeit fun2(scores, groups) # Takes ~1.2 mins to run
10.1 s ± 1.32 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
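For reference, np.take with an index array is equivalent to plain fancy indexing here, so the same per-group sums can also be written without np.take. This is just an equivalence check of my own, not part of the original answer, reusing scores and groups from above:
# scores[:, 1] is the 1-D array of score values; indexing it with the
# (75_000, 6) integer array `groups` gives a (75_000, 6) array of scores.
c_take = np.sum(np.take(scores[:, 1], groups), axis=1)
c_fancy = scores[:, 1][groups].sum(axis=1)
print(np.allclose(c_take, c_fancy))  # Expected: True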

A tedious loop looking for improvements

In my code I need to repeatedly calculate a vector whose values are the means of different patches of another array.
Here is an example showing how I do it, but I found it too inefficient to run...
import numpy as np
vector_a = np.zeros(10)
array_a = np.random.random((100, 100))
for i in range(len(vector_a)):
    vector_a[i] = np.mean(array_a[:, i+20:i+40])
Is there any way to make it more efficient? Any comments or suggestions are very welcome! Many thanks!
(Yes, the 20 and 40 are fixed.)
EDIT:
Actually you can do this much faster. The previous function can be improved by operating on summed columns like this:
def rolling_means_faster1(array_a, n, first, size):
    # Sum the relevant columns
    sum_a = np.sum(array_a[:, first:(first + size + n - 1)], axis=0)
    # Reshape as before
    strides_b = (sum_a.strides[0], sum_a.strides[0])
    array_b = np.lib.stride_tricks.as_strided(sum_a, (n, size), strides_b)
    # Average
    v = np.sum(array_b, axis=1)
    v /= (len(array_a) * size)
    return v
Another way is to work with accumulated sums, adding and removing as necessary for each output element.
def rolling_means_faster2(array_a, n, first, size):
    # Sum the relevant columns
    sum_a = np.sum(array_a[:, first:(first + size + n - 1)], axis=0)
    # Add a zero at the beginning so the next operation works fine
    sum_a = np.insert(sum_a, 0, 0)
    # Sum the initial `size` elements, then add and remove partial sums as necessary
    v = np.sum(sum_a[:size]) - np.cumsum(sum_a[:n]) + np.cumsum(sum_a[-n:])
    # Average
    v /= (size * len(array_a))
    return v
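To make the add-and-remove idea concrete, here is a tiny 1-D illustration of my own (not from the answer): the rolling sum over a window can be obtained by subtracting a shifted cumulative sum from the cumulative sum itself.
import numpy as np
x = np.array([1, 2, 3, 4, 5], dtype=float)
window = 3
# Prepend a zero so the subtraction below lines up with the window starts.
cs = np.concatenate(([0.0], np.cumsum(x)))
rolling_sums = cs[window:] - cs[:-window]
print(rolling_sums)  # [ 6.  9. 12.]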
Benchmarking against the previous solutions:
import numpy as np
np.random.seed(100)
array_a = np.random.random((1000, 1000))
n = 100
first = 100
size = 200
%timeit rolling_means_orig(array_a, n, first, size)
# 12.7 ms ± 55.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit rolling_means(array_a, n, first, size)
# 5.49 ms ± 43.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit rolling_means_faster1(array_a, n, first, size)
# 166 µs ± 874 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
%timeit rolling_means_faster2(array_a, n, first, size)
# 182 µs ± 2.04 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
So these last two seem to be very close in performance. It may depend on the relative sizes of the inputs.
This is a possible vectorized solution:
import numpy as np
# Data
np.random.seed(100)
array_a = np.random.random((100, 100))
# Take all the relevant columns
slice_a = array_a[:, 20:40 + 10]
# Make a "rolling window" with stride tricks
strides_b = (slice_a.strides[1], slice_a.strides[0], slice_a.strides[1])
array_b = np.lib.stride_tricks.as_strided(slice_a, (10, 100, 20), (strides_b))
# Take mean
result = np.mean(array_b, axis=(1, 2))
# Original method for testing correctness
vector_a = np.zeros(10)
idv1 = np.arange(10) + 20
idv2 = np.arange(10) + 40
for i in range(len(vector_a)):
    vector_a[i] = np.mean(array_a[:, idv1[i]:idv2[i]])
print(np.allclose(vector_a, result))
# True
Here is a quick benchmark in IPython (sizes increased to make the difference more appreciable):
import numpy as np
def rolling_means(array_a, n, first, size):
    slice_a = array_a[:, first:(first + size + n)]
    strides_b = (slice_a.strides[1], slice_a.strides[0], slice_a.strides[1])
    array_b = np.lib.stride_tricks.as_strided(slice_a, (n, len(array_a), size), strides_b)
    return np.mean(array_b, axis=(1, 2))
def rolling_means_orig(array_a, n, first, size):
    vector_a = np.zeros(n)
    idv1 = np.arange(n) + first
    idv2 = np.arange(n) + (first + size)
    for i in range(len(vector_a)):
        vector_a[i] = np.mean(array_a[:, idv1[i]:idv2[i]])
    return vector_a
np.random.seed(100)
array_a = np.random.random((1000, 1000))
n = 100
first = 100
size = 200
%timeit rolling_means(array_a, n, first, size)
# 5.48 ms ± 26.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit rolling_means_orig(array_a, n, first, size)
# 32.8 ms ± 762 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
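As a side note (not part of the original answer), NumPy 1.20+ ships numpy.lib.stride_tricks.sliding_window_view, which builds the same kind of rolling window as the manual as_strided call with less room for stride mistakes. A possible equivalent, under the same n, first, and size arguments (rolling_means_swv is my own name):
from numpy.lib.stride_tricks import sliding_window_view
def rolling_means_swv(array_a, n, first, size):
    slice_a = array_a[:, first:(first + size + n)]
    # Windows of `size` columns sliding along axis 1; shape (rows, n + 1, size).
    windows = sliding_window_view(slice_a, window_shape=size, axis=1)
    # Keep the first n window positions and average over rows and window columns.
    return windows[:, :n, :].mean(axis=(0, 2))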
This solution works on the assumption that you are trying to compute a rolling average over a window of columns, but only for a subset of the window positions.
As an example, ignoring rows: given [0, 1, 2, 3, 4] and a window of 2, the averages are [0.5, 1.5, 2.5, 3.5], and you might only want the second and third of those.
Your current solution is inefficient because it recomputes the mean of each column for every output element in vector_a. Given that (a / n) + (b / n) == (a + b) / n, we can get away with computing the mean of each column only once and then combining the column means as needed to produce the final output.
window_first_start = idv1.min() # or idv1[0]
window_last_end = idv2.max() # or idv2[-1]
window_size = idv2[0] - idv1[0]
assert ((idv2 - idv1) == window_size).all(), "sanity check, not needed if assumption holds true"
# a view of the columns we are interested in, no copying is done here
view = array_a[:,window_first_start:window_last_end]
# calculate the means for each column
col_means = view.mean(axis=0)
# cumsum is used to find the rolling sum of means and so the rolling average
# We use an out variable to make sure we have a 0 in the first element of cum_sum.
# This makes life a little easier in the next step.
cum_sum = np.empty(len(col_means) + 1, dtype=col_means.dtype)
cum_sum[0] = 0
np.cumsum(col_means, out=cum_sum[1:])
result = (cum_sum[window_size:] - cum_sum[:-window_size]) / window_size
Having tested this against your own code, the above is significantly faster (increasing with the size of the input array), and slightly faster than the solution provided by jdehesa. With an input array of 1000x1000, it is two orders of magnitude faster than your solution and one order of magnitude faster than jdehesa's.
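A quick correctness check of my own against the original loop (reusing array_a, idv1, idv2, and result from above) could look like this:
# Recompute the original loop result and compare with the cumsum-based result.
expected = np.array([array_a[:, idv1[i]:idv2[i]].mean() for i in range(len(idv1))])
print(np.allclose(expected, result))  # Expected: True if the window-size assumption holds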
Try this:
import numpy as np
array_a = np.random.random((100,100))
vector_a = [np.mean(array_a[:,i+20:i+40]) for i in range(10)]

speeding up sympy matrix operations

I have written code that uses sympy to set up a matrix and a vector whose elements are sympy symbols. Then I invert the matrix and multiply the inverted matrix by the vector. This should be a generic solver for linear equation systems with n variables. I am interested in the symbolic solution of these linear equations.
The problem is that my code is too slow.
For instance, for n=4 it takes roughly 30 sec, but for n=7 I haven't been able to solve it so far: the code ran all night (8 h) and still hadn't finished in the morning.
This is my code.
from sympy import *  # also provides pprint
import pprint
niso = 4  # n, the number of variables (not defined in the original snippet)
MM = Matrix(niso, 1, lambda i, j: var('MM_%s' % (i+1)))
MA = Matrix(niso, 1, lambda i, j: var('m_%s%s' % ('A', chr(66+i))))
MX = Matrix(niso, 1, lambda i, j: var('m_%s%s' % (chr(66+i), 'A')))
RB = Matrix(niso, 1, lambda i, j: var('R_%s%s' % ('A'+chr(66+i), i+2)))
R = Matrix(niso, niso-1, lambda i, j: var('R_%s%d' % (chr(65+i), j+2)))
K = Matrix(niso-1, 1, lambda i, j: var('K_%d' % (i+2)))
C = Matrix(niso-1, 1, lambda i, j: var('A_%d' % i))
A = Matrix(niso-1, niso-1, lambda i, j: var('A_%d' % i))
b = Matrix(niso-1, 1, lambda i, j: var('A_%d' % i))
for i in range(0, niso-1):
    for j in range(0, niso-1):
        A[i, j] = MM[j+1, 0]*(Add(Mul(R[0, j], 1/MA[i, 0]/(RB[i, 0]-R[0, i]))) + R[i+1, j]/MX[i, 0]/(-RB[i, 0]+R[0, i]))
for i in range(0, niso-1):
    b[i, 0] = MM[0, 0]*(Add(Mul(1, 1/MA[i, 0]/(RB[i, 0]-R[0, i]))) + 1/MX[i, 0]/(-RB[i, 0]+R[0, i]))
A_in = Inverse(A)
if niso <= 4:
    X = simplify(A_in*b)
if niso > 4:
    X = A_in*b
pprint(X)
Is there a way to speed it up?
Don't invert! With n=4
%timeit soln = A.LUsolve(b)
697 µs ± 12.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
With n=10
%timeit soln = A.LUsolve(b)
431 ms ± 13 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
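A minimal sketch of the suggested approach on a generic symbolic system of my own (the a_ij and b_i symbol names are placeholders, not the poster's matrices):
from sympy import Matrix, symbols, simplify
n = 3
# Build a generic symbolic system A*x = b.
A = Matrix(n, n, lambda i, j: symbols('a_%d%d' % (i, j)))
b = Matrix(n, 1, lambda i, j: symbols('b_%d' % i))
# Solve by LU decomposition instead of forming A**-1 explicitly.
x = A.LUsolve(b)
# simplify() can still be applied afterwards if a tidier form is needed,
# but for larger n it is often the expensive step.
x_simplified = simplify(x)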

Numpy efficient matrix self-multiplication (gram matrix)

I want to compute B = A @ A.T in numpy. Obviously, the answer is a symmetric matrix (i.e. B[i, j] == B[j, i]).
However, it is not clear to me how to leverage this easily to cut the computation time down in half (by only computing the lower triangle of B and then using that to get the upper triangle for free).
Is there a way to perform this optimally?
As noted in @PaulPanzer's link, dot can detect this case. Here's the timing proof:
In [355]: A = np.random.rand(1000,1000)
In [356]: timeit A.dot(A.T)
57.4 ms ± 960 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [357]: B = A.T.copy()
In [358]: timeit A.dot(B)
98.6 ms ± 805 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
(See also: Numpy dot too clever about symmetric multiplications)
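If you want to compute only one triangle explicitly, the underlying BLAS routine syrk is exposed through scipy.linalg.blas. This is a hedged sketch of my own, assuming the wrapper follows the standard BLAS convention (C = alpha * A @ A.T with only one triangle filled in):
import numpy as np
from scipy.linalg import blas
A = np.random.rand(1000, 1000)
# dsyrk fills only one triangle of alpha * A @ A.T; the other stays zero.
C = blas.dsyrk(alpha=1.0, a=A)
# Mirror the computed triangle into the other half to get the full symmetric matrix.
C = C + C.T - np.diag(np.diag(C))
print(np.allclose(C, A @ A.T))  # Expected: True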
You can always use sklearn's pairwise_distances
Usage:
from sklearn.metrics.pairwise import pairwise_distances
gram = pairwise_distances(x, metric=metric)
Where metric is a callable or a string defining one of their implemented metrics (full list in the link above)
But, I wrote this for myself a while back so I can share what I did:
import numpy as np
def computeGram(elements, dist):
    n = len(elements)
    gram = np.zeros([n, n])
    for i in range(n):
        for j in range(i + 1):
            gram[i, j] = dist(elements[i], elements[j])
    upTriIdxs = np.triu_indices(n)
    gram[upTriIdxs] = gram.T[upTriIdxs]
    return gram
Where dist is a callable, in your case np.inner
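For example, a possible usage with np.inner as the callable (my own illustration):
x = np.random.rand(5, 3)
gram = computeGram(x, np.inner)
print(np.allclose(gram, x @ x.T))  # Expected: True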

Computing distances on inhomogeneous vectors: python lists vs numpy array

In my project (about clustering algorithms, specifically k-medoids) it is crucial to be able to compute pairwise distances efficiently. I have a dataset of ~60,000 objects. The problem is, distances must be computed between inhomogeneous vectors, i.e. vectors which may differ in length (in that case, missing items are treated as if they were 0).
Here is a minimal working example:
# %%
MAX_LEN = 11
N = 100
import random
def manhattan_distance(vec1, vec2):
    n1, n2 = len(vec1), len(vec2)
    n = min(n1, n2)
    dist = 0
    for i in range(n):
        dist += abs(vec1[i] - vec2[i])
    if n1 > n2:
        for i in range(n, n1):
            dist += abs(vec1[i])
    else:
        for i in range(n, n2):
            dist += abs(vec2[i])
    return dist
def compute_distances():
    n = len(data)
    for i in range(n):
        for j in range(n):
            manhattan_distance(data[i], data[j])
data = []
for i in range(N):
    data.append([])
    for k in range(random.randint(5, MAX_LEN)):
        data[i].append(random.randint(0, 10))
%timeit compute_distances()
import numpy as np
def manhattan_distance_np(vec1, vec2):
    return np.absolute(vec1 - vec2).sum()
def compute_distances_np():
    n = len(data)
    for i in range(n):
        for j in range(n):
            manhattan_distance_np(data_np[i], data_np[j])
data_np = [np.append(np.asarray(d), np.zeros(MAX_LEN - len(d))) for d in data]
%timeit compute_distances_np()
I was testing my Python lists implementation versus a numpy implementation.
Here are the results (computation times):
Python lists: 79.6 ms ± 3.78 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
numpy arrays: 226 ms ± 7.18 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Why is there such a huge difference? I thought numpy arrays were supposed to be really fast.
Is there a way to improve my code? Am I misunderstanding the inner workings of numpy?
Edit: I may need, in the future, to be able to use a custom distance function for the pairwise distance computations. The method should also work for datasets of ~60,000 objects without running out of memory.
I believe you can just make your arrays dense and set the unused last elements to 0s.
import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
def batch_pdist(x, metric, batchsize=1000):
    dists = np.zeros((len(x), len(x)))
    for i in range(0, len(x), batchsize):
        for j in range(0, len(x), batchsize):
            dist_batch = cdist(x[i:i+batchsize], x[j:j+batchsize], metric=metric)
            dists[i:i+batchsize, j:j+batchsize] = dist_batch
    return dists
MIN_LEN = 5
MAX_LEN = 11
N = 10000
M = 10
data = np.zeros((N, MAX_LEN))
for i in range(N):
    num_nonzero = np.random.randint(MIN_LEN, MAX_LEN)
    data[i, :num_nonzero] = np.random.randint(0, M, num_nonzero)
dists = squareform(pdist(data, metric='cityblock'))
dists2 = batch_pdist(data, metric='cityblock', batchsize=500)
print((dists == dists2).all())
Timing Output:
%timeit squareform(pdist(data, metric='cityblock'))
43.8 µs ± 134 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
Edit:
For a custom distance function see the very bottom of this documentation.
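If a custom metric is needed, pdist and cdist also accept a Python callable (much slower than the built-in string metrics, so this is only sketched on a subset of the data defined above; my own example):
# Custom callable metric: the same Manhattan (cityblock) distance, written by hand.
custom = lambda u, v: np.abs(u - v).sum()
subset = data[:500]  # callables are slow, so demonstrate on a subset
dists_custom = squareform(pdist(subset, metric=custom))
print(np.allclose(dists_custom, squareform(pdist(subset, metric='cityblock'))))  # Expected: True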
I finally found probably the most straightforward way to solve this problem without changing the code too much, relying on computation rather than extra memory (which could be unfeasible for very large datasets).
Based on juanpa.arrivillaga's suggestion, I tried numba, a library that speeds up array-oriented and math-heavy Python code and is targeted mainly at numpy. You can read a good guide on optimizing Python code here: https://jakevdp.github.io/blog/2015/02/24/optimizing-python-with-numpy-and-numba/.
MAX_LEN = 11
N = 100
# Pure Python lists implementation.
import random
def manhattan_distance(vec1, vec2):
    n1, n2 = len(vec1), len(vec2)
    n = min(n1, n2)
    dist = 0
    for i in range(n):
        dist += abs(vec1[i] - vec2[i])
    if n1 > n2:
        for i in range(n, n1):
            dist += abs(vec1[i])
    else:
        for i in range(n, n2):
            dist += abs(vec2[i])
    return dist
def compute_distances():
    n = len(data)
    for i in range(n):
        for j in range(n):
            manhattan_distance(data[i], data[j])
data = []
for i in range(N):
    data.append([])
    for k in range(random.randint(5, MAX_LEN)):
        data[i].append(random.randint(0, 10))
%timeit compute_distances()
# numpy+numba implementation.
import numpy as np
from numba import jit
@jit
def manhattan_distance_np(vec1, vec2):
    return np.absolute(vec1 - vec2).sum()
@jit
def compute_distances_np():
    n = len(data)
    for i in range(n):
        for j in range(n):
            manhattan_distance_np(data_np[i], data_np[j])
data_np = np.array([np.append(np.asarray(d), np.zeros(MAX_LEN - len(d))) for d in data])
%timeit compute_distances_np()
Timing output:
%timeit compute_distances()
78.4 ms ± 3.44 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
%timeit compute_distances_np()
4.1 ms ± 14.7 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
As you can see, the numpy + numba version is about 19 times faster than the pure Python one (with no other code optimization involved).
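If more speed is needed, one further option (my own hedged sketch, not part of the original answer) is to move the whole double loop into a single nopython-compiled function that works directly on the dense 2-D array, so there is no per-pair Python call and no temporary arrays; all_pairs_manhattan is a hypothetical helper name:
import numpy as np
from numba import njit
@njit
def all_pairs_manhattan(data_np):
    # data_np: dense (N, MAX_LEN) array with the unused trailing elements set to 0.
    n = data_np.shape[0]
    out = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            d = 0.0
            for k in range(data_np.shape[1]):
                d += abs(data_np[i, k] - data_np[j, k])
            out[i, j] = d
    return out
Usage would simply be dist_matrix = all_pairs_manhattan(data_np), reusing data_np from above.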
