How to repeat elements of an array along two axes? - python

I want to repeat elements of an array along axis 0 and axis 1 for M and N times respectively:
import numpy as np
a = np.arange(12).reshape(3, 4)
b = a.repeat(2, 0).repeat(2, 1)
print(b)
[[ 0  0  1  1  2  2  3  3]
 [ 0  0  1  1  2  2  3  3]
 [ 4  4  5  5  6  6  7  7]
 [ 4  4  5  5  6  6  7  7]
 [ 8  8  9  9 10 10 11 11]
 [ 8  8  9  9 10 10 11 11]]
This works, but I'd like to know whether there are better methods that avoid creating a temporary array.

You could use the Kronecker product, see numpy.kron:
>>> a = np.arange(12).reshape(3,4)
>>> print(np.kron(a, np.ones((2,2), dtype=a.dtype)))
[[ 0  0  1  1  2  2  3  3]
 [ 0  0  1  1  2  2  3  3]
 [ 4  4  5  5  6  6  7  7]
 [ 4  4  5  5  6  6  7  7]
 [ 8  8  9  9 10 10 11 11]
 [ 8  8  9  9 10 10 11 11]]
Your original method is OK too, though!
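The same idea extends to unequal repeat factors; a quick sketch with M=3 row repeats and N=2 column repeats:
>>> np.kron(a, np.ones((3, 2), dtype=a.dtype)).shape
(9, 8)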

You can make use of np.broadcast_to here:
def broadcast_tile(a, h, w):
    x, y = a.shape
    m, n = x * h, y * w
    return np.broadcast_to(
        a.reshape(x, 1, y, 1), (x, h, y, w)
    ).reshape(m, n)
broadcast_tile(a, 2, 2)
array([[ 0,  0,  1,  1,  2,  2,  3,  3],
       [ 0,  0,  1,  1,  2,  2,  3,  3],
       [ 4,  4,  5,  5,  6,  6,  7,  7],
       [ 4,  4,  5,  5,  6,  6,  7,  7],
       [ 8,  8,  9,  9, 10, 10, 11, 11],
       [ 8,  8,  9,  9, 10, 10, 11, 11]])
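As a sanity check (my sketch, not part of the original answer): the broadcast step is a zero-copy view, and only the final reshape materializes the output, exactly once:
a = np.arange(12).reshape(3, 4)
view = np.broadcast_to(a.reshape(3, 1, 4, 1), (3, 2, 4, 2))
print(np.shares_memory(view, a))  # True: still a view of a
out = view.reshape(6, 8)          # the single copy happens here
print(np.shares_memory(out, a))   # False: reshape had to materialize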
Performance
Functions
def chris(a, h, w):
    x, y = a.shape
    m, n = x * h, y * w
    return np.broadcast_to(
        a.reshape(x, 1, y, 1), (x, h, y, w)
    ).reshape(m, n)

def alex_riley(a, b0, b1):
    r, c = a.shape
    rs, cs = a.strides
    x = np.lib.stride_tricks.as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0))
    return x.reshape(r*b0, c*b1)

def paul_panzer(a, b0, b1):
    r, c = a.shape
    out = np.empty((r, b0, c, b1), a.dtype)
    out[...] = a[:, None, :, None]
    return out.reshape(r*b0, c*b1)

def wim(a, h, w):
    return np.kron(a, np.ones((h, w), dtype=a.dtype))
Setup
import numpy as np
import pandas as pd
from timeit import timeit

res = pd.DataFrame(
    index=['chris', 'alex_riley', 'paul_panzer', 'wim'],
    columns=[5, 10, 20, 50, 100, 500, 1000],
    dtype=float
)

a = np.arange(100).reshape((10, 10))

for f in res.index:
    for c in res.columns:
        h = w = c
        stmt = '{}(a, h, w)'.format(f)
        setp = 'from __main__ import h, w, a, {}'.format(f)
        res.at[f, c] = timeit(stmt, setp, number=50)
Output (the timing results were shown as a plot in the original post)

Since the result cannot be implemented as a view, as_strided offers no benefit over simple preallocation and broadcasting. Because of its overhead, as_strided in fact seems a bit slower (I did no proper benchmarking, though).
The as_strided code is taken from @Alex Riley's post.
from numpy.lib.stride_tricks import as_strided
import numpy as np
def tile_array(a, b0, b1):
    r, c = a.shape                                     # number of rows/columns
    rs, cs = a.strides                                 # row/column strides
    x = as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0))  # view a as larger 4D array
    return x.reshape(r*b0, c*b1)                       # create new 2D array

def tile_array_pp(a, b0, b1):
    r, c = a.shape
    out = np.empty((r, b0, c, b1), a.dtype)
    out[...] = a[:, None, :, None]
    return out.reshape(r*b0, c*b1)

a = np.arange(9).reshape(3, 3)

kwds = {'globals': {'f_ar': tile_array, 'f_pp': tile_array_pp, 'a': a},
        'number': 1000}

from timeit import timeit
print('as_strided', timeit('f_ar(a, 100, 100)', **kwds))
print('broadcast ', timeit('f_pp(a, 100, 100)', **kwds))
Sample run:
as_strided 0.048387714981799945
broadcast 0.04324757700669579

Another solution is to use as_strided. kron is much slower than using repeat twice. I have found that as_strided is much faster than a double repeat in many cases (for small arrays [<250x250] with only a doubling in each dimension, as_strided was slower). The as_strided trick is as follows:
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(1000000).reshape((1000, 1000))  # dummy data

N, M = 4, 3  # number of times to replicate each point in each dimension
H, W = a.shape
b = as_strided(a, (H, N, W, M), (a.strides[0], 0, a.strides[1], 0)).reshape((H*N, W*M))
This works by using 0-length strides, which cause numpy to read the same value multiple times (until it gets to the next dimension). The final reshape does copy the data, but only once, unlike a double repeat, which copies the data twice.
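To make the zero strides visible, a small sketch (my addition):
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(6).reshape(2, 3)
v = as_strided(a, (2, 2, 3, 2), (a.strides[0], 0, a.strides[1], 0))
print(v.strides)               # e.g. (24, 0, 8, 0) for int64 data: two zero strides
print(np.shares_memory(v, a))  # True: nothing is copied until v is reshaped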

Errata: I'm only taking 2x upsampling into account.
TL;DR It turns out that
np.repeat(np.repeat(a, 2, axis=1), 2, axis=0)
is the fastest, even ahead of the OpenCV version. So the answer is: there is no faster way in numpy today, but you can get a slight improvement by changing the order of the axes.
And if you don't mind OpenCV:
cv.resize(a, None, fx=2, fy=2, interpolation=cv.INTER_NEAREST)
Here is the test.
import timeit
import numpy as np
import cv2 as cv
test = np.zeros((16, 16, 3), dtype=np.float32)
def measure(f):
    t = timeit.timeit("f(test)", number=1000, globals={"test": test, "f": f})
    print("%s - %f" % (f.__name__, t))
    return f, t

def fastest(c):
    print(c.__name__)
    winner, t = min((measure(getattr(c, ve)) for ve in dir(c) if ve.startswith("alg_")), key=lambda x: x[1])
    print("%s winner: %s - %f" % (c.__name__, winner.__name__, t))
    return winner

@fastest
class nn:
    def alg_01(a):
        return np.repeat(np.repeat(a, 2, axis=0), 2, axis=1)

    def alg_02(a):
        return np.repeat(np.repeat(a, 2, axis=1), 2, axis=0)

    def alg_03(a):
        b = a[:, None, :, None]
        b = np.concatenate((b, b), axis=1)
        b = np.concatenate((b, b), axis=3)
        return b.reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_04(a):
        b = a[:, None, :, None]
        b = np.concatenate((b, b), axis=3)
        b = np.concatenate((b, b), axis=1)
        return b.reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_05(a):
        return (a[:, None, :, None]*np.ones((1, 2, 1, 2)+((1,)*len(a.shape[2:])), dtype=np.float32)).reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_06(a):
        return cv.resize(a, None, fx=2, fy=2, interpolation=cv.INTER_NEAREST)

    def alg_07(a):
        return a[:, None, :, None][:, (0, 0)][:, :, :, (0, 0)].reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_08(a):
        return a[:, None, :, None][:, :, :, (0, 0)][:, (0, 0)].reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_09(a):
        return np.kron(a, np.ones((2, 2), dtype=np.float32))

    def alg_10(a):
        return np.broadcast_to(a[:, None, :, None], (a.shape[0], 2, a.shape[1], 2)+a.shape[2:]).reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_11(a):
        ret = np.empty((a.shape[0], 2, a.shape[1], 2, *a.shape[2:]), dtype=np.float32)
        ret[...] = a[:, None, :, None]
        ret.resize((a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:]), refcheck=False)
        return ret
The result is:
nn
alg_01 - 0.040967
alg_02 - 0.033744
alg_03 - 0.057969
alg_04 - 0.048739
alg_05 - 0.076595
alg_06 - 0.078638
alg_07 - 0.084692
alg_08 - 0.084539
alg_09 - 0.344339
alg_10 - 0.078707
alg_11 - 0.049424
nn winner: alg_02 - 0.033744

Related

How to break a Numpy ndarray into blocks [duplicate]

Is there a way to slice a 2d array in numpy into smaller 2d arrays?
Example
[[1,2,3,4],  ->  [[1,2]   [[3,4]
 [5,6,7,8]]       [5,6]]   [7,8]]
So I basically want to cut down a 2x4 array into two 2x2 arrays. Looking for a generic solution to be used on images.
There was another question a couple of months ago which clued me in to the idea of using reshape and swapaxes. The h//nrows makes sense since this keeps the first block's rows together. It also makes sense that you'll need nrows and ncols to be part of the shape. -1 tells reshape to fill in whatever number is necessary to make the reshape valid. Armed with the form of the solution, I just tried things until I found the formula that works.
You should be able to break your array into "blocks" using some combination of reshape and swapaxes:
def blockshaped(arr, nrows, ncols):
    """
    Return an array of shape (n, nrows, ncols) where
    n * nrows * ncols = arr.size

    If arr is a 2D array, the returned array should look like n subblocks with
    each subblock preserving the "physical" layout of arr.
    """
    h, w = arr.shape
    assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}"
    assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}"
    return (arr.reshape(h//nrows, nrows, -1, ncols)
               .swapaxes(1, 2)
               .reshape(-1, nrows, ncols))
For example, it turns c
np.random.seed(365)
c = np.arange(24).reshape((4, 6))
print(c)
[out]:
[[ 0  1  2  3  4  5]
 [ 6  7  8  9 10 11]
 [12 13 14 15 16 17]
 [18 19 20 21 22 23]]
into
print(blockshaped(c, 2, 3))
[out]:
[[[ 0  1  2]
  [ 6  7  8]]

 [[ 3  4  5]
  [ 9 10 11]]

 [[12 13 14]
  [18 19 20]]

 [[15 16 17]
  [21 22 23]]]
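To trace the mechanics, here are the intermediate shapes for the same c (a sketch I added):
h, w, nrows, ncols = 4, 6, 2, 3
s1 = c.reshape(h//nrows, nrows, -1, ncols)  # (2, 2, 2, 3): split the row and column axes
s2 = s1.swapaxes(1, 2)                      # (2, 2, 2, 3): bring the two block axes together
s3 = s2.reshape(-1, nrows, ncols)           # (4, 2, 3): one 2x3 subblock per leading index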
I've posted an inverse function, unblockshaped, here, and an N-dimensional generalization here. The generalization gives a little more insight into the reasoning behind this algorithm.
Note that there is also superbatfish's
blockwise_view. It arranges the
blocks in a different format (using more axes) but it has the advantage of (1)
always returning a view and (2) being capable of handling arrays of any
dimension.
It seems to me that this is a task for numpy.split or some variant.
e.g.
a = np.arange(30).reshape([5,6]) #a.shape = (5,6)
a1 = np.split(a,3,axis=1)
#'a1' is a list of 3 arrays of shape (5,2)
a2 = np.split(a, [2,4])
#'a2' is a list of three arrays of shape (2,6), (2,6), (1,6)
If you have an NxN image you can create, e.g., a list of 2 N x (N/2) subimages, and then divide them along the other axis.
numpy.hsplit and numpy.vsplit are also available.
There are some other answers that seem well-suited for your specific case already, but your question piqued my interest in the possibility of a memory-efficient solution usable up to the maximum number of dimensions that numpy supports, and I ended up spending most of the afternoon coming up with a possible method. (The method itself is relatively simple; it's just that I still haven't used most of the really fancy features that numpy supports, so most of the time was spent researching what numpy has available and how much it could do so that I didn't have to do it.)
def blockgen(array, bpa):
    """Creates a generator that yields multidimensional blocks from the given
    array(_like); bpa is an array_like consisting of the number of blocks per axis
    (minimum of 1, must be a divisor of the corresponding axis size of array). As
    the blocks are selected using normal numpy slicing, they will be views rather
    than copies; this is good for very large multidimensional arrays that are being
    blocked, and for very large blocks, but it also means that the result must be
    copied if it is to be modified (unless modifying the original data as well is
    intended)."""
    bpa = np.asarray(bpa)  # in case bpa wasn't already an ndarray

    # parameter checking
    if array.ndim != bpa.size:  # bpa doesn't match array dimensionality
        raise ValueError("Size of bpa must be equal to the array dimensionality.")
    if (not np.issubdtype(bpa.dtype, np.integer)  # bpa must be all integers
            or (bpa < 1).any()                    # all values in bpa must be >= 1
            or (array.shape % bpa).any()):        # % != 0 means not evenly divisible
        raise ValueError("bpa ({0}) must consist of nonzero positive integers "
                         "that evenly divide the corresponding array axis "
                         "size".format(bpa))

    # generate block edge indices
    rgen = (np.r_[:array.shape[i]+1:array.shape[i]//blk_n]
            for i, blk_n in enumerate(bpa))

    # build slice sequences for each axis (unfortunately broadcasting
    # can't be used to make the items easy to operate over)
    c = [[np.s_[i:j] for i, j in zip(r[:-1], r[1:])] for r in rgen]

    # Now to get the blocks; this is slightly less efficient than it could be
    # because numpy doesn't like jagged arrays and I didn't feel like writing
    # a ufunc for it.
    for idxs in np.ndindex(*bpa):
        blockbounds = tuple(c[j][idxs[j]] for j in range(bpa.size))
        yield array[blockbounds]
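A quick usage sketch (my example; two blocks per axis on a 4x6 array):
arr = np.arange(24).reshape(4, 6)
for block in blockgen(arr, (2, 2)):
    print(block.shape)  # four (2, 3) views into arr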
Your question is practically the same as this one. You can use the one-liner with np.ndindex() and reshape():
def cutter(a, r, c):
    lenr = a.shape[0] // r
    lenc = a.shape[1] // c
    return np.array([a[i*r:(i+1)*r, j*c:(j+1)*c] for (i, j) in np.ndindex(lenr, lenc)]).reshape(lenr, lenc, r, c)
To create the result you want:
a = np.arange(1, 9).reshape(2, 4)
#array([[1, 2, 3, 4],
# [5, 6, 7, 8]])
cutter(a, 1, 2)
#array([[[[1, 2]],
#        [[3, 4]]],
#
#       [[[5, 6]],
#        [[7, 8]]]])
A minor enhancement to TheMeaningfulEngineer's answer that handles the case when the big 2D array cannot be perfectly sliced into equally sized subarrays:
def blockfy(a, p, q):
    '''
    Divides array a into subarrays of size p-by-q
    p: block row size
    q: block column size
    '''
    m = a.shape[0]  # image row size
    n = a.shape[1]  # image column size

    # pad array with NaNs so it can be divided by p row-wise and by q column-wise
    bpr = ((m-1)//p + 1)  # number of block rows
    bpc = ((n-1)//q + 1)  # number of block columns
    M = p * bpr
    N = q * bpc

    A = np.nan * np.ones([M, N])
    A[:a.shape[0], :a.shape[1]] = a

    block_list = []
    previous_row = 0
    for row_block in range(bpr):
        previous_row = row_block * p
        previous_column = 0
        for column_block in range(bpc):
            previous_column = column_block * q
            block = A[previous_row:previous_row+p, previous_column:previous_column+q]

            # remove nan columns and nan rows
            nan_cols = np.all(np.isnan(block), axis=0)
            block = block[:, ~nan_cols]
            nan_rows = np.all(np.isnan(block), axis=1)
            block = block[~nan_rows, :]

            # append
            if block.size:
                block_list.append(block)

    return block_list
Examples:
a = np.arange(25)
a = a.reshape((5,5))
out = blockfy(a, 2, 3)
a->
array([[ 0,  1,  2,  3,  4],
       [ 5,  6,  7,  8,  9],
       [10, 11, 12, 13, 14],
       [15, 16, 17, 18, 19],
       [20, 21, 22, 23, 24]])
out[0] ->
array([[0., 1., 2.],
       [5., 6., 7.]])
out[1] ->
array([[3., 4.],
       [8., 9.]])
out[-1] ->
array([[23., 24.]])
For now it just works when the big 2D array can be perfectly sliced into equally sized subarrays.
The code below slices
a -> array([[ 0,  1,  2,  3,  4,  5],
            [ 6,  7,  8,  9, 10, 11],
            [12, 13, 14, 15, 16, 17],
            [18, 19, 20, 21, 22, 23]])
into this
block_array ->
array([[[ 0,  1,  2],
        [ 6,  7,  8]],

       [[ 3,  4,  5],
        [ 9, 10, 11]],

       [[12, 13, 14],
        [18, 19, 20]],

       [[15, 16, 17],
        [21, 22, 23]]])
p and q determine the block size
Code
import numpy as np

a = np.arange(24)
a = a.reshape((4, 6))

m = a.shape[0]  # image row size
n = a.shape[1]  # image column size

p = 2  # block row size
q = 3  # block column size

blocks_per_row = m // p     # number of block rows
blocks_per_column = n // q  # number of block columns

block_array = []
previous_row = 0
for row_block in range(blocks_per_row):
    previous_row = row_block * p
    previous_column = 0
    for column_block in range(blocks_per_column):
        previous_column = column_block * q
        block = a[previous_row:previous_row+p, previous_column:previous_column+q]
        block_array.append(block)
block_array = np.array(block_array)
If you want a solution that also handles the cases when the matrix is not equally divided, you can use this:
from functools import reduce
from operator import add

half_split = np.array_split(input, 2)
res = map(lambda x: np.array_split(x, 2, axis=1), half_split)
res = reduce(add, res)
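For example, on an uneven 5x5 input this yields four blocks of slightly different shapes (a sketch I added):
import numpy as np
from functools import reduce
from operator import add

x = np.arange(25).reshape(5, 5)
half_split = np.array_split(x, 2)
res = reduce(add, map(lambda m: np.array_split(m, 2, axis=1), half_split))
print([m.shape for m in res])  # [(3, 3), (3, 2), (2, 3), (2, 2)]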
Here is a solution based on unutbu's answer that handles the case where the matrix cannot be equally divided. In this case, it will resize the matrix before using some interpolation. You need OpenCV for this. Note that I had to swap ncols and nrows to make it work; I haven't figured out why.
import numpy as np
import cv2
import math
def blockshaped(arr, r_nbrs, c_nbrs, interp=cv2.INTER_LINEAR):
    """
    arr     a 2D array, typically an image
    r_nbrs  number of rows
    c_nbrs  number of cols
    """
    arr_h, arr_w = arr.shape

    size_w = int(math.floor(arr_w // c_nbrs) * c_nbrs)
    size_h = int(math.floor(arr_h // r_nbrs) * r_nbrs)

    if size_w != arr_w or size_h != arr_h:
        arr = cv2.resize(arr, (size_w, size_h), interpolation=interp)

    nrows = int(size_w // r_nbrs)
    ncols = int(size_h // c_nbrs)

    return (arr.reshape(r_nbrs, ncols, -1, nrows)
               .swapaxes(1, 2)
               .reshape(-1, ncols, nrows))
a = np.random.randint(1, 9, size=(9,9))
out = [np.hsplit(x, 3) for x in np.vsplit(a,3)]
print(a)
print(out)
yields
[[7 6 2 4 4 2 5 2 3]
[2 3 7 6 8 8 2 6 2]
[4 1 3 1 3 8 1 3 7]
[6 1 1 5 7 2 1 5 8]
[8 8 7 6 6 1 8 8 4]
[6 1 8 2 1 4 5 1 8]
[7 3 4 2 5 6 1 2 7]
[4 6 7 5 8 2 8 2 8]
[6 6 5 5 6 1 2 6 4]]
[[array([[7, 6, 2],
[2, 3, 7],
[4, 1, 3]]), array([[4, 4, 2],
[6, 8, 8],
[1, 3, 8]]), array([[5, 2, 3],
[2, 6, 2],
[1, 3, 7]])], [array([[6, 1, 1],
[8, 8, 7],
[6, 1, 8]]), array([[5, 7, 2],
[6, 6, 1],
[2, 1, 4]]), array([[1, 5, 8],
[8, 8, 4],
[5, 1, 8]])], [array([[7, 3, 4],
[4, 6, 7],
[6, 6, 5]]), array([[2, 5, 6],
[5, 8, 2],
[5, 6, 1]]), array([[1, 2, 7],
[8, 2, 8],
[2, 6, 4]])]]
I'll publish my solution. Notice that this code doesn't actually create copies of the original array, so it works well with big data. Moreover, it doesn't crash if the array cannot be divided evenly (but you can easily add a condition for that by deleting ceil and checking whether the divisions leave no remainder).
import numpy as np
from math import ceil

a = np.arange(9).reshape(3, 3)

p, q = 2, 2                    # block height and width
n_rows, n_cols = a.shape
row_slices = ceil(n_rows / p)  # number of block rows
col_slices = ceil(n_cols / q)  # number of block columns

for i in range(row_slices):
    for j in range(col_slices):
        block = a[i * p : i * p + p, j * q : j * q + q]
        # do something with the block
This code changes (or, more precisely, gives you direct access to part of an array) this:
[[0 1 2]
[3 4 5]
[6 7 8]]
Into this:
[[0 1]
[3 4]]
[[2]
[5]]
[[6 7]]
[[8]]
If you need actual copies, Aenaon's code is what you are looking for.
If you are sure that the big array can be divided evenly, you can use the numpy splitting tools.
To add to @Aenaon's answer and his blockfy function: if you are working with color images / 3D arrays, here is my pipeline to create crops of 224 x 224 for 3-channel input
def blockfy(a, p, q):
    '''
    Divides array a into subarrays of size p-by-q
    p: block row size
    q: block column size
    '''
    m = a.shape[0]  # image row size
    n = a.shape[1]  # image column size

    # pad array with NaNs so it can be divided by p row-wise and by q column-wise
    bpr = ((m-1)//p + 1)  # number of block rows
    bpc = ((n-1)//q + 1)  # number of block columns
    M = p * bpr
    N = q * bpc

    A = np.nan * np.ones([M, N])
    A[:a.shape[0], :a.shape[1]] = a

    block_list = []
    previous_row = 0
    for row_block in range(bpr):
        previous_row = row_block * p
        previous_column = 0
        for column_block in range(bpc):
            previous_column = column_block * q
            block = A[previous_row:previous_row+p, previous_column:previous_column+q]

            # remove nan columns and nan rows
            nan_cols = np.all(np.isnan(block), axis=0)
            block = block[:, ~nan_cols]
            nan_rows = np.all(np.isnan(block), axis=1)
            block = block[~nan_rows, :]

            # append
            if block.size:
                block_list.append(block)

    return block_list
then extended above to
for file in os.listdir(path_to_crop):  ### list files in your folder
    img = io.imread(path_to_crop + file, as_gray=False)  ### open image

    r = blockfy(img[:,:,0], 224, 224)  ### crop blocks of 224 x 224 for red channel
    g = blockfy(img[:,:,1], 224, 224)  ### crop blocks of 224 x 224 for green channel
    b = blockfy(img[:,:,2], 224, 224)  ### crop blocks of 224 x 224 for blue channel

    for x in range(0, len(r)):
        img = np.array((r[x], g[x], b[x]))  ### combine each channel into one, patch by patch
        img = img.astype(np.uint8)  ### cast back to proper integers
        img_swap = img.swapaxes(0, 2)  ### need to swap axes due to the way things were processed
        img_swap_2 = img_swap.swapaxes(0, 1)  ### do it again
        Image.fromarray(img_swap_2).save(path_save_crop + str(x) + "bounding" + file,
                                         format='jpeg',
                                         subsampling=0,
                                         quality=100)  ### save patch with new name etc

Circular Correlation in Theano

I'm trying to compute the circular cross-correlation of two signals with Theano, to use it in the further calculation of a loss that I would optimize over. But I'm not quite sure how to do that.
It is defined as:
(f * g)[n] = \sum_k f[k] g[k+n]
cc[n] = \sum_k (f * g)[n - kN]
where the second line is the "periodic" summation over the signal length N (equivalently, the index k+n is taken modulo N).
I could do an ordinary correlation and then perform periodic summation, but it's not quite clear how to do that (periodic summation) symbolically (using scan, probably?)
conv2d = T.signal.conv.conv2d
x = T.dmatrix()
y = T.dmatrix()
veclen = x.shape[1]
corr_expr = conv2d(x, y[:, ::-1], image_shape=(1, veclen), border_mode='full')
# circ_corr = T.sum([corr_expr[k::veclen] for k in T.arange(veclen)])
corr = theano.function([x, y], outputs=circ_corr)
corr( np.array([[2, 3, 5]]), np.array([[7, 11, 13]]) )
or use the circular cross-correlation theorem and compute it as iFFT(FFT(x)*FFT(y)):
import theano.sandbox.fourier as dft
x = T.dmatrix()
y = T.dvector()
veclen = x.shape[1]
exp = T.real(
    dft.ifft(
        dft.fft(x, veclen, axis=1)
        * dft.fft(y[::-1], y.shape[0], axis=1).reshape((1, -1)),
        veclen, axis=1
    )
)[:, ::-1]
f = theano.function([x, y], outputs=exp)
f(np.array([[2, 3, 5], [3, 4, 4], [5, 6, 7]]), np.array([7, 11, 13]) )
but in this case I can't actually compute the gradient, because the gradient for ifft (and for all functions that have something to do with complex numbers in general, afaik) is not implemented yet, I guess (it aborts with an error: Elemwise{real,no_inplace}.grad illegally returned an integer-valued variable. (Input index 0, dtype complex128))
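(For reference, the FFT route itself is easy to write in plain NumPy, gradients aside; a sketch using one common index convention:)
import numpy as np

x = np.array([2.0, 3.0, 5.0])
y = np.array([7.0, 11.0, 13.0])
# cc[n] = sum_m x[m] * y[(m - n) mod N]; other conventions differ by a flip
cc_fft = np.fft.ifft(np.fft.fft(x) * np.conj(np.fft.fft(y))).real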
Here's a working solution I came up with (definitely not optimal, since FFT is not used):
def circular_crosscorelation(X, y):
    """
    Input:
        symbols for X [n, m]
        and y [m,]

    Returns:
        symbol for the circular cross correlation of each row in X with y,
        cc [n, m]
    """
    n, m = X.shape
    corr_expr = T.signal.conv.conv2d(X, y[::-1].reshape((1, -1)), image_shape=(1, m), border_mode='full')
    corr_len = corr_expr.shape[1]
    pad = m - corr_len % m
    v_padded = T.concatenate([corr_expr, T.zeros((n, pad))], axis=1)
    circ_corr_exp = T.sum(v_padded.reshape((n, v_padded.shape[1] / m, m)), axis=1)
    return circ_corr_exp[:, ::-1]
X = T.dmatrix()
y = T.dmatrix()
cc = theano.function([X, y], circular_crosscorelation(X, y))
print cc( np.array([[2, 3, 5], [4, 5, 6]]), np.array([[7, 11, 13]]) )
returns
[[  94.  108.  108.]
 [ 149.  157.  159.]]
as expected.
And it can be analytically differentiated:
cc_expr = circular_crosscorelation(X, y)
score = T.sum(cc_expr**2)
grad = T.grad(score, X)
g = theano.function([X, y], outputs=grad)
print g( np.array([[2, 3, 5], [4, 5, 6]]), np.array([[7, 11, 13]]) )
>> [[ 6332.  6388.  6500.]
>>  [ 9554.  9610.  9666.]]
Here are also a few more options (through direct circulant calculation) and a timing comparison:
def circulant_np(v):
    row = np.arange(len(v))
    col = -np.arange(len(v))
    idx = (row[:, np.newaxis] + col) % len(v)
    return v[idx]

print circulant_np(np.array([1, 2, 3, 5]))

def c_corr_np(a, b):
    return circulant_np(a).dot(b[::-1])

def circulant_t(v):
    row = T.arange(v.shape[0])
    col = -T.arange(v.shape[0])
    idx = (row.reshape((-1, 1)) + col) % v.shape[0]
    return v[idx]

def c_corr_t_f(a, b):
    """ 1d correlation using circulant matrix """
    return circulant_t(a).dot(b[::-1])

a = T.dvector('a')
b = T.dvector('b')
c_corr_t = theano.function([a, b], c_corr_t_f(a, b))

print c_corr_np(np.array([2, 3, 5]), np.array([7, 11, 13]))
print c_corr_t(np.array([2, 3, 5]), np.array([7, 11, 13]))
print cc( np.array([[2, 3, 5]]), np.array([[7, 11, 13]]) )

%timeit c_corr_np(np.array([2, 3, 5]), np.array([7, 11, 13]))
%timeit c_corr_t(np.array([2, 3, 5]), np.array([7, 11, 13]))
%timeit cc( np.array([[2, 3, 5]]), np.array([[7, 11, 13]]) )  # = circular_crosscorelation
which gives
10000 loops, best of 3: 30.6 µs per loop
10000 loops, best of 3: 132 µs per loop
10000 loops, best of 3: 149 µs per loop
and the inverse cross-correlation:
def inverse_circular_crosscorelation(y):
    """
    Input:
        symbol for y [1, m]

    Returns:
        symbol for y_inv s.t.
        cc( y, y_inv ) = (1, 0 ... 0)
    """
    A = circulant_t(y.reshape((-1, )))
    b = T.concatenate([T.zeros((y.shape[1] - 1, )), T.ones((1, ))]).reshape((-1, 1))
    return T.nlinalg.matrix_inverse(A).dot(b).reshape((1, -1))[:, ::-1]
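A quick NumPy sanity check of the same idea (my sketch, reusing circulant_np and c_corr_np from above): solving circulant(y) @ v = e and undoing the flip that c_corr_np applies reproduces the delta vector by construction:
def inverse_c_corr_np(y):
    A = circulant_np(np.asarray(y, dtype=float))
    e = np.zeros(len(y))
    e[0] = 1.0
    return np.linalg.solve(A, e)[::-1]

y = np.array([2., 3., 5.])
print c_corr_np(y, inverse_c_corr_np(y))  # [ 1.  0.  0.] up to rounding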

What is a pythonic way of finding maximum values and their indices for moving subarrays for numpy ndarray?

I have numpy ndarrays which could be 3 or 4 dimensional. I'd like to find maximum values and their indices in a moving subarray window with specified strides.
For example, suppose I have a 4x4 2d array and my moving subarray window is 2x2 with stride 2 for simplicity:
[[ 1,  2,  3,  4],
 [ 5,  6,  7,  8],
 [ 9, 10, 11, 12],
 [13, 14, 15, 16]].
I'd like to find
[[ 6  8]
 [14 16]]
for max values and
[(1,1), (1,3),
 (3,1), (3,3)]
for indices as output.
Is there a concise, efficient implementation for this for ndarray without using loops?
Here's a solution using stride_tricks:
def make_panes(arr, window):
    arr = np.asarray(arr)
    r, c = arr.shape
    s_r, s_c = arr.strides
    w_r, w_c = window
    if c % w_c != 0 or r % w_r != 0:
        raise ValueError("Window doesn't fit array.")
    shape = (r / w_r, c / w_c, w_r, w_c)
    strides = (w_r*s_r, w_c*s_c, s_r, s_c)
    return np.lib.stride_tricks.as_strided(arr, shape, strides)

def max_in_panes(arr, window):
    w_r, w_c = window
    r, c = arr.shape
    panes = make_panes(arr, window)
    v = panes.reshape((-1, w_r * w_c))
    ix = np.argmax(v, axis=1)
    max_vals = v[np.arange(r/w_r * c/w_c), ix].reshape(r/w_r, c/w_c)
    i = np.repeat(np.arange(0, r, w_r), c/w_c)
    j = np.tile(np.arange(0, c, w_c), r/w_r)
    rel_i, rel_j = np.unravel_index(ix, window)
    max_ix = i + rel_i, j + rel_j
    return max_vals, max_ix
A demo:
>>> vals, ix = max_in_panes(x, (2,2))
>>> print vals
[[ 6 8]
[14 16]]
>>> print ix
(array([1, 1, 3, 3]), array([1, 3, 1, 3]))
Note that this is pretty untested, and is designed to work with 2d arrays. I'll leave the generalization to n-d arrays to the reader...
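Since the windows here are non-overlapping, a reshape-based variant gives the same maxima without stride_tricks (a sketch I added):
x = np.arange(1, 17).reshape(4, 4)
panes = x.reshape(2, 2, 2, 2).swapaxes(1, 2)  # (block_row, block_col, 2, 2)
print panes.max(axis=(2, 3))
# [[ 6  8]
#  [14 16]]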

How to vectorize multiple levels of recursion?

I am a newbie to python and numpy (and programming in general). I am trying to speed up my code as much as possible. The math involves several summations over multiple axes of a few arrays. I've attained one level of vectorization, but I can't seem to get any deeper than that and have to resort to for loops (I believe there are three levels of nesting, over M, N, and I, one of which, I, I've eliminated). Here's my code for the relevant section (this code works, but I'd like to speed it up):
def B1(n, i):
    return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)

for n in N:
    B[n, :] = B1(n, I)

for m in M:
    for n in N:
        C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
    Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])

A = np.linalg.solve(C[1:, 1:], (0.25)*Y[1:])
dmaxi is just a float and m, n and i are integers. The arrays have the following shapes:
>>> qi.shape
(551,)
>>> N.shape
(18,)
>>> M.shape
(18,)
>>> I.shape
(551,)
>>> Iq.shape
(1, 551, 3)
>>> U.shape
(1, 551, 3)
As you can see I've vectorized the calculation of the 2nd axis of B, but I can't seem to do it for the 1st axis, C, and Y, which still require the for loops. It seems that when I try to do the same form of vectorization that I did for the 1st axis of B (define a function, then give the array as the argument), I get a broadcasting error since it appears to be trying to calculate both axes simultaneously, rather than the 1st, then the 2nd, which is why I had to force it into a for loop instead. The same problem occurs for both C and Y which is why they're both in for loops also. In case that's confusing, essentially what I tried was:
>>> B[:, :] = B1(N, I)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sasrec_v6.py", line 155, in B1
return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)
ValueError: operands could not be broadcast together with shapes (18) (551)
Vectorizing the 2nd axis of B made a substantial improvement to the speed of my code, so I'm assuming that the same will apply for further vectorization (I hope I'm using that term correctly by the way).
You can use broadcasting to make 2d arrays from your 1d index vectors. I haven't tested these yet, but they should work:
If you reshape the N to be a column vector, then B1 will return a 2d array:
B[N] = B1(N[:, None], I)
For Y and C, I'd use np.einsum to have better control over which axes are multiplied (probably this could be done with np.dot as well, but I'm not sure how):
C[M[:, None], N] = np.einsum('ij,kj->ik',
                             B[M]/np.square(qi*Iq[0, :, 2]),
                             B[N])
Y[M] = np.einsum('i, ki->k',
                 U[0, :, 1]/np.square(qi*Iq[0, :, 2]),
                 B[M])
To see what that indexing trick does:
In [1]: a = np.arange(3)
In [2]: a
Out[2]: array([0, 1, 2])
In [3]: a[:, None]
Out[3]:
array([[0],
       [1],
       [2]])
In [4]: b = np.arange(4,1,-1)
In [5]: b
Out[5]: array([4, 3, 2])
In [6]: a[:, None] * b
Out[6]:
array([[0, 0, 0],
       [4, 3, 2],
       [8, 6, 4]])
It saves roughly an order of magnitude in time:
In [92]: %%timeit
   ....: B = np.zeros((18, 551))
   ....: C = np.zeros((18, 18))
   ....: Y = np.zeros((18))
   ....: for n in N:
   ....:     B[n, :] = B1(n, I)
   ....: for m in M:
   ....:     for n in N:
   ....:         C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
   ....:     Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])
   ....:
100 loops, best of 3: 15.8 ms per loop

In [93]: %%timeit
   ....: Bv = np.zeros((18, 551))
   ....: Cv = np.zeros((18, 18))
   ....: Yv = np.zeros((18))
   ....: Bv[N] = B1(N[:, None], I)
   ....: Cv[M[:, None], N] = np.einsum('ij,kj->ik', B[M]/np.square(qi*Iq[0, :, 2]), B[N])
   ....: Yv[M] = np.einsum('i, ki->k', U[0, :, 1]/np.square(qi*Iq[0, :, 2]), B[M])
   ....:
1000 loops, best of 3: 1.34 ms per loop
Here's my test:
import numpy as np

# make fake data:
np.random.seed(5)
qi = np.random.rand(551)
N = np.random.randint(0, 18, 18)  # np.arange(18)
M = np.random.randint(0, 18, 18)  # np.arange(18)
I = np.arange(551)
Iq = np.random.rand(1, 551, 3)
U = np.random.rand(1, 551, 3)

B = np.zeros((18, 551))
C = np.zeros((18, 18))
Y = np.zeros((18))

Bv = np.zeros((18, 551))
Cv = np.zeros((18, 18))
Yv = np.zeros((18))

dmaxi = 1.

def B1(n, i):
    return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)

for n in N:
    B[n, :] = B1(n, I)

for m in M:
    for n in N:
        C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
    Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])

Bv[N] = B1(N[:, None], I)
print "B correct?", np.allclose(Bv, B)

# np.einsum test case:
n, m = 2, 3
a = np.arange(n*m).reshape(n, m)*8 + 2
b = np.arange(n*m)[::-1].reshape(n, m)
c = np.empty((n, n))
for i in range(n):
    for j in range(n):
        c[i, j] = np.dot(a[i], b[j])
cv = np.einsum('ij,kj->ik', a, b)
print "einsum test successful?", np.allclose(c, cv)

Cv[M[:, None], N] = np.einsum('ij,kj->ik',
                              B[M]/np.square(qi*Iq[0, :, 2]),
                              B[N])
print "C correct?", np.allclose(Cv, C)

Yv[M] = np.einsum('i, ki->k',
                  U[0, :, 1]/np.square(qi*Iq[0, :, 2]),
                  B[M])
print "Y correct?", np.allclose(Yv, Y)
output :D
B correct? True
einsum test successful? True
C correct? True
Y correct? True

Summing values of numpy array based on indices in other array

Assume I have the following arrays:
N = 8
M = 4
a = np.zeros(M)
b = np.random.randint(M, size=N) # contains indices for a
c = np.random.rand(N) # contains random values
I want to sum the values of c according to the indices provided in b, and store them in a. Writing a loop for this is trivial:
for i, v in enumerate(b):
    a[v] += c[i]
Since N can get quite big in my real-world problem, I'd like to avoid using python loops, but I can't figure out how to write it as a numpy statement. Can anyone help me out?
Ok, here are some example values:
In [27]: b
Out[27]: array([0, 1, 2, 0, 2, 3, 1, 1])
In [28]: c
Out[28]:
array([ 0.15517108,  0.84717734,  0.86019899,  0.62413489,  0.24357903,
        0.86015187,  0.85813481,  0.7071174 ])
In [30]: a
Out[30]: array([ 0.77930596, 2.41242955, 1.10377802, 0.86015187])
import numpy as np
N = 8
M = 4
b = np.array([0, 1, 2, 0, 2, 3, 1, 1])
c = np.array([ 0.15517108, 0.84717734, 0.86019899, 0.62413489, 0.24357903, 0.86015187, 0.85813481, 0.7071174 ])
a = ((np.mgrid[:M,:N] == b)[0] * c).sum(axis=1)
returns
array([ 0.77930597, 2.41242955, 1.10377802, 0.86015187])
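For reference, NumPy also ships primitives for exactly this scatter-add pattern (a sketch):
a = np.bincount(b, weights=c, minlength=M)  # sums c into bins given by b
# or, updating an existing array in place:
a = np.zeros(M)
np.add.at(a, b, c)  # unbuffered equivalent of the python loop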
