Consider this:
X = [1, 2, 3]
p = np.poly1d(X)
print('x: ', X, 'y: ', p(X))
output >> x: [1, 2, 3] y: [ 6 11 18]
What if I want to find x based on y?
x: [?, ?, ?] y: [ 6 11 18]
np.poly1d(X) creates a polynomial whose coefficients are the entries of X, highest power first. Let's call them a, b, and c. So practically you have the expression
a*x**2 + b*x + c
When you then evaluate the polynomial at these same three values, you get the following three equations
a**3 + b*a + c = 6
a*b**2 + b**2 + c = 11
a*c**2 + b*c + c = 18
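As a quick sanity check (my own sketch, not part of the original answer), plugging a=1, b=2, c=3 back in reproduces the y values from above:

import numpy as np

a, b, c = 1, 2, 3
p = np.poly1d([a, b, c])   # a*x**2 + b*x + c
print(p(a), p(b), p(c))    # 6 11 18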
There might be an algebraic way you can solve them yourself, but after a quick think I didn't come up with anything. However, sympy will happily solve this system of equations for you.
import numpy as np
import sympy as sym
def generate_y(X):
    return np.poly1d(X)(X)

def solve_x(Y):
    a, b, c = sym.symbols('a b c')
    e1 = sym.Eq(a**3 + b*a + c, Y[0])
    e2 = sym.Eq(a*b**2 + b**2 + c, Y[1])
    e3 = sym.Eq(a*c**2 + b*c + c, Y[2])
    return sym.solvers.solve([e1, e2, e3], [a, b, c])
For example
>>> solve_x(generate_y([1, 2, 3]))
[(1, 2, 3)]
>>> solve_x(generate_y([-5, 105, 2]))
[(-5, 105, 2)]
You could generalise this for nth order polynomials by creating the symbolic expressions dynamically, but for higher order you'll run into problems (such is life) and for 1st order you'll have multiple solutions.
def solve_x(Y):
    symbols = sym.symbols('a:z')[:len(Y)]
    X = sym.symbols('X')
    expr = sum(s*X**i for i, s in enumerate(symbols[::-1]))
    eqns = [sym.Eq(expr.subs({X: s}), y) for s, y in zip(symbols, Y)]
    return sym.solvers.solve(eqns, symbols)
Usage
>>> solve_x(generate_y([1, 2]))
[(1, 2), (-1 + sqrt(2), 2*sqrt(2)), (-sqrt(2) - 1, -2*sqrt(2))]
>>> solve_x(generate_y([1, 2, 3, 4]))
# still computing...
[A,I] = histc([0.9828 0.4662 0.5245 0.9334 0.2163],[0.0191 0.2057 0.2820 0.2851 1.0000])
That is the MATLAB code with the results:
A =
0 1 0 4 0
I =
4 4 4 4 2
What I need is I. I've tried using np.histogram but it gives me this:
>>> a,b = np.histogram([0.9828 , 0.4662 , 0.5245 , 0.9334 , 0.2163],[0.0191 , 0.2057 , 0.2820 , 0.2851 , 1.0000])
>>> a
array([0, 1, 0, 4])
>>> b
array([ 0.0191, 0.2057, 0.282 , 0.2851, 1. ])
I want to get the bins that each element in my array/matrix goes into.
What you are looking for is numpy.digitize:
Return the indices of the bins to which each value in input array belongs.
>>> a = np.digitize([0.9828 , 0.4662 , 0.5245 , 0.9334 , 0.2163],[0.0191 , 0.2057 , 0.2820 , 0.2851 , 1.0000])
>>> print(a)
[4 4 4 4 2]
A correct Python implementation of MATLAB's histc bin indices can be built on np.searchsorted (note side='right', which matches MATLAB's edges(k) <= x < edges(k+1) convention):
import numpy as np

def histc(x, binranges):
    indices = np.searchsorted(binranges, x, side='right')
    # values above the last edge fall outside every bin (index 0), as in MATLAB
    return np.where(np.asarray(x) > binranges[-1], 0, indices)
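For the data from the question this reproduces MATLAB's I (a quick check I added; it assumes the edges are sorted in ascending order):

x = [0.9828, 0.4662, 0.5245, 0.9334, 0.2163]
edges = [0.0191, 0.2057, 0.2820, 0.2851, 1.0000]
print(histc(x, edges))  # [4 4 4 4 2]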
Correct implementation of MATLAB's histc function (the bin counts) in Python:
def histc(x, bins):
    # bins must be a NumPy array here (np.zeros(bins.shape)); all values are assumed to lie within the bin range
    map_to_bins = np.digitize(x, bins)  # Get indices of the bins to which each value in input array belongs.
    res = np.zeros(bins.shape)
    for el in map_to_bins:
        res[el-1] += 1  # Increment appropriate bin.
    return res
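If you would rather avoid the Python loop, the same counts can be obtained with np.bincount. This is a sketch of my own, and it assumes every value falls inside the bin range (so digitize never returns 0):

def histc_bincount(x, bins):
    map_to_bins = np.digitize(x, bins)  # 1-based bin index for each value
    return np.bincount(map_to_bins - 1, minlength=len(bins))

print(histc_bincount([0.9828, 0.4662, 0.5245, 0.9334, 0.2163],
                     [0.0191, 0.2057, 0.2820, 0.2851, 1.0]))  # [0 1 0 4 0]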
numpy.digitize alone is not a complete reproduction of Matlab histc. This works:
import numpy as np

def histc(X, bins):
    map_to_bins = np.digitize(X, bins)
    r = np.zeros(bins.shape)
    for i in map_to_bins:
        r[i-1] += 1
    return [r, map_to_bins]

if __name__ == "__main__":
    X = np.array([0.9828, 0.4662, 0.5245, 0.9334, 0.2163])
    bins = np.array([0.0191, 0.2057, 0.2820, 0.2851, 1.0])
    [A, I] = histc(X, bins)
    print("X", X)
    print("bins", bins)
    print("A", A, "expecting", [0, 1, 0, 4, 0])
    print("I", I, "expecting", [4, 4, 4, 4, 2])
I want to repeat elements of an array along axis 0 and axis 1 for M and N times respectively:
import numpy as np
a = np.arange(12).reshape(3, 4)
b = a.repeat(2, 0).repeat(2, 1)
print(b)
[[ 0 0 1 1 2 2 3 3]
[ 0 0 1 1 2 2 3 3]
[ 4 4 5 5 6 6 7 7]
[ 4 4 5 5 6 6 7 7]
[ 8 8 9 9 10 10 11 11]
[ 8 8 9 9 10 10 11 11]]
This works, but I want to know whether there are better methods that avoid creating a temporary array.
You could use the Kronecker product, see numpy.kron:
>>> a = np.arange(12).reshape(3,4)
>>> print(np.kron(a, np.ones((2,2), dtype=a.dtype)))
[[ 0 0 1 1 2 2 3 3]
[ 0 0 1 1 2 2 3 3]
[ 4 4 5 5 6 6 7 7]
[ 4 4 5 5 6 6 7 7]
[ 8 8 9 9 10 10 11 11]
[ 8 8 9 9 10 10 11 11]]
Your original method is OK too, though!
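The same idea extends to different repeat counts per axis. A small check of my own (using the a from above, with example repeat counts M and N) against the double repeat:

>>> M, N = 3, 2   # repeat counts for axis 0 and axis 1
>>> np.array_equal(np.kron(a, np.ones((M, N), dtype=a.dtype)), a.repeat(M, 0).repeat(N, 1))
True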
You can make use of np.broadcast_to here:
def broadcast_tile(a, h, w):
    x, y = a.shape
    m, n = x * h, y * w
    return np.broadcast_to(
        a.reshape(x, 1, y, 1), (x, h, y, w)
    ).reshape(m, n)
broadcast_tile(a, 2, 2)
array([[ 0, 0, 1, 1, 2, 2, 3, 3],
[ 0, 0, 1, 1, 2, 2, 3, 3],
[ 4, 4, 5, 5, 6, 6, 7, 7],
[ 4, 4, 5, 5, 6, 6, 7, 7],
[ 8, 8, 9, 9, 10, 10, 11, 11],
[ 8, 8, 9, 9, 10, 10, 11, 11]])
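As a quick check of my own (not part of the benchmark below), the result matches the double-repeat approach for unequal factors as well:

a = np.arange(12).reshape(3, 4)
print(np.array_equal(broadcast_tile(a, 2, 3), a.repeat(2, 0).repeat(3, 1)))  # True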
Performance
Functions
def chris(a, h, w):
    x, y = a.shape
    m, n = x * h, y * w
    return np.broadcast_to(
        a.reshape(x, 1, y, 1), (x, h, y, w)
    ).reshape(m, n)

def alex_riley(a, b0, b1):
    r, c = a.shape
    rs, cs = a.strides
    x = np.lib.stride_tricks.as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0))
    return x.reshape(r*b0, c*b1)

def paul_panzer(a, b0, b1):
    r, c = a.shape
    out = np.empty((r, b0, c, b1), a.dtype)
    out[...] = a[:, None, :, None]
    return out.reshape(r*b0, c*b1)

def wim(a, h, w):
    return np.kron(a, np.ones((h, w), dtype=a.dtype))
Setup
import numpy as np
import pandas as pd
from timeit import timeit

res = pd.DataFrame(
    index=['chris', 'alex_riley', 'paul_panzer', 'wim'],
    columns=[5, 10, 20, 50, 100, 500, 1000],
    dtype=float
)

a = np.arange(100).reshape((10, 10))

for f in res.index:
    for c in res.columns:
        h = w = c
        stmt = '{}(a, h, w)'.format(f)
        setp = 'from __main__ import h, w, a, {}'.format(f)
        res.at[f, c] = timeit(stmt, setp, number=50)
Output
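The timings collected in res can then be visualised, for example like this (a minimal sketch of my own, assuming matplotlib is installed; the original plot may have looked different):

import matplotlib.pyplot as plt

ax = res.T.plot(loglog=True, marker='o')  # one line per implementation, factor h = w on the x-axis
ax.set_xlabel('h = w')
ax.set_ylabel('time for 50 calls [s]')
plt.show()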
Since the result cannot be implemented as a view, as_strided offers no benefit over simple preallocation and broadcasting. Because of its overhead, as_strided in fact seems a bit slower (I did no proper benchmarking, though).
The as_strided code is taken from @Alex Riley's post.
from numpy.lib.stride_tricks import as_strided
import numpy as np
def tile_array(a, b0, b1):
    r, c = a.shape                                     # number of rows/columns
    rs, cs = a.strides                                 # row/column strides
    x = as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0))  # view a as larger 4D array
    return x.reshape(r*b0, c*b1)                       # create new 2D array

def tile_array_pp(a, b0, b1):
    r, c = a.shape
    out = np.empty((r, b0, c, b1), a.dtype)
    out[...] = a[:, None, :, None]
    return out.reshape(r*b0, c*b1)
a = np.arange(9).reshape(3, 3)
kwds = {'globals': {'f_ar': tile_array, 'f_pp': tile_array_pp, 'a': a},
'number': 1000}
from timeit import timeit
print('as_strided', timeit('f_ar(a, 100, 100)', **kwds))
print('broadcast ', timeit('f_pp(a, 100, 100)', **kwds))
Sample run:
as_strided 0.048387714981799945
broadcast 0.04324757700669579
Another solution is to use as_strided (kron is much slower than using repeat twice). I have found that as_strided is much faster than a double repeat in many cases; only for small arrays [<250x250] with just a doubling in each dimension was as_strided slower. The as_strided trick is as follows:
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(1000000).reshape((1000, 1000))  # dummy data
N, M = 4, 3  # number of times to replicate each point in each dimension
H, W = a.shape
b = as_strided(a, (H, N, W, M), (a.strides[0], 0, a.strides[1], 0)).reshape((H*N, W*M))
This works by using strides of 0, which makes numpy read the same value multiple times (until it gets to the next dimension). The final reshape does copy the data, but only once, unlike a double repeat, which copies the data twice.
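To see that the 4D strided view itself allocates no new data, here is a small check of my own: the view shares a's buffer, and only the final reshape makes a copy:

import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(6).reshape(2, 3)
v = as_strided(a, (2, 4, 3, 3), (a.strides[0], 0, a.strides[1], 0))
print(np.shares_memory(a, v))  # True  -- zero strides just re-read a's values
print(v.strides)               # stride 0 along the two repeat axes
b = v.reshape(8, 9)            # this reshape makes the single copy
print(np.shares_memory(a, b))  # False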
Errata: I'm only taking 2x upsampling into account.
TL;DR It turns out that after the OpenCV version,
np.repeat(np.repeat(a, 2, axis=1), 2, axis=0)
is the fastest. So the answer is: there are no faster ways in numpy today, but you can get a slight improvement by changing the order of the axes.
And if you don't mind OpenCV -
cv.resize(a, None, fx=2, fy=2, interpolation=cv.INTER_NEAREST)
Here is the test.
import timeit
import numpy as np
import cv2 as cv
test = np.zeros((16, 16, 3), dtype=np.float32)
def measure(f):
    t = timeit.timeit("f(test)", number=1000, globals={"test": test, "f": f})
    print("%s - %f"%(f.__name__, t))
    return f, t

def fastest(c):
    print(c.__name__)
    winner, t = min((measure(getattr(c, ve)) for ve in dir(c) if ve.startswith("alg_")), key=lambda x: x[1])
    print("%s winner: %s - %f"%(c.__name__, winner.__name__, t))
    return winner

@fastest
class nn:
    def alg_01(a):
        return np.repeat(np.repeat(a, 2, axis=0), 2, axis=1)

    def alg_02(a):
        return np.repeat(np.repeat(a, 2, axis=1), 2, axis=0)

    def alg_03(a):
        b = a[:, None, :, None]
        b = np.concatenate((b, b), axis=1)
        b = np.concatenate((b, b), axis=3)
        return b.reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_04(a):
        b = a[:, None, :, None]
        b = np.concatenate((b, b), axis=3)
        b = np.concatenate((b, b), axis=1)
        return b.reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_05(a):
        return (a[:, None, :, None]*np.ones((1, 2, 1, 2)+((1,)*len(a.shape[2:])), dtype=np.float32)).reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_06(a):
        return cv.resize(a, None, fx=2, fy=2, interpolation=cv.INTER_NEAREST)

    def alg_07(a):
        return a[:, None, :, None][:, (0, 0)][:, :, :, (0, 0)].reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_08(a):
        return a[:, None, :, None][:, :, :, (0, 0)][:, (0, 0)].reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_09(a):
        return np.kron(a, np.ones((2, 2), dtype=np.float32))

    def alg_10(a):
        return np.broadcast_to(a[:, None, :, None], (a.shape[0], 2, a.shape[1], 2)+a.shape[2:]).reshape(a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:])

    def alg_11(a):
        ret = np.empty((a.shape[0], 2, a.shape[1], 2, *a.shape[2:]), dtype=np.float32)
        ret[...] = a[:, None, :, None]
        ret.resize((a.shape[0]<<1, a.shape[1]<<1, *a.shape[2:]), refcheck=False)
        return ret
The result is:
nn
alg_01 - 0.040967
alg_02 - 0.033744
alg_03 - 0.057969
alg_04 - 0.048739
alg_05 - 0.076595
alg_06 - 0.078638
alg_07 - 0.084692
alg_08 - 0.084539
alg_09 - 0.344339
alg_10 - 0.078707
alg_11 - 0.049424
nn winner: alg_02 - 0.033744