How to vectorize slicing operations on torch tensors?

I have a bunch of variables expressed as list comprehensions that I want to turn into torch tensors. So far I have:
import torch
n = 10
y = torch.rand(n ** 2, requires_grad=True)
one_node_per_position = torch.FloatTensor([sum(y[k:k + n]) - 1 for k in range(0, n ** 2, n)])
one_node_per_point = torch.FloatTensor([sum(y[j::n]) - 1 for j in range(n)])
connectivity = torch.FloatTensor([sum(y[k:k + n]) - sum(y[k - n:k]) for k in range(n, n ** 2, n)])
But it obviously doesn't look good. How can I rewrite it to take advantage of vectorization for further use?

You can use the as_strided operator to generate views of the original tensor, and apply the sum to these views:
import torch
n = 10
y = torch.rand(n**2, requires_grad=True)
# Vectorized version
vec_one_node_per_position = torch.as_strided(y, (n, n), (n, 1)).sum(axis=-1) - 1
vec_one_node_per_point = torch.as_strided(y, (n, n), (1, n)).sum(axis=-1) - 1
vec_connectivity = (
    torch.as_strided(y, (n - 1, n), (n, 1), n).sum(axis=-1)
    - torch.as_strided(y, (n - 1, n), (n, 1)).sum(axis=-1)
)
# Ensure consistency with comprehension-based version
one_node_per_position = torch.FloatTensor([sum(y[k : k + n]) - 1 for k in range(0, n**2, n)])
one_node_per_point = torch.FloatTensor([sum(y[j::n]) - 1 for j in range(n)])
connectivity = torch.FloatTensor([sum(y[k : k + n]) - sum(y[k - n : k]) for k in range(n, n**2, n)])
assert torch.isclose(vec_one_node_per_position, one_node_per_position).all()
assert torch.isclose(vec_one_node_per_point, one_node_per_point).all()
assert torch.isclose(vec_connectivity, connectivity).all()
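If the stride arguments look opaque: the size tuple gives the view's shape, and the stride tuple says how many elements of the flat storage to step for each index. A small throwaway demo (not part of the answer's code) makes this visible:
import torch
t = torch.arange(9.)                        # flat storage: 0..8
rows = torch.as_strided(t, (3, 3), (3, 1))  # [[0,1,2],[3,4,5],[6,7,8]]
cols = torch.as_strided(t, (3, 3), (1, 3))  # [[0,3,6],[1,4,7],[2,5,8]]
print(rows.sum(axis=-1))                    # tensor([ 3., 12., 21.])
print(cols.sum(axis=-1))                    # tensor([ 9., 12., 15.])
Because as_strided returns views rather than copies, the vectorized sums stay connected to y in the autograd graph, just like the comprehension-based version.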

Related

Finding roots of an equation involving a summation using sympy

I am currently new to sympy and I am trying to reproduce the Mathematica example in the attached image in Python. My attempt is written below, but it returns an empty list:
import sympy
m, n, D_star, a, j = sympy.symbols('m n D_star a j')
s1 = sympy.Sum(a**(j-1),(j, 1, m-1))
rhs = 6 * sympy.sqrt((D_star * (1 + a)*(n - 1))/2)
expand_expr = sympy.solve(s1 - rhs, m)
temp = sympy.lambdify((a, n, D_star), expand_expr, 'numpy')
n = 100
a = 1.2
D_star = 2.0
ms = temp(1.2, 100, 2.0)
ms
# what I get is an empty list []
# expected answer using Mma FindRoot function is 17.0652
Adding .doit() to expand the sum seems to help. It gives Piecewise((m - 1, Eq(a, 1)), ((a - a**m)/(1 - a), True))/a for the sum in s1.
from sympy import symbols, Eq, Sum, sqrt, solve, lambdify
m, n, j, a, D_star = symbols('m n j a D_star')
s1 = Sum(a**(j - 1), (j, 1, m - 1)).doit()
rhs = 6 * sqrt((D_star * (1 + a) * (n - 1)) / 2)
expand_expr = solve(Eq(s1, rhs), m)
temp = lambdify((a, n, D_star), expand_expr, 'numpy')
n = 100
a = 1.2
D_star = 2.0
ms = temp(1.2, 100, 2.0)
This gives for expand_expr:
[Piecewise((log(a*(3*sqrt(2)*a*sqrt(D_star*(a*n - a + n - 1)) - 3*sqrt(2)*sqrt(D_star*(a*n - a + n - 1)) + 1))/log(a), Ne(a, 1)), (nan, True)),
Piecewise((3*sqrt(2)*a*sqrt(D_star*(a*n - a + n - 1)) + 1, Eq(a, 1)), (nan, True))]
which separates into a != 1 and a == 1.
The result of ms is [array(17.06524172), array(nan)], again separating out a hypothetical a == 1 in a somewhat awkward way.
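If only the finite root is wanted, one way is to filter the nan out of the lambdified result (a sketch; it assumes the a != 1 branch is the relevant one):
import numpy as np
roots = np.asarray(temp(1.2, 100, 2.0), dtype=float)
m_root = roots[np.isfinite(roots)].item()   # ~17.0652, matching Mma's FindRoot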

Vectorizing softmax cross-entropy gradient

I'm trying to implement my own neural network with (almost) fully vectorized operations. There are lots of posts out there but I can't seem to find one that fits all three of these:
separate cross-entropy and softmax terms in the gradient calculation (so I can interchange the last activation and loss)
multi-class classification (y is one-hot encoded)
all operations are fully vectorized
My main question is: How do I get to dE/dz (N x K) given dE/da (N x K) and da/dz (N x K x K) using a fully vectorized operation? i.e. How do I vectorize dE_dz_test2?
My second question is:
Is there a better way to write softmax_derivative?
I used this as a reference for calculating the gradient one sample at a time:
http://saitcelebi.com/tut/output/part2.html
and this for figuring out how to do backprop
https://peterroelants.github.io/posts/neural-network-implementation-part04/
import numpy as np

def one_hot_encode(y, n_classes):
    y_onehot = np.zeros((len(y), n_classes))
    for i, y_i in enumerate(y):
        y_onehot[i, y_i] = 1
    return y_onehot

def cross_entropy_derivative(y_true, y_pred):
    # dE/da
    # input: N x K arrays
    # output: N x K array
    N = len(y_true)
    return -(y_true / y_pred) / N

def softmax(x):
    # activation (a)
    # input: N x K array
    # output: N x K array
    # https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
    exp = np.exp(x - np.max(x))
    return exp / np.sum(exp, axis=1)[:, None]

def softmax_derivative(Z):
    # da/dz
    # input: N x K array
    # output: N x K x K array
    # http://saitcelebi.com/tut/output/part2.html
    N, K = Z.shape
    s = softmax(Z)[:, :, np.newaxis]
    a = np.tensordot(s, np.ones((1, K)), axes=([-1], [0]))
    I = np.repeat(np.eye(K, K)[np.newaxis, :, :], N, axis=0)
    b = I - np.tensordot(np.ones((K, 1)), s.T, axes=([-1], [0])).T
    return a * np.swapaxes(b, 1, 2)

def softmax_derivative_test(Z):
    # da/dz
    # non-vectorized softmax gradient calculation
    # http://saitcelebi.com/tut/output/part2.html
    N, K = Z.shape
    da_dz = np.zeros((N, K, K))
    kron_delta = np.eye(K)
    s = softmax(Z)
    for n in range(N):
        for i in range(K):
            for j in range(K):
                da_dz[n, i, j] = s[n, i] * (kron_delta[i, j] - s[n, j])
    return da_dz

def dE_dz_test2(dE_da, da_dz):
    # input: array (N x K), array (N x K x K)
    # output: array (N x K)
    N, K = dE_da.shape
    dE_dz = np.zeros((N, K))
    for n in range(N):
        dE_dz[n, :] = np.matmul(da_dz[n], dE_da[n, :, np.newaxis]).T
    return dE_dz

def some_type_of_matrix_multiplication_(dE_da, da_dz):
    # how do I get dE/dz from dE_da and da_dz?
    pass
X = np.random.rand(100, 2)
W = np.random.rand(2, 4)
y = np.random.randint(0, 4, size=100)
y = one_hot_encode(y, 4)
Z = X @ W
S = softmax(Z)
N, K = Z.shape
# da / dz for softmax
da_dz = softmax_derivative(Z) # (100, 4, 4)
da_dz_test = softmax_derivative_test(Z) # (100, 4, 4) - non vectorized implementation
print(np.isclose(da_dz, da_dz_test).all()) # equivalence test
dE_da = cross_entropy_derivative(y, S) # (100, 4)
dE_dz = some_type_of_matrix_multiplication_(dE_da, da_dz) # what do I do here? *****
dE_dz_test = (S - y) / N # (100, 4) If you combine dE/da and da/dz terms
dE_dz_test2 = dE_dz_test2(dE_da, da_dz)
print(np.isclose(dE_dz_test, dE_dz_test2).all()) # equivalence test
True
True
Here is an approach using np.einsum:
def da_dz_pp(z, sm=None):
    if sm is None:
        sm = softmax(z)
    res = np.einsum('ij,ik->ijk', sm, -sm)   # outer products -s_i * s_k, one per sample
    np.einsum('ijj->ij', res)[...] += sm     # writable view of the diagonals: add s_j
    return res

def dE_dz_pp(y, z, sm=None):
    if sm is None:
        sm = softmax(z)
    dE_da = cross_entropy_derivative(y, sm)
    da_dz = da_dz_pp(z, sm)
    return np.einsum('ij,ijk->ik', dE_da, da_dz)   # batched vector-Jacobian product
It seems to reproduce what your code outputs and is a bit faster.
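As a quick consistency check (a sketch reusing X, W, y, softmax and the loop-based functions from the question; (S - y) / N is the combined analytic gradient):
Z = X @ W
S = softmax(Z)
print(np.isclose(da_dz_pp(Z), softmax_derivative_test(Z)).all())  # True
print(np.isclose(dE_dz_pp(y, Z), (S - y) / len(y)).all())         # True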

Wiki example for Arnoldi iteration only works for real matrices?

The Wikipedia entry for the Arnoldi method provides a Python example that produces a basis of the Krylov subspace of a matrix A. Supposedly, if A is Hermitian (i.e. if A == A.conj().T) then the Hessenberg matrix h generated by this algorithm is tridiagonal (source). However, when I use the Wikipedia code on a real-world Hermitian matrix, the Hessenberg matrix is not at all tridiagonal. When I perform the computation on the real part of A (so that A == A.T), I do get a tridiagonal Hessenberg matrix, so there seems to be a problem with the imaginary components of A. Does anybody know why the Wikipedia code doesn't produce the expected results?
Working example:
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import circulant
def arnoldi_iteration(A, b, n):
    m = A.shape[0]
    h = np.zeros((n + 1, n), dtype=complex)
    Q = np.zeros((m, n + 1), dtype=complex)
    q = b / np.linalg.norm(b)  # Normalize the input vector
    Q[:, 0] = q                # Use it as the first Krylov vector
    for k in range(n):
        v = A.dot(q)             # Generate a new candidate vector
        for j in range(k + 1):   # Subtract the projections on previous vectors
            h[j, k] = np.dot(Q[:, j], v)
            v = v - h[j, k] * Q[:, j]
        h[k + 1, k] = np.linalg.norm(v)
        eps = 1e-12  # If v is shorter than this threshold it is the zero vector
        if abs(h[k + 1, k]) > eps:  # Add the produced vector to the list, unless
            q = v / h[k + 1, k]     # the zero vector is produced.
            Q[:, k + 1] = q
        else:  # If that happens, stop iterating.
            return Q, h
    return Q, h
# Construct matrix A
N = 2**4
I = np.eye(N)
k = np.fft.fftfreq(N, 1.0 / N) + 0.5
alpha = np.linspace(0.1, 1.0, N)*2e2
c = np.fft.fft(alpha) / N
C = circulant(c)
A = np.einsum("i, ij, j->ij", k, C, k)
# Show that A is Hermitian
print(np.allclose(A, A.conj().T))
# Arbitrary (random) initial vector
np.random.seed(0)
v = np.random.rand(N)
# Perform Arnoldi iteration with complex A
_, h = arnoldi_iteration(A, v, N)
# Perform Arnoldi iteration with real A
_, h2 = arnoldi_iteration(np.real(A), v, N)
# Plot results
plt.subplot(121)
plt.imshow(np.abs(h))
plt.title("Complex A")
plt.subplot(122)
plt.imshow(np.abs(h2))
plt.title("Real A")
plt.tight_layout()
plt.show()
Result: [figure: imshow of np.abs(h) — with complex A the Hessenberg matrix is dense, with real A it is tridiagonal]
After browsing through some conference presentation slides, I realised that at some point Q had to be conjugated when A is complex. The correct algorithm is posted below for reference, with the code change marked (note that this correction has also been submitted to the Wikipedia entry):
import numpy as np
def arnoldi_iteration(A, b, n):
    m = A.shape[0]
    h = np.zeros((n + 1, n), dtype=complex)
    Q = np.zeros((m, n + 1), dtype=complex)
    q = b / np.linalg.norm(b)
    Q[:, 0] = q
    for k in range(n):
        v = A.dot(q)
        for j in range(k + 1):
            h[j, k] = np.dot(Q[:, j].conj(), v)  # <-- Q needs conjugation!
            v = v - h[j, k] * Q[:, j]
        h[k + 1, k] = np.linalg.norm(v)
        eps = 1e-12
        if abs(h[k + 1, k]) > eps:
            q = v / h[k + 1, k]
            Q[:, k + 1] = q
        else:
            return Q, h
    return Q, h
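As a quick structural check (a sketch reusing A, v and N from the question's script): for Hermitian A, everything above the first superdiagonal of h should now be numerically zero:
_, h_fixed = arnoldi_iteration(A, v, N)
print(np.allclose(np.triu(np.abs(h_fixed[:N, :]), 2), 0, atol=1e-6))  # True: h is tridiagonal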

Hermite interpolation in Python

I have this program for calculating Hermite interpolation.
The problem is that it behaves really badly.
This is the chart for 35 Chebyshev nodes. If I use more points, the peak at the beginning gets higher (it is about 10^7 with this number of nodes).
I interpolated the same function using the Lagrange method (green, shifted so it can be seen), and as you can see it looks fine.
Here is the code:
def hermit_interpolate(input):  # input is a list of tuples [(x1,y1),(x2,y2),...]; the xi are Chebyshev nodes
    points = [(input[0][0], input[0][1] - 0),  # "- 0" is just to change the type of the second element
              (input[0][0], calculate_f_p_x(input[0][0]))]  # calculate_f_p_x returns the value of the derivative
    for k in range(1, len(input)):  # divided differences and derivatives in one list, alternately
        points.append((input[k][0], (input[k][1] - input[k - 1][1]) /
                       (input[k][0] - input[k - 1][0])))
        points.append((input[k][0], calculate_f_p_x(input[k][0])))
    x, c = zip(*points)
    x = list(x)
    c = list(c)
    n = len(points)
    for i in range(2, n):  # calculating factors
        for j in range(n - 1, i - 1, -1):
            c[j] = (c[j] - c[j - 1]) / (x[j] - x[j - i])

    def result_polynomial(xpoint):  # function to calculate the value for a given x
        val = c[0]
        factor = 1.0
        for l in range(1, n):
            factor *= (xpoint - x[l - 1])
            val += (c[l] * factor)
        return val

    return result_polynomial
I can't see what's wrong here.
Thanks!
This code actually works:
import numpy

def hermit_interpolate(input):  # input is a list of tuples [(x1,y1),(x2,y2),...,(xn,yn)]; the xi are Chebyshev nodes
    n = len(input)
    points = numpy.zeros(shape=(2 * n + 1, 2 * n + 1))
    X, Y = zip(*input)
    X = list(X)
    Y = list(Y)
    for i in range(0, 2 * n, 2):
        points[i][0] = X[i // 2]
        points[i + 1][0] = X[i // 2]
        points[i][1] = Y[i // 2]
        points[i + 1][1] = Y[i // 2]
    for i in range(2, 2 * n + 1):
        for j in range(1 + (i - 2), 2 * n):
            if i == 2 and j % 2 == 1:
                points[j][i] = calculate_f_p_x(X[j // 2])
            else:
                points[j][i] = (points[j][i - 1] - points[j - 1][i - 1]) / (
                    points[j][0] - points[(j - 1) - (i - 2)][0])

    def result_polynomial(xpoint):  # function to calculate the value for a given x
        val = 0
        for i in range(0, 2 * n):
            factor = 1.
            j = 0
            while j < i:
                factor *= (xpoint - X[j // 2])
                if j + 1 != i:
                    factor *= (xpoint - X[j // 2])
                j += 1
                j += 1
            val += factor * points[i][i + 1]
        return val

    return result_polynomial
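For reference, a minimal way to exercise the fixed version (a sketch: f(x) = sin(x) is an assumed target function, so calculate_f_p_x is its derivative; the node formula is the standard Chebyshev one):
import numpy

def calculate_f_p_x(x):  # derivative of the assumed target f(x) = sin(x)
    return numpy.cos(x)

nodes = numpy.cos((2 * numpy.arange(1, 36) - 1) * numpy.pi / (2 * 35))  # 35 Chebyshev nodes on [-1, 1]
poly = hermit_interpolate([(x, numpy.sin(x)) for x in sorted(nodes)])
print(poly(0.3), numpy.sin(0.3))  # the two values should agree closely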

How to set up and solve simultaneous equations in python

For a fixed integer n, I have a set of 2(n-1) simultaneous equations as follows.
M(p) = 1+((n-p-1)/n)*M(n-1) + (2/n)*N(p-1) + ((p-1)/n)*M(p-1)
N(p) = 1+((n-p-1)/n)*M(n-1) + (p/n)*N(p-1)
M(1) = 1+((n-2)/n)*M(n-1) + (2/n)*N(0)
N(0) = 1+((n-1)/n)*M(n-1)
M(p) is defined for 1 <= p <= n-1. N(p) is defined for 0 <= p <= n-2. Notice also that p is just a constant integer in every equation so the whole system is linear.
I have been using Maple but I would like to set these up and solve them in python now, maybe using numpy.linalg.solve (or any other better method). I actually only want the value of M(n-1). For example, when n=2 the answer should be M(1) = 4, I believe. This is because the equations become
M(1) = 1+(2/2)*N(0)
N(0) = 1 + (1/2)*M(1)
Therefore
M(1)/2 = 1+1
and so
M(1) = 4.
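In matrix form, that same n=2 case is a 2x2 linear system (a sketch with solution vector [M(1), N(0)]):
import numpy as np
A = np.array([[1.0, -1.0],   # M(1) - N(0) = 1
              [-0.5, 1.0]])  # -M(1)/2 + N(0) = 1
print(np.linalg.solve(A, np.ones(2)))  # [4. 3.]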
If I want to plug in n=50, say, how can you set up this system of simultaneous equations in python so that numpy.linalg.solve can solve them?
Update: The answers are great, but they use dense solvers where the system of equations is sparse. Posted a follow-up at Using scipy sparse matrices to solve system of equations.
Updated: added implementation using scipy.sparse
This gives the solution in the order N_max,...,N_0,M_max,...,M_1.
The linear system to solve has the shape A dot x == const, where const is a vector of ones.
x is the sought-after solution vector.
Here I ordered the equations such that x is N_max,...,N_0,M_max,...,M_1.
Then I build up the A-coefficient matrix from 4 block matrices.
Here's a snapshot for the example case n=50 showing how you can derive the coefficient matrix and understand the block structure. The coefficient matrix A is light blue, the constant right side is orange, and the sought-after solution vector x is light green and used to label the columns. The first column shows which of the equations above each row (= eq.) was derived from.
As Jaime suggested, multiplying by n improves the code. This is not reflected in the spreadsheet above but has been implemented in the code below:
Implementation using numpy:
import numpy as np
import numpy.linalg as linalg

def solve(n):
    # upper left block
    n_to_M = -2. * np.eye(n - 1)
    # lower left block
    n_to_N = (n * np.eye(n - 1)) - np.diag(np.arange(n - 2, 0, -1), 1)
    # upper right block
    m_to_M = n_to_N.copy()
    m_to_M[1:, 0] = -np.arange(1, n - 1)
    # lower right block
    m_to_N = np.zeros((n - 1, n - 1))
    m_to_N[:, 0] = -np.arange(1, n)
    # build A, combine all blocks
    coeff_mat = np.hstack(
        (np.vstack((n_to_M, n_to_N)),
         np.vstack((m_to_M, m_to_N))))
    # const vector, right side of eq.
    const = n * np.ones((2 * (n - 1), 1))
    return linalg.solve(coeff_mat, const)
Solution using scipy.sparse:
from scipy.sparse import spdiags, lil_matrix, vstack, hstack
from scipy.sparse.linalg import spsolve
import numpy as np

def solve(n):
    nrange = np.arange(n)
    diag = np.ones(n - 1)
    # upper left block
    n_to_M = spdiags(-2. * diag, 0, n - 1, n - 1)
    # lower left block
    n_to_N = spdiags([n * diag, -nrange[-1:0:-1]], [0, 1], n - 1, n - 1)
    # upper right block
    m_to_M = lil_matrix(n_to_N)
    m_to_M[1:, 0] = -nrange[1:-1].reshape((n - 2, 1))
    # lower right block
    m_to_N = lil_matrix((n - 1, n - 1))
    m_to_N[:, 0] = -nrange[1:].reshape((n - 1, 1))
    # build A, combine all blocks
    coeff_mat = hstack(
        (vstack((n_to_M, n_to_N)),
         vstack((m_to_M, m_to_N))))
    # const vector, right side of eq.
    const = n * np.ones((2 * (n - 1), 1))
    return spsolve(coeff_mat.tocsr(), const).reshape((-1, 1))
Example for n=4:
[[ 7.25 ]
[ 7.76315789]
[ 8.10526316]
[ 9.47368421] # <<< your result
[ 9.69736842]
[ 9.78947368]]
Example for n=10:
[[ 24.778976 ]
[ 25.85117842]
[ 26.65015984]
[ 27.26010007]
[ 27.73593401]
[ 28.11441922]
[ 28.42073207]
[ 28.67249606]
[ 28.88229939]
[ 30.98033266] # <<< your result
[ 31.28067182]
[ 31.44628982]
[ 31.53365219]
[ 31.57506477]
[ 31.58936225]
[ 31.58770694]
[ 31.57680467]
[ 31.560726 ]]
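Given the ordering N_max,...,N_0,M_max,...,M_1, the value the question asks for, M(n-1), sits right after the N-block, at index n-1 of the solution vector (works with either implementation above):
x = solve(50)
print(x[50 - 1])  # M(49), the sought value for n = 50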
Here's an entirely different approach, using sympy. It's not fast, but it allows me to copy the RHS of your equations exactly, limiting the thinking I need to do (always a plus), and gives fractional answers.
from sympy import Integer, Symbol, Eq, solve

def build_equations(n):
    ni = n
    n = Integer(n)
    Ms = {p: Symbol("M{}".format(p)) for p in range(ni)}
    Ns = {p: Symbol("N{}".format(p)) for p in range(ni - 1)}
    M = lambda i: Ms[int(i)] if i >= 1 else 0
    N = lambda i: Ns[int(i)]
    M_eqs = {}
    M_eqs[1] = Eq(M(1), 1 + ((n - 2) / n) * M(n - 1) + (2 / n) * N(0))
    for p in range(2, ni):
        M_eqs[p] = Eq(M(p), 1 + ((n - p - 1) / n) * M(n - 1) + (2 / n) * N(p - 1) + ((p - 1) / n) * M(p - 1))
    N_eqs = {}
    N_eqs[0] = Eq(N(0), 1 + ((n - 1) / n) * M(n - 1))
    for p in range(1, ni - 1):
        N_eqs[p] = Eq(N(p), 1 + ((n - p - 1) / n) * M(n - 1) + (p / n) * N(p - 1))
    return list(M_eqs.values()) + list(N_eqs.values())

def solve_system(n, show=False):
    eqs = build_equations(n)
    sol = solve(eqs)
    if show:
        print('equations:')
        for eq in sorted(eqs, key=str):
            print(eq)
        print('solution:')
        for var, val in sorted(sol.items(), key=lambda kv: str(kv[0])):
            print(var, val, float(val))
    return sol
which gives
>>> solve_system(2, True)
equations:
M1 == N0 + 1
N0 == M1/2 + 1
solution:
M1 4 4.0
N0 3 3.0
{M1: 4, N0: 3}
>>> solve_system(3, True)
equations:
M1 == M2/3 + 2*N0/3 + 1
M2 == M1/3 + 2*N1/3 + 1
N0 == 2*M2/3 + 1
N1 == M2/3 + N0/3 + 1
solution:
M1 34/5 6.8
M2 33/5 6.6
N0 27/5 5.4
N1 5 5.0
{M2: 33/5, M1: 34/5, N1: 5, N0: 27/5}
and
>>> solve_system(4, True)
equations:
M1 == M3/2 + N0/2 + 1
M2 == M1/4 + M3/4 + N1/2 + 1
M3 == M2/2 + N2/2 + 1
N0 == 3*M3/4 + 1
N1 == M3/2 + N0/4 + 1
N2 == M3/4 + N1/2 + 1
solution:
M1 186/19 9.78947368421
M2 737/76 9.69736842105
M3 180/19 9.47368421053
N0 154/19 8.10526315789
N1 295/38 7.76315789474
N2 29/4 7.25
{N2: 29/4, N1: 295/38, M1: 186/19, M3: 180/19, N0: 154/19, M2: 737/76}
which seems to match the other answers.
This is messy, but solves your problem, barring a very probable mistake transcribing the coefficients:
import numpy as np

n = 2
# Solution vector is [N[0], N[1], ..., N[n - 2], M[1], M[2], ..., M[n - 1]]
n_pos = lambda p: p
m_pos = lambda p: p + n - 2

A = np.zeros((2 * (n - 1), 2 * (n - 1)))
# p = 0
# N[0] + (1 - n) / n * M[n - 1] = 1
A[n_pos(0), n_pos(0)] = 1  # N[0]
A[n_pos(0), m_pos(n - 1)] = (1 - n) / n  # M[n - 1]
for p in range(1, n - 1):
    # M[p] + (1 + p - n) / n * M[n - 1] - 2 / n * N[p - 1] +
    # (1 - p) / n * M[p - 1] = 1
    A[m_pos(p), m_pos(p)] = 1  # M[p]
    A[m_pos(p), m_pos(n - 1)] = (1 + p - n) / n  # M[n - 1]
    A[m_pos(p), n_pos(p - 1)] = -2 / n  # N[p - 1]
    if p > 1:
        A[m_pos(p), m_pos(p - 1)] = (1 - p) / n  # M[p - 1]
    # N[p] + (1 + p - n) / n * M[n - 1] - p / n * N[p - 1] = 1
    A[n_pos(p), n_pos(p)] = 1  # N[p]
    A[n_pos(p), m_pos(n - 1)] = (1 + p - n) / n  # M[n - 1]
    A[n_pos(p), n_pos(p - 1)] = -p / n  # N[p - 1]
if n > 2:
    # p = n - 1
    # M[n - 1] - 2 / n * N[n - 2] + (2 - n) / n * M[n - 2] = 1
    A[m_pos(n - 1), m_pos(n - 1)] = 1  # M[n - 1]
    A[m_pos(n - 1), n_pos(n - 2)] = -2 / n  # N[n - 2]
    A[m_pos(n - 1), m_pos(n - 2)] = (2 - n) / n  # M[n - 2]
else:
    # p = 1
    # M[1] - 2 / n * N[0] = 1
    A[m_pos(n - 1), m_pos(n - 1)] = 1
    A[m_pos(n - 1), n_pos(n - 2)] = -2 / n

X = np.linalg.solve(A, np.ones((2 * (n - 1),)))
But it gives a solution of
>>> X[-1]
6.5999999999999979
for M(2) when n=3, which is not what you came up with.
