Numpy Covariance - python

When zero mean has been applied to a numpy matrix, is there a difference expected between the following two pieces of code? I was taking Andrew Ng's ML course and he suggested using X @ X^T to find the covariance matrix (assuming zero mean has been applied). When I visually examined the matrix, I found it gives a different result from the np.cov function. Please help.
import numpy as np
X = np.random.randint(0, 9, (3, 3))
print(X)
[[2 1 5]
 [7 4 8]
 [4 7 6]]
X = (X - X.mean(axis=0))  # <- Zero mean
print(X)
[[-2.33333333 -3.         -1.33333333]
 [ 2.66666667  0.          1.66666667]
 [-0.33333333  3.         -0.33333333]]
m = X.shape[0]  # number of samples
cov1 = (X @ X.T) / m  # <- Find covariance manually as suggested in the course
print(cov1)
[[ 5.40740741 -2.81481481 -2.59259259]
 [-2.81481481  3.2962963  -0.48148148]
 [-2.59259259 -0.48148148  3.07407407]]
cov2 = np.cov(X, bias=True)  # <- Find covariance with np.cov
print(cov2)
[[ 0.7037037   0.59259259 -1.2962963 ]
 [ 0.59259259  1.81481481 -2.40740741]
 [-1.2962963  -2.40740741  3.7037037 ]]

If your observations are in rows and variables are in columns (set rowvar to False), then it must be x.T @ x:
import numpy as np
x0 = np.array([[2, 1, 5], [7, 4, 8], [4, 7, 6]])
x = x0 - x0.mean(axis=0)
cov1 = x.T @ x / 3
cov2 = np.cov(x, rowvar=False, bias=True)
assert np.allclose(cov1, cov2)
x @ x.T is for the case when the observations are in columns and the variables are in the rows:
x = x0 - x0.mean(axis=1)[:,None]
cov1 = x @ x.T / 3
cov2 = np.cov(x, bias=True) # rowvar=True by default
assert np.allclose(cov1, cov2)
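One aside worth noting (my addition, not part of the original answer): np.cov divides by N - 1 by default (the unbiased estimator), while bias=True divides by N, which is what matches the manual X @ X.T / m with m samples:
import numpy as np

x0 = np.array([[2, 1, 5], [7, 4, 8], [4, 7, 6]])
x = x0 - x0.mean(axis=0)
unbiased = np.cov(x, rowvar=False)           # divides by N - 1 = 2
biased = np.cov(x, rowvar=False, bias=True)  # divides by N = 3
assert np.allclose(unbiased * 2 / 3, biased)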

How to divide a tensor's elements by values at specific indices

I have a tensor like this:
out = [[ 3,  6,  5,  4],
       [ 6,  5, 10, 13],
       [ 5, 10,  6, 22],
       [ 4, 13, 22,  9]]
This is a symmetric matrix. What I want to do is divide each element by the diagonal values at the same indices. The diagonal values in this matrix are:
index0 = 3
index1 = 5
index2 = 6
index3 = 9
The result will look like this:
[[3      , 6/(3*5) , 5/(3*6) , 4/(3*9) ]
 [6/(3*5), 5       , 10/(5*6), 13/(5*9)]
 [5/(3*6), 10/(5*6), 6       , 22/(6*9)]
 [4/(3*9), 13/(5*9), 22/(6*9), 9       ]]
Let me walk through the first row:
3 is the value on the diagonal, so we skip it.
6/(3*5): 6 is the value at indices 0 and 1, so I divide 6 by the diagonal values at indices 0 and 1.
5/(3*6): 5 is the value at indices 0 and 2, so I divide it by the diagonal values at indices 0 and 2.
4/(3*9): 4 is the value at indices 0 and 3, so I divide it by the diagonal values at indices 0 and 3.
It could be done as follows in tensorflow (or numpy):
1. Take the original matrix and zero out its diagonal.
2. Divide the resulting matrix by the diagonal vector.
3. Transpose the result from step 2 and divide it again by the diagonal vector.
4. Add back the diagonal that was zeroed out in step 1.
import tensorflow as tf

out = [[ 3,  6,  5,  4],
       [ 6,  5, 10, 13],
       [ 5, 10,  6, 22],
       [ 4, 13, 22,  9]]
tensor = tf.constant(out, dtype=tf.float32)
# pairs [i, i] indexing the diagonal
diag_indices = tf.tile(tf.range(tf.shape(tensor)[0])[..., None], [1, 2])
diag = tf.gather_nd(tensor, diag_indices)  # [3. 5. 6. 9.]
diag_matrix = tf.linalg.tensor_diag(diag)
zero_diag_matrix = tensor - diag_matrix
res = tf.transpose(zero_diag_matrix / diag) / diag + diag_matrix

with tf.Session() as sess:
    print(res.eval())
# [[3.         0.4        0.27777776 0.14814815]
#  [0.4        5.         0.33333334 0.28888887]
#  [0.27777776 0.3333333  6.         0.4074074 ]
#  [0.14814815 0.28888887 0.4074074  9.        ]]
Using numpy, you could do as follows:
import numpy as np

out = np.array([[ 3,  6,  5,  4],
                [ 6,  5, 10, 13],
                [ 5, 10,  6, 22],
                [ 4, 13, 22,  9]], dtype=float)
# diagonal elements of out
d = np.copy(np.diagonal(out))
# indices of the lower triangular matrix
tril_ix = np.tril_indices_from(out, k=-1)
# cumulative sum of the diagonal values
# over the first axis of a square matrix
dx = np.cumsum(np.diag(d), 1)
# replicate over the lower triangle
dx[tril_ix] += np.rot90(dx, k=1)[::-1][tril_ix]
# same but accumulating the diagonal elements
# upwards on the y axis
dy = np.cumsum(np.diag(d)[::-1], 0)[::-1]
# replicate over the lower triangle
dy[tril_ix] += np.rot90(dy, k=1)[::-1][tril_ix]
# mask where to apply the product
m = dy != 0
# perform the division and multiplication
out[m] = out[m] / (dx[m] * dy[m])
np.fill_diagonal(out, d)
print(out)
array([[3.        , 0.4       , 0.27777778, 0.14814815],
       [0.4       , 5.        , 0.33333333, 0.28888889],
       [0.27777778, 0.33333333, 6.        , 0.40740741],
       [0.14814815, 0.28888889, 0.40740741, 9.        ]])
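For what it's worth, the same result can be obtained more directly with np.outer, which builds the full matrix of pairwise diagonal products d[i]*d[j] in one step. This is an alternative sketch of mine, not part of the original answers:
import numpy as np

out = np.array([[ 3,  6,  5,  4],
                [ 6,  5, 10, 13],
                [ 5, 10,  6, 22],
                [ 4, 13, 22,  9]], dtype=float)
d = np.diagonal(out).copy()
# divide each element by d[i] * d[j], then put the original diagonal back
res = out / np.outer(d, d)
np.fill_diagonal(res, d)
print(res)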
Here's a tensorflow version.
import tensorflow as tf
import numpy as np

out = tf.Variable([[ 3,  6,  5,  4],
                   [ 6,  5, 10, 13],
                   [ 5, 10,  6, 22],
                   [ 4, 13, 22,  9]], dtype=tf.float32)
# this solution only works for square matrices
assert out.shape[-2] == out.shape[-1]
out_diag = tf.linalg.diag_part(out)
res = tf.Variable(tf.zeros(out.shape, dtype=tf.float32))
for i in tf.range(out.shape[0]):
    # fill the column below the diagonal, then the row to the right of it
    _ = res[..., (i+1):, i].assign(out[..., (i+1):, i] / out_diag[..., (i+1):] / out_diag[..., i])
    _ = res[..., i, (i+1):].assign(out[..., i, (i+1):] / out_diag[..., (i+1):] / out_diag[..., i])
# the loop leaves the diagonal at zero; restore the original values
_ = res.assign_add(tf.linalg.diag(out_diag))
print(res)

Tensorflow 2 gradient gives nan results for pow

The following simplified code outputs nan for derivatives when x=0. I'm running tensorflow 2.0.0.
import tensorflow as tf

x = tf.Variable([[-1.0], [0.0], [1.0]])
with tf.GradientTape(persistent=True) as t:
    t.watch(x)
    # case 1: y = x^4
    # y = tf.reduce_sum(tf.pow(x, 4), axis=1)  # gives nan for the 5th derivative at x=0
    # case 2: y = x + x^2 + x^3 + x^4
    y = tf.reduce_sum(tf.pow(x, [[1, 2, 3, 4]]), axis=1)  # gives nan for 2nd to 5th derivative at x=0
    dy_dx = t.gradient(y, x)
    d2y_dx2 = t.gradient(dy_dx, x)
    d3y_dx3 = t.gradient(d2y_dx2, x)
    d4y_dx4 = t.gradient(d3y_dx3, x)
    d5y_dx5 = t.gradient(d4y_dx4, x)
del t
tf.print(y)
tf.print(tf.transpose(dy_dx))  # transpose only to fit on one line when printed
tf.print(tf.transpose(d2y_dx2))
tf.print(tf.transpose(d3y_dx3))
tf.print(tf.transpose(d4y_dx4))
tf.print(tf.transpose(d5y_dx5))
This outputs correct values except when x=0:
[0 0 4]
[[-2 1 10]]
[[8 -nan(ind) 20]]
[[-18 -nan(ind) 30]]
[[24 -nan(ind) 24]]
[[0 -nan(ind) 0]]
If you run the tf.pow(x, 4) case instead, the nan only shows up for the 5th derivative:
[1 0 1]
[[-4 0 4]]
[[12 0 12]]
[[-24 0 24]]
[[24 24 24]]
[[-0 -nan(ind) 0]]
So my questions are:
1. The tensorflow documentation doesn't explicitly say that the pow function supports two parameters of different sizes, but the first output y is correct. Does anyone have experience with this? I'm expecting a matrix of all 3 input x values raised to all 4 powers.
2. Is the nan value returned from the gradient a bug I should report? I did find this previous, possibly related issue, but it was fixed: https://github.com/tensorflow/tfjs/issues/346
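Regarding the first question (an observation of mine, not an authoritative answer): tf.pow appears to follow NumPy-style broadcasting, which is consistent with the correct y output, so a (3, 1) base against a (1, 4) exponent yields a (3, 4) matrix of every x raised to every power before reduce_sum collapses it:
import tensorflow as tf

x = tf.constant([[-1.0], [0.0], [1.0]])  # shape (3, 1)
p = tf.constant([[1.0, 2.0, 3.0, 4.0]])  # shape (1, 4)
print(tf.pow(x, p).shape)                # (3, 4): element [i, j] is x_i ** p_j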

Solve the linear equations system AX = B in Python, np.linalg.solve not working

I'm trying to solve the linear equation AX = B where A, X, B are matrices.
I've tried using numpy's np.linalg.solve function, but the result seems to be wrong.
Example:
Matrix A
[9 1 8]
[3 2 5]
[1 6 5]
Matrix B
[7 0 5]
[7 8 4]
[5 6 7]
So to solve for X, I've used:
X = np.linalg.solve(A,B)
The result is:
X
[ 1.17521368 -0.17948718 0.40598291]
[ 0.20512821 -0.30769231 0.74358974]
[-0.56410256 -0.15384615 1.20512821]
But if I try to verify the result by multiplying A by X, the result is anything but B:
B
[ 5.40598291 -2.02564103 8.86752137]
[ 7.61111111 -4.33333333 13.61111111]
[ 3.15811966 -3.82051282 14.92735043]
If I use this:
np.matmul(B, np.linalg.inv(A))
instead of the solve function, I get the same results.
Is there something I am missing here?
EDIT 1:
I've printed
np.allclose(np.dot(A, X), B)
and it returns False.
EDIT 2:
Here is the code I'm using:
B = np.array([7, 0, 5, 7, 8, 4, 5, 6, 7]).reshape(3, 3)
A = np.array([9, 1, 8, 3, 2, 5, 1, 6, 5]).reshape(3, 3)
X = np.linalg.solve(A, B)
print(X)
# [[-1.70967742 -4.48387097  0.08064516]
#  [-1.35483871 -2.74193548  0.79032258]
#  [ 2.96774194  5.38709677  0.43548387]]
My apologies if this is a very basic question; I appreciate any help.
Thanks.
My results with your arrays look right:
In [582]: A=np.array([9,1,8,3,2,5,1,6,5]).reshape(3,3)
In [583]: B=np.array([7,0,5,7,8,4,5,6,7]).reshape(3,3)
In [584]: x=np.linalg.solve(A,B)
In [585]: x
Out[585]:
array([[-1.70967742, -4.48387097, 0.08064516],
[-1.35483871, -2.74193548, 0.79032258],
[ 2.96774194, 5.38709677, 0.43548387]])
In [586]: A@x
Out[586]:
array([[7., 0., 5.],
[7., 8., 4.],
[5., 6., 7.]])
The other approach: AX = B => X = A^-1·B:
In [591]: np.linalg.inv(A)@B
Out[591]:
array([[-1.70967742, -4.48387097, 0.08064516],
[-1.35483871, -2.74193548, 0.79032258],
[ 2.96774194, 5.38709677, 0.43548387]])
And formally testing for equality:
In [602]: np.allclose(A@np.linalg.solve(A, B), B)
Out[602]: True
The result X is correct. To verify your solution, as per the official docs you can use allclose(), which returns True if two arrays (AX and B) are equal element-wise, within a tolerance.
import numpy as np
A = np.array([[9, 1, 8], [3, 2, 5], [1, 6, 5]])
B = np.array([[7, 0, 5], [7, 8, 4], [5, 6, 7]])
X = np.linalg.solve(A,B)
np.allclose(np.dot(A, X), B)
# True
In your case, it indeed returns True.
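A note on the likely source of the original confusion (my observation, not part of the answers above): matrix multiplication is not commutative, so the solution of AX = B is A^-1 B, whereas np.matmul(B, np.linalg.inv(A)) computes B A^-1, which solves XA = B instead:
import numpy as np

A = np.array([[9, 1, 8], [3, 2, 5], [1, 6, 5]])
B = np.array([[7, 0, 5], [7, 8, 4], [5, 6, 7]])
X = np.linalg.solve(A, B)                        # solves A @ X = B
assert np.allclose(np.linalg.inv(A) @ B, X)      # A^-1 B is the same solution
assert not np.allclose(B @ np.linalg.inv(A), X)  # B A^-1 is a different matrix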

Manually project coordinates similar to gluLookAt in python

I'm trying to implement a viewing matrix and projection, similar to gluLookAt, to get the view position of each 3D coordinate. I have implemented something that seems close to working, but is reversed.
For example, the following code gets the correct position when I don't actually change the coordinates, but if I change the up-vector to point towards X instead of Y, I get reversed coordinates.
import numpy as np

def normalize_vector(vector):
    return vector / (np.linalg.norm(vector))

def get_lookat_matrix(position_vector, front_vector, up_vector):
    m1 = np.zeros([4, 4], dtype=np.float32)
    m2 = np.zeros([4, 4], dtype=np.float32)
    z = normalize_vector(-front_vector)
    x = normalize_vector(np.cross(up_vector, z))
    y = np.cross(z, x)
    m1[:3, 0] = x
    m1[:3, 1] = y
    m1[:3, 2] = z
    m1[3, 3] = 1.0
    m2[0, 0] = m2[1, 1] = m2[2, 2] = 1.0
    m2[:3, 3] = -position_vector
    m2[3, 3] = 1.0
    return np.matmul(m1, m2)

def get_projection_matrix(near, far):
    aspect = 1.0
    fov = 1.0  # 90 Degrees
    m = np.zeros([4, 4], dtype=np.float32)
    m[0, 0] = fov/aspect
    m[1, 1] = fov
    m[2, 2] = (-far)/(far-near)
    m[2, 3] = (-near*far)/(far-near)
    m[3, 2] = -1.0
    return m

position_vector = np.array([0, 0, 0], dtype=np.float32)
front_vector = np.array([0, 0, -1], dtype=np.float32)
up_vector = np.array([0, 1, 0], dtype=np.float32)
viewing_matrix = get_lookat_matrix(position_vector=position_vector, front_vector=front_vector, up_vector=up_vector)
print("viewing_matrix\n", viewing_matrix, "\n\n")
projection_matrix = get_projection_matrix(near=0.1, far=100.0)
point = np.array([1, 0, -10, 1], dtype=np.float32)
projected_point = projection_matrix.dot(viewing_matrix.dot(point))
# Normalize
projected_point /= projected_point[3]
print(projected_point)
And it happens with many changes of the coordinates. I'm not sure where I am wrong.
gluLookAt defines a 4*4 viewing transformation matrix for use with OpenGL.
A "mathematical" 4*4 matrix looks like this:
  c0   c1   c2   c3            c0   c1   c2   c3
[ Xx   Yx   Zx   Tx ]        [ 0    4    8   12 ]
[ Xy   Yy   Zy   Ty ]        [ 1    5    9   13 ]
[ Xz   Yz   Zz   Tz ]        [ 2    6   10   14 ]
[ 0    0    0    1  ]        [ 3    7   11   15 ]
But the memory image of a 4*4 OpenGL matrix looks like this:
[ Xx, Xy, Xz, 0, Yx, Yy, Yz, 0, Zx, Zy, Zz, 0, Tx, Ty, Tz, 1 ]
See The OpenGL Shading Language 4.6, 5.4.2 Vector and Matrix Constructors, page 101
and OpenGL ES Shading Language 3.20 Specification, 5.4.2 Vector and Matrix Constructors, page 100:
To initialize a matrix by specifying vectors or scalars, the components are assigned to the matrix elements in column-major order.
mat4(float, float, float, float,   // first column
     float, float, float, float,   // second column
     float, float, float, float,   // third column
     float, float, float, float);  // fourth column
Note, compared to a mathematical matrix where the columns are written from top to bottom, which feels natural, at the initialization of an OpenGL matrix the columns are written from left to right. This leads to the benefit that the x, y, z components of an axis or of the translation are in direct succession in memory. This is a big advantage when accessing the axis vectors or the translation vector of the matrix.
See also Data Type (GLSL) - Matrix constructors.
This means you have to "swap" columns and rows (transpose) of the matrix:
def get_lookat_matrix(position_vector, front_vector, up_vector):
    m1 = np.zeros([4, 4], dtype=np.float32)
    m2 = np.zeros([4, 4], dtype=np.float32)
    z = normalize_vector(-front_vector)
    x = normalize_vector(np.cross(up_vector, z))
    y = np.cross(z, x)
    m1[0, :3] = x
    m1[1, :3] = y
    m1[2, :3] = z
    m1[3, 3] = 1.0
    m2[0, 0] = m2[1, 1] = m2[2, 2] = 1.0
    m2[3, :3] = -position_vector
    m2[3, 3] = 1.0
    return np.matmul(m1, m2)

def get_projection_matrix(near, far):
    aspect = 1.0
    fov = 1.0  # 90 Degrees
    m = np.zeros([4, 4], dtype=np.float32)
    m[0, 0] = fov/aspect
    m[1, 1] = fov
    m[2, 2] = (-far+near)/(far-near)
    m[3, 2] = (-2.0*near*far)/(far-near)
    m[2, 3] = -1.0
    return m
There's a minor change you must make:
m[2, 2] = -(far+near)/(far-near)      # instead of m[2, 2] = (-far)/(far-near)
m[2, 3] = (-2.0*near*far)/(far-near)  # instead of m[2, 3] = (-near*far)/(far-near)
The big thing is the row/column order of your matrices.
As @Rabbid76 pointed out, column-major order is preferred. GLSL provides a function to transpose a matrix. You can also tell GL to transpose the matrix when it's passed to the GPU with the glUniformMatrix family of commands.
Let's see how to work with row-major order matrices, as your code does.
The goal, for now on the CPU, is to get finalPoint = matrixMultiply(C, P), with C the combined matrix and P the point coordinates. matrixMultiply is whatever function you use to do matrix multiplication. Remember that order matters: A·B is not the same as B·A.
Because C is a 4x4 matrix and P is 1x4, C·P is not possible; it must be P·C.
Notice that with column-major order P is 4x1, and then C·P is the right operation.
Let's call L the look-at matrix (the proper name is view matrix). It's formed by an orientation matrix O and a translation matrix T. With column-major order it is L = O·T.
A property of the transpose is (A·B)^T = B^T·A^T.
So with row-major order you have O = Oc^T and T = Tc^T, where c stands for column-major order. Then O·T = Oc^T·Tc^T = (Tc·Oc)^T. Hey! What we want is (Oc·Tc)^T = Tc^T·Oc^T = T·O. Notice the change in the order of multiplication?
So, if you work with row-major order matrices, the order in which they are multiplied is swapped.
The view & projection combined matrix must also be swapped.
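A quick numpy check of that transpose identity (illustrative only, my addition):
import numpy as np

A = np.random.rand(4, 4)
B = np.random.rand(4, 4)
# (A·B)^T == B^T·A^T, which is why the multiplication order
# flips when switching between row- and column-major conventions
assert np.allclose((A @ B).T, B.T @ A.T)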
Thus replace:
return np.matmul(m2, m1)  # was: return np.matmul(m1, m2)
and
# was: projected_point = projection_matrix.dot(viewing_matrix.dot(point))
projected_point = point.dot(viewing_matrix.dot(projection_matrix))
Despite all of the above, I recommend working with column-major order. That's best for OpenGL, and you'll better understand any maths and tutorials you find on OpenGL.

How do I overwrite a row vector in a numpy array?

I am trying to normalize each row vector of the numpy array x, but I'm facing 2 problems:
1. I'm unable to update the row vectors of x.
2. Is it possible to avoid the for loop (line 6) with any numpy functions?
import numpy as np
x = np.array([[0, 3, 4], [1, 6, 4]])
c = x ** 2
for i in range(0, len(x)):
    print(x[i]/np.sqrt(c[i].sum()))  # prints [0.  0.6 0.8]
    x[i] = x[i]/np.sqrt(c[i].sum())
    print(x[i])  # prints [0 0 0]
print(x)  # prints [[0 0 0] [0 0 0]] and wasn't updated
I've just recently started out with numpy, so any assistance would be greatly appreciated!
I'm unable to update the row vectors of x
Your np.array call has no dtype argument, so it defaults to an integer dtype (here <class 'numpy.int32'>), and assigning floats into an integer array truncates them. If you wish to store floats in the array, add a float dtype:
x = np.array([
    [0, 3, 4],
    [1, 6, 4]
], dtype=np.float64)
To see this, compare
x = np.array([
    [0, 3, 4],
    [1, 6, 4]
], dtype=np.float64)
print(type(x[0][0]))  # output = <class 'numpy.float64'>
to
x = np.array([
    [0, 3, 4],
    [1, 6, 4]
])
print(type(x[0][0]))  # output = <class 'numpy.int32'>
Is it possible to avoid the for loop (line 6) with any numpy functions?
This is how I would do it:
norm1, norm2 = np.linalg.norm(x[0]), np.linalg.norm(x[1])
print(x[0] / norm1)
print(x[1] / norm2)
You can use:
x/np.sqrt((x*x).sum(axis=1))[:, None]
Example:
In [9]: x = np.array([[0, 3, 4] , [1, 6, 4]])
In [10]: x/np.sqrt((x*x).sum(axis=1))[:, None]
Out[10]:
array([[0.        , 0.6       , 0.8       ],
       [0.13736056, 0.82416338, 0.54944226]])
For the first question:
x = np.array([[0,3,4],[1,6,4]],dtype=np.float32)
For the second question:
x/np.sqrt(np.sum(x**2,axis=1).reshape((len(x),1)))
Given 2-dimensional array
x = np.array([[0, 3, 4] , [1, 6, 4]])
Row-wise L2 norm of that array can be calculated with:
norm = np.linalg.norm(x, axis = 1)
print(norm)
[5. 7.28010989]
You cannot divide array x of shape (2, 3) by norm of shape (2,); the following trick enables that by adding an extra dimension to norm:
# Divide by adding extra dimension
x = x / norm[:, None]
print(x)
[[0.         0.6        0.8       ]
 [0.13736056 0.82416338 0.54944226]]
This solves both of your questions.
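As a closing aside (my addition, not part of the answer above): np.linalg.norm also accepts keepdims=True, which keeps the reduced axis so the division broadcasts without the manual [:, None]:
import numpy as np

x = np.array([[0, 3, 4], [1, 6, 4]], dtype=float)
# keepdims=True returns shape (2, 1) instead of (2,),
# so the division broadcasts row-wise directly
x /= np.linalg.norm(x, axis=1, keepdims=True)
print(x)
# [[0.         0.6        0.8       ]
#  [0.13736056 0.82416338 0.54944226]]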
