How to use numpy in matrix iterations like Matlab - python

I am used to making my discrete-time control system simulations in Matlab, and now I'm trying Python and numpy.
My code below works, but I would like to iterate over the numpy vector instead of appending values to a list. Is that possible?
In other words, instead of using
xl.append(xt)
ul.append(uc)
I would like to use some Matlab equivalent like x[:, k+1] = np.dot(Ad, x[:, k]) + Bd*uc, but it doesn't work in my code. If I do that, instead of the expected two-row column vector, I get a 2x2 matrix and an error.
Another question: why is it necessary to use plt.plot(tk, u[:, 0], label='u') instead of plt.plot(tk, u, label='u')?
from control.matlab import *
import math
import numpy as np
import matplotlib.pyplot as plt
Ts = 0.1
N = 50
#x = np.zeros((2, N+1))
tk = np.zeros(N)
u = np.zeros(N)
v = np.random.randn(N)/86.6 #% measurement noise
wn = 1.12
wn2 = pow(wn, 2)
A = [[0, 1], [-1.5, -1.4]]
B = [[0], [1.5]]
C = [[1, 0]]
D = 0
# Control gains
K = np.array([2.64, 3.41071429])
# Now build a feedback with control law u = -K*x
Ad = np.eye(2) + np.multiply(A, Ts)
Bd = np.multiply(B, Ts)
Cd = C
xt = [[1.0], [0.12]] # initial states
xl = []
ul = []
for k in range(0, N):
    tk[k] = k*Ts
    uc = -K.dot(xt)
    xt = np.dot(Ad, xt) + Bd*uc
    xt[1, 0] += v[k]
    xl.append(xt)
    ul.append(uc)
x = np.array(xl)
u = np.array(ul)
#x = np.delete(x, N, 1) # delete the last position of x
#s = TransferFunction.s
#Gs = wn2/(s**2 + 0*s + wn2) # This is the KF solution
#yout, T = step(Gs)
plt.rcParams["figure.figsize"] = (10, 7)
plt.figure()
#plt.plot(T, yout, label='Open loop')
plt.plot(tk, x[:, 0], label='x_0')
plt.plot(tk, x[:, 1], label='x_1')
plt.plot(tk, u[:, 0], label='u')
plt.legend()
plt.title('Pendulum ex. 7.14 Franklin book')
plt.xlabel('Time')
plt.ylabel('amp.')
plt.show()
What I want is code like this:
from control.matlab import *
import math
import numpy as np
import matplotlib.pyplot as plt
Ts = 0.1
N = 50
x = np.zeros((2, N+1))
tk = np.zeros(N)
u = np.zeros(N)
v = np.random.randn(N)/86.6 #% measurement noise
wn = 1.12
wn2 = pow(wn, 2)
A = [[0, 1], [-1.5, -1.4]]
B = [[0], [1.5]]
C = [[1, 0]]
D = 0
# Control gains
K = np.array([2.64, 3.41071429])
# Now build a feedback with control law u = -K*x
Ad = np.eye(2) + np.multiply(A, Ts)
Bd = np.multiply(B, Ts)
Cd = C
for k in range(0, N):
    tk[k] = k*Ts
    u[k] = -K.dot(x[:, k])
    x[1, k] += v[k]
    x[:, k+1] = np.dot(Ad, x[:, k]) + Bd*u[k]
x = np.delete(x, N, 1) # delete the last position of x
#s = TransferFunction.s
#Gs = wn2/(s**2 + 0*s + wn2) # This is the KF solution
#yout, T = step(Gs)
plt.rcParams["figure.figsize"] = (10, 7)
plt.figure()
#plt.plot(T, yout, label='Open loop')
plt.plot(tk, x[:, 0], label='x_0')
plt.plot(tk, x[:, 1], label='x_1')
plt.plot(tk, u[:, 0], label='u')
plt.legend()
plt.title('Pendulum ex. 7.14 Franklin book')
plt.xlabel('Time')
plt.ylabel('amp.')
plt.show()
But it results in the following error:
Traceback (most recent call last):
File "C:\Users\ ... \np_matrices_v1.py", line 46, in <module>
x[:, k+1] = np.dot(Ad, x[:, k]) + Bd*u[k]
ValueError: could not broadcast input array from shape (2,2) into shape (2,)

I don't know why, but if you try:
A = np.array([[1, 2], [2, 3]])
x = np.array([[0.5], [2.0]])
y = A.dot(x)
print(y)
xa = np.zeros((2, 10))
xa[:, 2] = A.dot(x)
You'll get:
Traceback (most recent call last):
File "C:\Users\eletr\.spyder-py3\temp.py", line 19, in <module>
xa[:, 2] = A.dot(x)
ValueError: could not broadcast input array from shape (2,1) into shape (2,)
But if you do:
import numpy as np
A = np.array([[1, 2], [2, 3]])
x = np.array([[0.5], [2.0]])
y = A.dot(x)
print(y)
xa = np.zeros((2, 10))
# xa[:, 2] = A.dot(x)
xa[:, [2]] = A.dot(x)
print(xa)
You'll get the correct answer:
[[4.5]
[7. ]]
[[0. 0. 4.5 0. 0. 0. 0. 0. 0. 0. ]
[0. 0. 7. 0. 0. 0. 0. 0. 0. 0. ]]
Can anyone explain it?

In [248]: A = np.array([[1, 2], [2, 3]])
...: x = np.array([[0.5], [2.0]])
In [249]: A.shape, x.shape
Out[249]: ((2, 2), (2, 1))
In [250]: y = A.dot(x)
In [251]: y.shape
Out[251]: (2, 1)
Note the shapes. x is (2,1), and as a result y is too. y can be assigned to a (2,1) slot, but not a (2,) shape.
In [252]: xa = np.zeros((2,5),int)
In [253]: xa
Out[253]:
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
In [254]: xa[:,2]
Out[254]: array([0, 0]) # (2,) shape
In [255]: xa[:,[2]]
Out[255]:
array([[0], # (2,1) shape
[0]])
In contrast to MATLAB, numpy arrays can be 1d, e.g. (2,). Also, leading dimensions are the outermost, as opposed to trailing. MATLAB readily reduces a (2,3,1) shape to (2,3), but a (2,1,1) only becomes (2,1).
Broadcasting is the way numpy works with arrays that can differ in shape. The two basic rules are that
- leading size-1 dimensions can be added automatically to match
- size-1 dimensions can be adjusted to match
Thus a (2,) can become a (1,2).
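For example, a quick sketch of those two rules in action:
a = np.ones(2)          # shape (2,)
b = np.ones((3, 2))     # shape (3, 2)
(a + b).shape           # (3, 2): a is treated as (1, 2), then the size-1 dimension is stretched to 3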
If you remove the inner [] from x, you get a 1d array:
In [256]: x = np.array([0.5, 2.0])
In [257]: x.shape
Out[257]: (2,)
In [258]: A.dot(x)
Out[258]: array([4.5, 7. ]) # (2,) shape
This can then be assigned to a column of xa: xa[:,2] = A.dot(x)
reshape and ravel can be used to remove dimensions, as can indexing, e.g. A.dot(x)[:, 0].
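Applied to the loop in the question, one minimal sketch of a fix (keeping the question's variable names) is to keep the right-hand side 1d, for example by flattening Bd. If u stays a plain 1d array of scalars, plt.plot(tk, u) also works directly, without the [:, 0] indexing:
for k in range(N):
    tk[k] = k*Ts
    u[k] = -K.dot(x[:, k])                  # (2,) dot (2,) -> scalar
    x[1, k] += v[k]
    # Bd.ravel() has shape (2,), so the sum is (2,) and fits the (2,) slot x[:, k+1]
    x[:, k+1] = Ad.dot(x[:, k]) + Bd.ravel()*u[k]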

Related

How to create a 2D Numpy array using a function on a 1D array?

I'm trying to write a function in Python using Numpy that would take as input a vector x = array([x_1, x_2, ..., x_n]) and a positive integer m and return an array of dimension n by m of the form (say n==3, m==4):
array([[ x_1, x_1^2, x_1^3, x_1^4],
[ x_2, x_2^2, x_2^3, x_2^4],
[ x_3, x_3^2, x_3^3, x_3^4]])
So far I have
import numpy as np
def f(x, m):
    return x ** np.arange(1, m+1)
which works fine for n==1, but raises an exception for n > 1.
I am trying to avoid using loops. Is there a nice way of making this in Numpy?
Thank you!
You can use np.vander:
x = np.array([2.3,1.3,4.4])
m = 4
np.vander(x,m+1,True)[:,1:]
# array([[ 2.3 , 5.29 , 12.167 , 27.9841],
# [ 1.3 , 1.69 , 2.197 , 2.8561],
# [ 4.4 , 19.36 , 85.184 , 374.8096]])
or
use np.power.outer
np.power.outer(x,np.arange(1,m+1))
# same output
or
use np.logspace
np.logspace(1,m,m,True,x[:,None])
# same output
or
use np.multiply.accumulate or cumprod
np.multiply.accumulate(np.broadcast_to(x,(m,np.size(x))),axis=0).T
# same output
np.broadcast_to(x,(m,np.size(x))).cumprod(axis=0).T
# same output
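A quick sanity check that the alternatives agree (a small sketch reusing the x and m defined above):
v = np.vander(x, m+1, True)[:, 1:]
print(np.allclose(v, np.power.outer(x, np.arange(1, m+1))))      # True
print(np.allclose(v, np.logspace(1, m, m, True, x[:, None])))    # True
acc = np.multiply.accumulate(np.broadcast_to(x, (m, np.size(x))), axis=0).T
print(np.allclose(v, acc))                                       # True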
You can use broadcasting for this:
def f(x, m):
    return x[:, np.newaxis] ** np.arange(1, m + 1)[np.newaxis, :]
This sets x as a shape (n, 1) vector, the range as a (1, m) vector, and has them interact in the way that you want.
Here's what it looks like in my IPython REPL.
In [1]: import numpy as np
In [2]: def f(x, m):
...:     return x[:, np.newaxis] ** np.arange(1, m + 1)[np.newaxis, :]
...:
In [3]: x = np.array([1,2,3,4,5])
In [4]: f(x, 6)
Out[4]:
array([[ 1, 1, 1, 1, 1, 1],
[ 2, 4, 8, 16, 32, 64],
[ 3, 9, 27, 81, 243, 729],
[ 4, 16, 64, 256, 1024, 4096],
[ 5, 25, 125, 625, 3125, 15625]])

How do I overwrite a row vector in a numpy array?

I am trying to normalize each row vector of the numpy array x, but I'm facing 2 problems:
- I'm unable to update the row vectors of x (source code below)
- Is it possible to avoid the for loop (line 6) with any numpy functions?
import numpy as np
x = np.array([[0, 3, 4] , [1, 6, 4]])
c = x ** 2
for i in range(0, len(x)):
    print(x[i]/np.sqrt(c[i].sum())) #prints [0. 0.6 0.8]
    x[i] = x[i]/np.sqrt(c[i].sum())
    print(x[i]) #prints [0 0 0]
print(x) #prints [[0 0 0] [0 0 0]] and wasn't updated
I've just recently started out with numpy, so any assistance would be greatly appreciated!
I'm unable to update the row vectors of x (source code below)
Your np.array has no dtype argument, so it uses <type 'numpy.int32'>. If you wish to store floats in the array, add a float dtype:
x = np.array([
[0,3,4],
[1,6,4]
], dtype = np.float)
To see this, compare
x = np.array([
[0,3,4],
[1,6,4]
], dtype = np.float)
print type(x[0][0]) # output = <type 'numpy.float64'>
to
x = np.array([
[0,3,4],
[1,6,4]
])
print type(x[0][0]) # output = <type 'numpy.int32'>
is it possible to avoid the for loop (line 6) with any numpy functions?
This is how I would do it:
norm1, norm2 = np.linalg.norm(x[0]), np.linalg.norm(x[1])
print x[0] / norm1
print x[1] / norm2
You can use:
x/np.sqrt((x*x).sum(axis=1))[:, None]
Example:
In [9]: x = np.array([[0, 3, 4] , [1, 6, 4]])
In [10]: x/np.sqrt((x*x).sum(axis=1))[:, None]
Out[10]:
array([[0. , 0.6 , 0.8 ],
[0.13736056, 0.82416338, 0.54944226]])
For the first question:
x = np.array([[0,3,4],[1,6,4]],dtype=np.float32)
For the second question:
x/np.sqrt(np.sum(x**2,axis=1).reshape((len(x),1)))
Given 2-dimensional array
x = np.array([[0, 3, 4] , [1, 6, 4]])
Row-wise L2 norm of that array can be calculated with:
norm = np.linalg.norm(x, axis = 1)
print(norm)
[5. 7.28010989]
You cannot divide array x of shape (2, 3) by norm of shape (2,); the following trick enables that by adding an extra dimension to norm:
# Divide by adding extra dimension
x = x / norm[:, None]
print(x)
[[0. 0.6 0.8 ]
[0.13736056 0.82416338 0.54944226]]
This solves both of your questions.
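Another option, as a sketch (it assumes NumPy 1.10+ for the keepdims argument, and a float dtype as discussed above), is to let np.linalg.norm keep the reduced axis so no manual reshaping is needed:
x = np.array([[0, 3, 4], [1, 6, 4]], dtype=float)   # float dtype, so in-place division keeps decimals
x /= np.linalg.norm(x, axis=1, keepdims=True)       # norms have shape (2, 1) and broadcast row-wise
print(x)
# [[0.         0.6        0.8       ]
#  [0.13736056 0.82416338 0.54944226]]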

Numpy: Get rectangle area just the size of mask

I have an image and a mask. Both are numpy arrays. I get the mask through GraphSegmentation (cv2.ximgproc.segmentation), so the masked area isn't a rectangle, but it isn't divided into pieces either. I'd like to get a rectangle just the size of the masked area, but I don't know an efficient way to do it.
In other words, unmasked pixels have a value of 0 and masked pixels have a value over 0, so I want to get a rectangle where...
top = the smallest index of axis 0 whose value > 0
bottom = the largest index of axis 0 whose value > 0
left = the smallest index of axis 1 whose value > 0
right = the largest index of axis 1 whose value > 0
image = src[top : bottom, left : right]
My code is below
segmentation = cv2.ximgproc.segmentation.createGraphSegmentation()
src = cv2.imread('image_file')
segment = segmentation.processImage(src)
for i in range(np.max(segment)):
    dst = np.array(src)
    dst[segment != i] = 0
    cv2.imwrite('output_file', dst)
If you prefer pure Numpy, you can achieve this using np.where and np.meshgrid:
i, j = np.where(mask)
indices = np.meshgrid(np.arange(min(i), max(i) + 1),
                      np.arange(min(j), max(j) + 1),
                      indexing='ij')
sub_image = image[indices]
np.where returns a tuple of arrays specifying, pairwise, the indices in each axis for each non-zero element of mask. We then create arrays of all the row and column indices we will want using np.arange, and use np.meshgrid to generate two grid-shaped arrays that index the part of the image we're interested in. Note that we specify matrix-style indexing using indexing='ij' to avoid having to transpose the result (the default is Cartesian-style indexing).
Essentially, meshgrid constructs indices so that:
image[indices][a, b] == image[indices[0][a, b], indices[1][a, b]]
Example
Start with the following:
>>> image = np.arange(12).reshape((4, 3))
>>> image
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Let's say we want to extract the [[3,4],[6,7]] sub-matrix, which is the bounding rectangle for the following mask:
>>> mask = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,0]])
>>> mask
array([[0, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 0, 0]])
Then, applying the above method:
>>> i, j = np.where(mask)
>>> indices = np.meshgrid(np.arange(min(i), max(i) + 1), np.arange(min(j), max(j) + 1), indexing='ij')
>>> image[indices]
array([[3, 4],
[6, 7]])
Here, indices[0] is a matrix of row indices, while indices[1] is the corresponding matrix of column indices:
>>> indices[0]
array([[1, 1],
[2, 2]])
>>> indices[1]
array([[0, 1],
[0, 1]])
I think using np.amax and np.amin and cropping the image is much faster.
i, j = np.where(mask)
indices = np.meshgrid(np.arange(min(i), max(i) + 1),
                      np.arange(min(j), max(j) + 1),
                      indexing='ij')
sub_image = image[indices]
Time taken: 50 msec
where = np.array(np.where(mask))
x1, y1 = np.amin(where, axis=1)
x2, y2 = np.amax(where, axis=1)
sub_image = image[x1:(x2+1), y1:(y2+1)]
Time taken: 5.6 msec
I don't get Hans's results when running the two methods (using NumPy 1.18.5). In any case, there is a much more efficient method, where you take the argmax along each dimension:
i, j = np.where(mask)
y, x = np.meshgrid(
    np.arange(min(i), max(i) + 1),
    np.arange(min(j), max(j) + 1),
    indexing="ij",
)
Took 38 ms
where = np.array(np.where(mask))
y1, x1 = np.amin(where, axis=1)
y2, x2 = np.amax(where, axis=1) + 1
sub_image = image[y1:y2, x1:x2]
Took 35 ms
maskx = np.any(mask, axis=0)
masky = np.any(mask, axis=1)
x1 = np.argmax(maskx)
y1 = np.argmax(masky)
x2 = len(maskx) - np.argmax(maskx[::-1])
y2 = len(masky) - np.argmax(masky[::-1])
sub_image = image[y1:y2, x1:x2]
Took 2 ms
Timings script
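A rough sketch of how such a timing comparison could be reproduced (the mask and image here are synthetic stand-ins, not the original data):
import timeit
import numpy as np

rng = np.random.default_rng(0)
mask = np.zeros((1000, 1000), dtype=bool)
mask[200:800, 300:700] = rng.random((600, 400)) > 0.5   # hypothetical sparse mask
image = rng.random((1000, 1000))

def crop_meshgrid():
    i, j = np.where(mask)
    idx = np.meshgrid(np.arange(min(i), max(i) + 1),
                      np.arange(min(j), max(j) + 1), indexing='ij')
    return image[tuple(idx)]

def crop_amin_amax():
    where = np.array(np.where(mask))
    y1, x1 = np.amin(where, axis=1)
    y2, x2 = np.amax(where, axis=1) + 1
    return image[y1:y2, x1:x2]

def crop_argmax():
    maskx = np.any(mask, axis=0)
    masky = np.any(mask, axis=1)
    x1, y1 = np.argmax(maskx), np.argmax(masky)
    x2 = len(maskx) - np.argmax(maskx[::-1])
    y2 = len(masky) - np.argmax(masky[::-1])
    return image[y1:y2, x1:x2]

for f in (crop_meshgrid, crop_amin_amax, crop_argmax):
    print(f.__name__, timeit.timeit(f, number=100))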

Tensorflow: tensor multiplication row-by-row with more different matrices

I have a matrix A which is defined as a tensor in tensorflow, with n rows and p columns. Moreover, I have, say, k matrices B1, ..., Bk with p rows and q columns. My goal is to obtain a resulting matrix C of n rows and q columns where each row of C is the matrix product of the corresponding row in A with one of the B matrices. Which B to choose is determined by a given index vector I of dimension n that can take values ranging from 1 to k. In my case, the B are weight variables while I is another tensor variable given as input.
An example of code in numpy would look as follows:
A = array([[1, 0, 1],
[0, 0, 1],
[1, 1, 0],
[0, 1, 0]])
B1 = array([[1, 1],
[2, 1],
[3, 6]])
B2 = array([[1, 5],
[3, 2],
[0, 2]])
B = [B1, B2]
I = [1, 0, 0, 1]
n = A.shape[0]
p = A.shape[1]
q = B1.shape[1]
C = np.zeros(shape = (n,q))
for i in xrange(n):
    C[i,:] = np.dot(A[i,:],B[I[i]])
How can this be translated into TensorFlow?
In my specific case the variables are defined as:
A = tf.placeholder("float", [None, p])
B1 = tf.Variable(tf.random_normal([p, q]))
B2 = tf.Variable(tf.random_normal([p, q]))
I = tf.placeholder("float",[None])
This is a bit tricky and there are probably better solutions. Taking your first example, my approach computes C as follows:
C = diag([0,1,1,0]) * A * B1 + diag([1,0,0,1]) * A * B2
where diag([0,1,1,0]) is the diagonal matrix having vector [0,1,1,0] in its diagonal. This can be achieved through tf.diag() in TensorFlow.
For convenience, let me assume that k<=n (otherwise some B matrices would remain unused). The following script obtains those diagonal values from vector I and computes C as mentioned above:
k = 2
n = 4
p = 3
q = 2
a = array([[1, 0, 1],
[0, 0, 1],
[1, 1, 0],
[0, 1, 0]])
index_input = [1, 0, 0, 1]
import tensorflow as tf
# Creates a dim·dim tensor having the same vector 'vector' in every row
def square_matrix(vector, dim):
    return tf.reshape(tf.tile(vector,[dim]), [dim,dim])
A = tf.placeholder(tf.float32, [None, p])
B = tf.Variable(tf.random_normal(shape=[k,p,q]))
# For the first example (with k=2): B = tf.constant([[[1, 1],[2, 1],[3, 6]],[[1, 5],[3, 2],[0, 2]]], tf.float32)
C = tf.Variable(tf.zeros((n, q)))
I = tf.placeholder(tf.int32,[None])
# Create a n·n tensor 'indices_matrix' having indices_matrix[i]=I for 0<=i<n (each row vector is I)
indices_matrix = square_matrix(I, n)
# Create a n·n tensor 'row_matrix' having row_matrix[i]=[i,...,i] for 0<=i<n (each row vector is a vector of i's)
row_matrix = tf.transpose(square_matrix(tf.range(0, n, 1), n))
# Find diagonal values by comparing tensors indices_matrix and row_matrix
equal = tf.cast(tf.equal(indices_matrix, row_matrix), tf.float32)
# Compute C
for i in range(k):
    diag = tf.diag(tf.gather(equal, i))
    mul = tf.matmul(diag, tf.matmul(A, tf.gather(B, i)))
    C = C + mul
sess = tf.Session()
sess.run(tf.initialize_all_variables())
print(sess.run(C, feed_dict={A : a, I : index_input}))
As an improvement, C may be computed using a vectorized implementation instead of using a for loop.
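For example, in plain NumPy the whole loop collapses to a single einsum over the gathered B matrices (a sketch using the arrays from the question; a TensorFlow version of the same idea would combine tf.gather with a batched multiplication):
import numpy as np

A = np.array([[1, 0, 1], [0, 0, 1], [1, 1, 0], [0, 1, 0]])
B = np.stack([np.array([[1, 1], [2, 1], [3, 6]]),
              np.array([[1, 5], [3, 2], [0, 2]])])   # shape (k, p, q)
I = np.array([1, 0, 0, 1])

# pick B[I[i]] for every row i, then contract row i of A with it
C = np.einsum('ij,ijk->ik', A, B[I])
print(C)
# [[1 7]
#  [3 6]
#  [3 2]
#  [3 2]]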
Just do 2 matrix multiplications
A1 = A[0:3:3,...] # this will get the first last index of your original but just make a new matrix
A2 = A[1:2]
in tensorflow
A1 = tf.constant([matrix elements go here])
A2 = tf.constant([matrix elements go here])
B = ...
B1 = tf.matmul(A1,B)
B2 = tf.matmul(A2,B)
C = tf.pack([B1,B2])
granted if you need to reorganize the C tensor you can also use gather
C = tf.gather(C,[0,3,2,1])

How to vectorize multiple levels of recursion?

I am a newbie to python and numpy (and programming in general). I am trying to speed up my code as much as possible. The math involves several summations over multiple axes of a few arrays. I've attained one level of vectorization, but I can't seem to get any deeper than that and have to resort to for loops (I believe there are three levels of recursion, over M, N, and I, one of which, I, I've eliminated). Here's my code for the relevant section (this code works, but I'd like to speed it up):
def B1(n, i):
    return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)
for n in N:
    B[n, :] = B1(n, I)
for m in M:
    for n in N:
        C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
    Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])
A = np.linalg.solve(C[1:, 1:], (0.25)*Y[1:])
dmaxi is just a float and m, n and i are integers. The arrays have the following shapes:
>>> qi.shape
(551,)
>>> N.shape
(18,)
>>> M.shape
(18,)
>>> I.shape
(551,)
>>> Iq.shape
(1, 551, 3)
>>> U.shape
(1, 551, 3)
As you can see, I've vectorized the calculation of the 2nd axis of B, but I can't seem to do it for the 1st axis, C, and Y, which still require the for loops. When I try the same form of vectorization that I used for the 2nd axis of B (define a function, then pass the array as the argument), I get a broadcasting error, since it appears to be trying to calculate both axes simultaneously rather than the 1st, then the 2nd, which is why I had to force it into a for loop instead. The same problem occurs for both C and Y, which is why they're both in for loops as well. In case that's confusing, essentially what I tried was:
>>> B[:, :] = B1(N, I)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sasrec_v6.py", line 155, in B1
return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)
ValueError: operands could not be broadcast together with shapes (18) (551)
Vectorizing the 2nd axis of B made a substantial improvement to the speed of my code, so I'm assuming that the same will apply for further vectorization (I hope I'm using that term correctly by the way).
You can use broadcasting to make 2d arrays from your 1d index vectors. I haven't tested these yet, but they should work:
If you reshape the N to be a column vector, then B1 will return a 2d array:
B[N] = B1(N[:, None], I)
For Y and C, I'd use np.einsum to have better control over which axes are multiplied (probably this could be done with np.dot as well, but I'm not sure how).
C[M[:, None], N] = np.einsum('ij,kj->ik',
                             B[M]/np.square(qi*Iq[0, :, 2]),
                             B[N])
Y[M] = np.einsum('i, ki->k',
                 U[0, :, 1]/np.square(qi*Iq[0, :, 2]),
                 B[M])
To see what that indexing trick does:
In [1]: a = np.arange(3)
In [2]: a
Out[2]: array([0, 1, 2])
In [3]: a[:, None]
Out[3]:
array([[0],
[1],
[2]])
In [4]: b = np.arange(4,1,-1)
In [5]: b
Out[5]: array([4, 3, 2])
In [6]: a[:, None] * b
Out[6]:
array([[0, 0, 0],
[4, 3, 2],
[8, 6, 4]])
It saves roughly an order of magnitude in time:
In [92]: %%timeit
....: B = np.zeros((18, 551))
....: C = np.zeros((18, 18))
....: Y = np.zeros((18))
....: for n in N:
....:     B[n, :] = B1(n, I)
....: for m in M:
....:     for n in N:
....:         C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
....:     Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])
....:
100 loops, best of 3: 15.8 ms per loop
In [93]: %%timeit
....: Bv = np.zeros((18, 551))
....: Cv = np.zeros((18, 18))
....: Yv = np.zeros((18))
....: Bv[N] = B1(N[:, None], I)
....: Cv[M[:, None], N] = np.einsum('ij,kj->ik', B[M]/np.square(qi*Iq[0, :, 2]), B[N])
....: Yv[M] = np.einsum('i, ki->k', U[0, :, 1]/np.square(qi*Iq[0, :, 2]), B[M])
....:
1000 loops, best of 3: 1.34 ms per loop
Here's my test:
import numpy as np
# make fake data:
np.random.seed(5)
qi = np.random.rand(551)
N = np.random.randint(0,18,18)#np.arange(18)
M = np.random.randint(0,18,18)#np.arange(18)
I = np.arange(551)
Iq = np.random.rand(1, 551, 3)
U = np.random.rand(1, 551, 3)
B = np.zeros((18, 551))
C = np.zeros((18, 18))
Y = np.zeros((18))
Bv = np.zeros((18, 551))
Cv = np.zeros((18, 18))
Yv = np.zeros((18))
dmaxi = 1.
def B1(n, i):
    return np.pi * n * dmaxi * (-1)**(n+1) * np.sin(qi[i]*dmaxi) * ((np.pi*n)**2 - (qi[i]*dmaxi)**2)**(-1)
for n in N:
    B[n, :] = B1(n, I)
for m in M:
    for n in N:
        C[m, n] = np.dot((1/np.square(qi*Iq[0, :, 2]))*B[m, :], B[n, :])
    Y[m] = np.dot((1/np.square(qi*Iq[0, :, 2]))*U[0, :, 1], B[m, :])
Bv[N] = B1(N[:, None], I)
print "B correct?", np.allclose(Bv, B)
# np.einsum test case:
n, m = 2, 3
a = np.arange(n*m).reshape(n,m)*8 + 2
b = np.arange(n*m)[::-1].reshape(n,m)
c = np.empty((n,n))
for i in range(n):
    for j in range(n):
        c[i,j] = np.dot(a[i],b[j])
cv = np.einsum('ij,kj->ik', a, b)
print "einsum test successful?", np.allclose(c,cv)
Cv[M[:, None], N] = np.einsum('ij,kj->ik',
                              B[M]/np.square(qi*Iq[0, :, 2]),
                              B[N])
print "C correct?", np.allclose(Cv, C)
Yv[M] = np.einsum('i, ki->k',
                  U[0, :, 1]/np.square(qi*Iq[0, :, 2]),
                  B[M])
print "Y correct?", np.allclose(Yv, Y)
output :D
B correct? True
einsum test successful? True
C correct? True
Y correct? True
