Vectorising the padding of ragged arrays [duplicate] - python

Consider the list of lists l:
l = [[1, 2, 3], [1, 2]]
If I convert this to an np.array, I'll get a one-dimensional object array with [1, 2, 3] in the first position and [1, 2] in the second position:
print(np.array(l))
[[1, 2, 3] [1, 2]]
I want this instead:
print(np.array([[1, 2, 3], [1, 2, np.nan]]))
[[  1.   2.   3.]
 [  1.   2.  nan]]
I can do this with a loop, but we all know how unpopular loops are:
def box_pir(l):
    lengths = [i for i in map(len, l)]
    shape = (len(l), max(lengths))
    a = np.full(shape, np.nan)
    for i, r in enumerate(l):
        a[i, :lengths[i]] = r
    return a

print(box_pir(l))
[[  1.   2.   3.]
 [  1.   2.  nan]]
How do I do this in a fast, vectorized way?
timing
setup functions
%%cython
import numpy as np

def box_pir_cython(l):
    lengths = [len(item) for item in l]
    shape = (len(l), max(lengths))
    a = np.full(shape, np.nan)
    for i, r in enumerate(l):
        a[i, :lengths[i]] = r
    return a

# imports for the remaining setup functions
from itertools import zip_longest
import numpy as np
import pandas as pd

def box_divikar(v):
    lens = np.array([len(item) for item in v])
    mask = lens[:, None] > np.arange(lens.max())
    out = np.full(mask.shape, np.nan)
    out[mask] = np.concatenate(v)
    return out

def box_hpaulj(LoL):
    return np.array(list(zip_longest(*LoL, fillvalue=np.nan))).T

def box_simon(LoL):
    max_len = len(max(LoL, key=len))
    return np.array([x + [np.nan] * (max_len - len(x)) for x in LoL])

def box_dawg(LoL):
    cols = len(max(LoL, key=len))
    rows = len(LoL)
    AoA = np.empty((rows, cols,))
    AoA.fill(np.nan)
    for idx in range(rows):
        AoA[idx, 0:len(LoL[idx])] = LoL[idx]
    return AoA

def box_pir(l):
    lengths = [len(item) for item in l]
    shape = (len(l), max(lengths))
    a = np.full(shape, np.nan)
    for i, r in enumerate(l):
        a[i, :lengths[i]] = r
    return a

def box_pandas(l):
    return pd.DataFrame(l).values
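These might be compared along these lines (a sketch, run in IPython for the %timeit magic; the list size mirrors the small case in the timing answer below):

l = [list(range(20)), list(range(30))] * 1000

%timeit box_pir(l)
%timeit box_pir_cython(l)
%timeit box_divikar(l)
%timeit box_hpaulj(l)
%timeit box_simon(l)
%timeit box_dawg(l)
%timeit box_pandas(l)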

This seems to be closely related to this question, where the padding was with zeros instead of NaNs. Interesting approaches were posted there, along with mine based on broadcasting and boolean indexing. So, I would just modify one line from my post there to solve this case, like so -
def boolean_indexing(v, fillval=np.nan):
    lens = np.array([len(item) for item in v])
    mask = lens[:, None] > np.arange(lens.max())
    out = np.full(mask.shape, fillval)
    out[mask] = np.concatenate(v)
    return out
Sample run -
In [32]: l
Out[32]: [[1, 2, 3], [1, 2], [3, 8, 9, 7, 3]]

In [33]: boolean_indexing(l)
Out[33]:
array([[  1.,   2.,   3.,  nan,  nan],
       [  1.,   2.,  nan,  nan,  nan],
       [  3.,   8.,   9.,   7.,   3.]])

In [34]: boolean_indexing(l,-1)
Out[34]:
array([[ 1,  2,  3, -1, -1],
       [ 1,  2, -1, -1, -1],
       [ 3,  8,  9,  7,  3]])
I have posted a few runtime results for all the posted approaches on that Q&A, which could be useful.

Probably the fastest list version uses itertools.zip_longest (izip_longest in Py2):

In [747]: np.array(list(itertools.zip_longest(*ll, fillvalue=np.nan))).T
Out[747]:
array([[  1.,   2.,   3.],
       [  1.,   2.,  nan]])

The plain zip_longest, without the fill value and transpose, produces:
In [748]: list(itertools.zip_longest(*ll))
Out[748]: [(1, 1), (2, 2), (3, None)]
Another zip 'transposes':
In [751]: list(zip(*itertools.zip_longest(*ll)))
Out[751]: [(1, 2, 3), (1, 2, None)]
Often when starting with lists (or even an object array of lists), it is faster to stick with list methods. There's a substantial overhead in creating an array or dataframe.
This isn't the first time this question has been asked:
How can I pad and/or truncate a vector to a specified length using numpy?
My answer there includes both this zip_longest and your box_pir.
I think there's also a fast numpy version using a flattened array, but I don't recall the details. It was probably given by Warren or Divakar.
I think the 'flattened' version works something along these lines:
In [809]: ll
Out[809]: [[1, 2, 3], [1, 2]]
In [810]: sll = np.hstack(ll)                        # all values in a 1d array
In [816]: res = np.empty((2, 3)); res.fill(np.nan)   # empty target
Get the flattened indices where the values go. This is the crucial step. Here the use of r_ is iterative; the fast version probably uses cumsum:
In [817]: idx = np.r_[0:3, 3:3+2]
In [818]: idx
Out[818]: array([0, 1, 2, 3, 4])
In [819]: res.flat[idx] = sll
In [820]: res
Out[820]:
array([[  1.,   2.,   3.],
       [  1.,   2.,  nan]])
================

So the missing link is the lens[:, None] > np.arange() broadcasting:
In [897]: lens = np.array([len(i) for i in ll])
In [898]: mask = lens[:, None] > np.arange(lens.max())
In [899]: mask
Out[899]:
array([[ True,  True,  True],
       [ True,  True, False]], dtype=bool)
In [900]: idx = np.where(mask.ravel())
In [901]: idx
Out[901]: (array([0, 1, 2, 3, 4], dtype=int32),)
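For completeness, here is a sketch of how that cumsum-based flat-index fill might look; this is my reconstruction, not necessarily the original fast version:

import numpy as np

def box_flat(ll):
    lens = np.array([len(i) for i in ll])
    ncols = lens.max()
    # flat start of each row in the (len(ll), ncols) target, repeated per value
    row_starts = np.repeat(np.arange(len(ll)) * ncols, lens)
    # offset of each value within its own row: 0, 1, ..., len-1 for every row
    within = np.arange(lens.sum()) - np.repeat(np.cumsum(lens) - lens, lens)
    res = np.full((len(ll), ncols), np.nan)
    res.flat[row_starts + within] = np.concatenate(ll)
    return res

print(box_flat([[1, 2, 3], [1, 2]]))
# [[ 1.  2.  3.]
#  [ 1.  2. nan]]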

I might write this as a form of slice assignment on each of the sub arrays that have been filled with a default:
def to_numpy(LoL, default=np.nan):
    cols = len(max(LoL, key=len))
    rows = len(LoL)
    AoA = np.empty((rows, cols,))
    AoA.fill(default)
    for idx in range(rows):
        AoA[idx, 0:len(LoL[idx])] = LoL[idx]
    return AoA
I added Divakar's boolean indexing as f4 and added it to the timing tests. At least in my testing (Python 2.7 and Python 3.5; NumPy 1.11), it is not the fastest.
Timing shows that izip_longest (f2) is slightly faster for most lists, but slice assignment (f1) is faster for larger lists:
from __future__ import print_function
import numpy as np

try:
    from itertools import izip_longest as zip_longest
except ImportError:
    from itertools import zip_longest

def f1(LoL):
    cols = len(max(LoL, key=len))
    rows = len(LoL)
    AoA = np.empty((rows, cols,))
    AoA.fill(np.nan)
    for idx in range(rows):
        AoA[idx, 0:len(LoL[idx])] = LoL[idx]
    return AoA

def f2(LoL):
    return np.array(list(zip_longest(*LoL, fillvalue=np.nan))).T

def f3(LoL):
    max_len = len(max(LoL, key=len))
    return np.array([x + [np.nan] * (max_len - len(x)) for x in LoL])

def f4(LoL):
    lens = np.array([len(item) for item in LoL])
    mask = lens[:, None] > np.arange(lens.max())
    out = np.full(mask.shape, np.nan)
    out[mask] = np.concatenate(LoL)
    return out

if __name__ == '__main__':
    import timeit
    for case, LoL in (('small', [list(range(20)), list(range(30))] * 1000),
                      ('medium', [list(range(20)), list(range(30))] * 10000),
                      ('big', [list(range(20)), list(range(30))] * 100000),
                      ('huge', [list(range(20)), list(range(30))] * 1000000)):
        print(case)
        for f in (f1, f2, f3, f4):
            print("   ", f.__name__, timeit.timeit("f(LoL)", setup="from __main__ import f, LoL", number=100))
Prints:
small
    f1 0.245459079742
    f2 0.209980010986
    f3 0.350691080093
    f4 0.332141160965
medium
    f1 2.45869493484
    f2 2.32307982445
    f3 3.65722203255
    f4 3.55545687675
big
    f1 25.8796288967
    f2 26.6177148819
    f3 41.6916451454
    f4 41.3140149117
huge
    f1 262.429639101
    f2 295.129109859
    f3 427.606887817
    f4 441.810388088

Maybe something like this? I don't know about your hardware, but this averages 16 ms over 100 loops for l2 = [list(range(20)), list(range(30))] * 10000.
from numpy import nan

def box(l):
    max_length = len(max(l, key=len))
    return [x + [nan] * (max_length - len(x)) for x in l]
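Note that box returns a padded list of lists rather than an array; wrapping it gives the array the question asks for:

import numpy as np

l = [[1, 2, 3], [1, 2]]
print(np.array(box(l)))
# [[ 1.  2.  3.]
#  [ 1.  2. nan]]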

If this is only for a 2D list, this might be your answer:
from numpy import nan

def even(data):
    maxlen = max(len(l) for l in data)
    for l in data:
        l.extend([nan] * (maxlen - len(l)))

And if you don't want to modify the actual list:
from numpy import nan

def even(data):
    # copy each inner list too; a shallow data.copy() would still share
    # (and therefore mutate) the original sublists
    res = [list(l) for l in data]
    maxlen = max(len(l) for l in res)
    for l in res:
        l.extend([nan] * (maxlen - len(l)))
    return res

Related

How to invert volume of matrices in numpy?

Assume an array of invertible matrices:
import numpy as np
a = np.arange(120).reshape((2, 2, 5, 6))
I want to invert the matrices over their defined axes:
b = np.linalg.inv(a, axis1=0, axis2=1)
but this does not seem to be supported. How can I achieve this?
The inv docs specify its array input as:
a : (..., M, M) array_like
    Matrix to be inverted.
You have
a = np.arange(120).reshape((2, 2, 5, 6))
which is (M, M, ...). The dimensions are in the wrong order - change them!
In [44]: a = np.arange(120).reshape((2, 2, 5, 6))
Change the axes to the order that inv accepts:
In [45]: A = a.transpose(2,3,0,1)
In [46]: Ai = np.linalg.inv(A)
In [47]: Ai.shape
Out[47]: (5, 6, 2, 2)
In [48]: ai = Ai.transpose(2,3,0,1) # and back
In [49]: ai.shape
Out[49]: (2, 2, 5, 6)
I was going to test the result, but got:
In [50]: x = a@ai
Traceback (most recent call last):
  File "<ipython-input-50-9dfe3616745d>", line 1, in <module>
    x = a@ai
ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 5 is different from 6)
Like inv, matmul treats the last 2 dimensions as the matrix and the first 2 as 'batch':
In [51]: x = A@Ai
In [52]: x[0,0]
Out[52]:
array([[1., 0.],
       [0., 1.]])
In [53]: x[0,3]
Out[53]:
array([[1.00000000e+00, 1.38777878e-17],
       [4.44089210e-16, 1.00000000e+00]])
We can do the equivalent with einsum:
In [55]: x = np.einsum('ijkl,jmkl->imkl', a, ai)
In [56]: x[:,:,0,0]
Out[56]:
array([[1., 0.],
       [0., 1.]])
You might want to change the original specification to match the inv and matmul usage; it could make life easier for you. Also remember that in numpy the trailing dimensions are the innermost ones.
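For instance, here is a small sketch of that axis shuffle using np.moveaxis instead of transpose (either works; moveaxis just states the intent more directly):

A = np.moveaxis(a, (0, 1), (-2, -1))                  # (5, 6, 2, 2): matrix axes last
ai = np.moveaxis(np.linalg.inv(A), (-2, -1), (0, 1))  # invert, then back to (2, 2, 5, 6)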
If you know that the matrices are 2x2, you can do it easily using the standard formula for inverting such matrices; otherwise, I fear the only reasonable solution is to do it with for loops. For example, the following works for any shape (adjusting the sizes adequately):
b = np.stack([np.linalg.inv(a[:, :, i, j])
              for i in range(a.shape[2])
              for j in range(a.shape[3])], axis=2)
b = b.reshape(2, 2, 5, 6)
as checked by
for i in range(a.shape[2]):
    for j in range(a.shape[3]):
        assert np.allclose(np.dot(a[:, :, i, j], b[:, :, i, j]), np.eye(2))
In the specific 2x2 case you can do the following, which is fully vectorized and hence probably faster:
determinants = a[0, 0] * a[1, 1] - a[0, 1] * a[1, 0]
b = 1 / determinants * np.stack([
    np.stack([a[1, 1], -a[0, 1]]),
    np.stack([-a[1, 0], a[0, 0]]),
])
On the specific (small) input size, the second solution is about 10 times faster in my tests (43us vs. 537us).
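The same allclose check used above can sanity-check the vectorized 2x2 version (a quick sketch):

for i in range(a.shape[2]):
    for j in range(a.shape[3]):
        assert np.allclose(np.dot(a[:, :, i, j], b[:, :, i, j]), np.eye(2))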

Hadamard product for each unique pair of columns in numpy array

Using Python (3.7.7) and numpy (1.17.4), I am working with medium-sized 2D numpy arrays (from 5000x80 up to 200,000x120). For a given array, I want to calculate the Hadamard product between all possible unique pairs of its column vectors.
I have:

    A             A
[a, b, c, d]  [a, b, c, d]
[1, 2, 3, 4]  [1, 2, 3, 4]
[4, 5, 6, 7] * [4, 5, 6, 7]
[7, 8, 9, 1]  [7, 8, 9, 1]

and I want to get:

[a*b, a*c, a*d, b*c, b*d, c*d]
[  2.,  3.,  4.,  6.,  8., 12.]
[ 20., 24., 28., 30., 35., 42.]
[ 56., 63.,  7., 72.,  8.,  9.]
I already have a solution from a colleague using np.kron, which I adapted a bit:
def hadamard_kron(A: np.ndarray):
    """Return the Hadamard products of all unique pairs of columns,
    and indices signifying which columns constitute a given pair.
    """
    n = A.shape[0]
    ind1 = (np.kron(np.arange(0, n).reshape((n, 1)), np.ones((n, 1)))).squeeze().astype(int)
    ind2 = (np.kron(np.ones((n, 1)), np.arange(0, n).reshape((n, 1)))).squeeze().astype(int)
    xmat2 = np.kron(A, np.ones((n, 1))) * np.kron(np.ones((n, 1)), A)
    hadamard_inputs = xmat2[ind2 > ind1, :]
    ind1_ = ind1[ind1 < ind2]
    ind2_ = ind2[ind1 < ind2]
    return hadamard_inputs, ind1_, ind2_

hadamard_A, first_pair_members, second_pair_members = hadamard_kron(a.transpose())
Note that hadamard_A is what I want, but transposed (which is also what I want for further processing). Also, ind1_ (ind2_) gives the indices of the objects that feature as the first (second) element in the pair for which the Hadamard product is calculated. I need those as well.
However, I feel this code is too inefficient: it takes too long, and since I call this function several times in my algorithm, I was wondering whether there is a cleverer solution. Am I overlooking some numpy/scipy tools I could combine for this task?
Thanks all! :)
Approach #1
Simplest one with np.triu_indices -
In [45]: a
Out[45]:
array([[1, 2, 3, 4],
       [4, 5, 6, 7],
       [7, 8, 9, 1]])

In [46]: r,c = np.triu_indices(a.shape[1],1)

In [47]: a[:,c]*a[:,r]
Out[47]:
array([[ 2,  3,  4,  6,  8, 12],
       [20, 24, 28, 30, 35, 42],
       [56, 63,  7, 72,  8,  9]])
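For reference, the pair indices this produces (these also serve as the ind1_/ind2_ the question asks for):

In [48]: r
Out[48]: array([0, 0, 0, 1, 1, 2])

In [49]: c
Out[49]: array([1, 2, 3, 2, 3, 3])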
Approach #2
Memory-efficient one for large arrays -
m, n = a.shape
s = np.r_[0, np.arange(n-1, -1, -1).cumsum()]
out = np.empty((m, n*(n-1)//2), dtype=a.dtype)
for i, (s0, s1) in enumerate(zip(s[:-1], s[1:])):
    out[:, s0:s1] = a[:, i, None] * a[:, i+1:]
Approach #3
Masking based one -
m, n = a.shape
mask = ~np.tri(n, dtype=bool)
m3D = np.broadcast_to(mask, (m, n, n))
b1 = np.broadcast_to(a[..., None], (m, n, n))
b2 = np.broadcast_to(a[:, None, :], (m, n, n))
out = (b1[m3D] * b2[m3D]).reshape(m, -1)
Approach #4
Extend approach #2 for a numba one -
from numba import njit

def numba_app(a):
    m, n = a.shape
    out = np.empty((m, n*(n-1)//2), dtype=a.dtype)
    return numba_func(a, out, m, n)

@njit
def numba_func(a, out, m, n):
    for p in range(m):
        I = 0
        for i in range(n):
            for j in range(i+1, n):
                out[p, I] = a[p, i] * a[p, j]
                I += 1
    return out
Then, leverage parallel processing (as pointed out in the comments by @max9111), like so -
from numba import njit, prange

def numba_app_parallel(a):
    m, n = a.shape
    out = np.empty((m, n*(n-1)//2), dtype=a.dtype)
    return numba_func_parallel(a, out, m, n)

@njit(parallel=True)
def numba_func_parallel(a, out, m, n):
    for p in prange(m):
        I = 0
        for i in range(n):
            for j in range(i+1, n):
                out[p, I] = a[p, i] * a[p, j]
                I += 1
    return out
Benchmarking
Using the benchit package (a few benchmarking tools packaged together; disclaimer: I am its author) to benchmark the proposed solutions.
import benchit
in_ = [np.random.rand(5000, 80), np.random.rand(10000, 100), np.random.rand(20000, 120)]
funcs = [ehsan, app1, app2, app3, numba_app, numba_app_parallel]
t = benchit.timings(funcs, in_, indexby='shape')
t.rank()
t.plot(logx=False, save='timings.png')
Conclusion: the numba solutions seem to be doing pretty well, and app2 is the best among the NumPy ones.
Another approach, equivalent to Divakar's first one:
r,c = np.triu_indices(A.shape[1],1)
np.einsum('ij,ik->ijk',A,A)[:,r,c]
output:
[[ 2  3  4  6  8 12]
 [20 24 28 30 35 42]
 [56 63  7 72  8  9]]

Is there any elegant way to make dot product between a one row matrix and each row of a more than one row matrix?

Is there an elegant way to take the dot product between a one-row matrix and each row of a matrix with two or more rows in Python? I am using a list or a list of lists to represent the matrices.
If A is an n×m matrix (n rows, each containing m entries), and v is a row vector of m entries, then the "dot product between the row vector and each row of a two or more row matrix" is usually called the matrix-vector product between A and v.T (the transpose of v, that is, a column vector).
With Numpy, this is simply np.dot(A, np.reshape(v, (-1, 1))). In plain Python, you could write something like:
def inner_product(u, v):
    'Inner product between two numeric arrays.'
    return sum(x*y for x, y in zip(u, v))

def mat_vec_product(a, v):
    'Dot product between matrix `a` and column vector `v`.'
    return [inner_product(u, v) for u in a]

>>> mat = [
...     [0, 0, 0],
...     [0, 0, 1],
...     [0, 1, 0],
...     [0, 1, 1],
...     [1, 0, 0],
...     [1, 0, 1],
... ]
>>> row = [100, 20, 3]
>>> print(mat_vec_product(mat, row))
[0, 3, 20, 23, 100, 103]
You can also use cdist from scipy with a custom distance function, which is the dot product in this case. You have to convert the list of lists into a numpy array first.
import numpy as np
from scipy.spatial.distance import cdist
x1 = np.atleast_2d(np.array([1, 2, 3]))
x2 = np.array([[1, 2, 3],
               [2, 3, 4],
               [4, 5, 6]])
D = cdist(x1, x2, lambda u, v: np.dot(u.T, v))
D.ravel()
Output
array([ 14., 20., 32.])
Or it can be a simple matrix trick, e.g. D = x1.dot(x2.T) (the output is the same, i.e. array([ 14., 20., 32.])).
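Applied to the mat/row example from the earlier answer, the same trick looks like this (a small sketch, assuming numpy):

import numpy as np

mat = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0],
                [0, 1, 1], [1, 0, 0], [1, 0, 1]])
row = np.array([100, 20, 3])
print(mat.dot(row))
# [  0   3  20  23 100 103]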

How I select/format only the values from a dictionary into a list or numpy array?

How do I get it to print just a list of the averages? I need it in exactly the same format as my np arrays so I can compare them and see whether they are the same or not.
Code:
import numpy as np
from pprint import pprint

centroids = np.array([[3, 44], [4, 15], [5, 15]])
dataPoints = np.array([[2, 4], [17, 4], [45, 2], [45, 7], [16, 32], [32, 14], [20, 56], [68, 33]])

def size(vector):
    return np.sqrt(sum(x**2 for x in vector))

def distance(vector1, vector2):
    return size(vector1 - vector2)

def distances(array1, array2):
    lists = [[distance(vector1, vector2) for vector2 in array2] for vector1 in array1]
    #print lists.index(min, zip(*lists))
    smallest = [min(zip(l, range(len(l)))) for l in zip(*lists)]
    clusters = {}
    for j, (_, i) in enumerate(smallest):
        clusters.setdefault(i, []).append(dataPoints[j])
    pprint(clusters)
    print '\nAverage of Each Point'
    avgDict = {}
    for k, v in clusters.iteritems():
        avgDict[k] = sum(v) / len(v)
    avgList = np.asarray(avgDict)
    pprint(avgList)

distances(centroids, dataPoints)
Current Output:
{0: [array([16, 32]), array([20, 56])],
1: [array([2, 4])],
2: [array([17, 4]),
array([45, 2]),
array([45, 7]),
array([32, 14]),
array([68, 33])]}
Average of Each Point
array({0: array([18, 44]), 1: array([2, 4]), 2: array([41, 12])}, dtype=object)
Desired Output:
[[18,44],[2,4],[41,12]]
Or whatever the best format to compare my arrays/lists. I am aware I should have just stuck with one data type.
Are you trying to cluster the dataPoints by the index of the nearest centroid and find the average position of the clustered points? If so, I advise using numpy's broadcasting rules to get the output you need.
Consider this,
np.linalg.norm(centroids[None, :, :] - dataPoints[:, None, :], axis=-1)
It creates a matrix showing all distances between dataPoints and centroids,
array([[ 40.01249805,  11.18033989,  11.40175425],
       [ 42.3792402 ,  17.02938637,  16.2788206 ],
       [ 59.39696962,  43.01162634,  42.05948169],
       [ 55.97320788,  41.77319715,  40.79215611],
       [ 17.69180601,  20.80865205,  20.24845673],
       [ 41.72529209,  28.01785145,  27.01851217],
       [ 20.80865205,  44.01136217,  43.65775991],
       [ 65.9241989 ,  66.48308055,  65.520989  ]])
And you can compute the indices of the nearest centroids by this trick (they are split into 3 lines for readability),
In: t0 = centroids[None, :, :] - dataPoints[:, None, :]
In: t1 = np.linalg.norm(t0, axis=-1)
In: t2 = np.argmin(t1, axis=-1)
Now t2 has the indices,
array([1, 2, 2, 2, 0, 2, 0, 2])
To find the #1 cluster, use the boolean mask t2 == 0,
In: dataPoints[t2 == 0]
Out: array([[16, 32],
            [20, 56]])
In: dataPoints[t2 == 1]
Out: array([[2, 4]])
In: dataPoints[t2 == 2]
Out: array([[17,  4],
            [45,  2],
            [45,  7],
            [32, 14],
            [68, 33]])
Or just calculate the average in your case,
In: np.mean(dataPoints[t2 == 0], axis=0)
Out: array([ 18., 44.])
In: np.mean(dataPoints[t2 == 1], axis=0)
Out: array([ 2., 4.])
In: np.mean(dataPoints[t2 == 2], axis=0)
Out: array([ 41.4, 12. ])
Of course, the latter blocks can be rewritten as a for-loop if you want; see the sketch below. In my opinion, it is good practice to formulate the solution using numpy's conventions.
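A loop version along those lines (a sketch, reusing t2, dataPoints, and centroids from above) reproduces the desired output:

avgList = np.array([np.mean(dataPoints[t2 == k], axis=0)
                    for k in range(len(centroids))])
print(avgList)
# [[ 18.   44. ]
#  [  2.    4. ]
#  [ 41.4  12. ]]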

Copy upper triangle to lower triangle in a python matrix

                        ailuropoda_melanoleuca  bos_taurus  callithrix_jacchus  canis_familiaris
ailuropoda_melanoleuca                       0        84.6                97.4                44
bos_taurus                                   0           0                97.4              84.6
callithrix_jacchus                           0           0                   0              97.4
canis_familiaris                             0           0                   0                 0
This is a short version of the Python matrix I have. I have the information in the upper triangle. Is there an easy function to copy the upper triangle to the lower triangle of the matrix?
To do this in NumPy, without using a double loop, you can use tril_indices. Note that depending on your matrix size, this may be slower than adding the transpose and subtracting the diagonal, though perhaps this method is more readable.
>>> i_lower = np.tril_indices(n, -1)
>>> matrix[i_lower] = matrix.T[i_lower] # make the matrix symmetric
Be careful that you do not try to mix tril_indices and triu_indices as they both use row major indexing, i.e., this does not work:
>>> i_upper = np.triu_indices(n, 1)
>>> i_lower = np.tril_indices(n, -1)
>>> matrix[i_lower] = matrix[i_upper] # make the matrix symmetric
>>> np.allclose(matrix.T, matrix)
False
The easiest AND FASTEST (no loop) way to do this for NumPy arrays is the following:
The following is ~3x faster for 100x100 matrices compared to the accepted answer and roughly the same speed for 10x10 matrices.
import numpy as np

X = np.array([[0., 2., 3.],
              [0., 0., 6.],
              [0., 0., 0.]])
X = X + X.T - np.diag(np.diag(X))
print(X)
#array([[0., 2., 3.],
#       [2., 0., 6.],
#       [3., 6., 0.]])
Note that the matrix must either be upper triangular to begin with, or it should be made upper triangular as follows.
rng = np.random.RandomState(123)
X = rng.randint(10, size=(3, 3))
print(X)
#array([[2, 2, 6],
#       [1, 3, 9],
#       [6, 1, 0]])
X = np.triu(X)
X = X + X.T - np.diag(np.diag(X))
print(X)
#array([[2, 2, 6],
#       [2, 3, 9],
#       [6, 9, 0]])
If I understand the question correctly, I believe this will work:
for i in range(num_rows):
    for j in range(i, num_cols):
        matrix[j][i] = matrix[i][j]
Here's a better one, I guess:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0,  1,  2,  3],
       [ 4,  5,  6,  7],
       [ 8,  9, 10, 11],
       [12, 13, 14, 15]])
>>> iu = np.triu_indices(4, 1)
>>> il = (iu[1], iu[0])
>>> a[il] = a[iu]
>>> a
array([[ 0,  1,  2,  3],
       [ 1,  5,  6,  7],
       [ 2,  6, 10, 11],
       [ 3,  7, 11, 15]])
If U is an upper triangular matrix, you can use triu and transpose to make it symmetric:
LDU = triu(U,1)+U.T
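A quick illustration of that identity (a sketch, assuming numpy is imported as np):

import numpy as np

U = np.array([[1., 2., 3.],
              [0., 4., 5.],
              [0., 0., 6.]])
print(np.triu(U, 1) + U.T)
# [[1. 2. 3.]
#  [2. 4. 5.]
#  [3. 5. 6.]]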
def inmatrix(m, n):  # input-matrix function
    a = []
    for i in range(m):
        b = []
        for j in range(n):
            elm = int(input("Enter number in Pocket [" + str(i) + "][" + str(j) + "] "))
            b.append(elm)
        a.append(b)
    return a

def Matrix(a):  # print-matrix function
    for i in range(len(a)):
        for j in range(len(a[0])):
            print(a[i][j], end=" ")
        print()

m = int(input("Enter number of rows "))
n = int(input("Enter number of columns "))
a = inmatrix(m, n)  # read the matrix
Matrix(a)  # print the matrix
for i in range(m):
    for j in range(n):
        if i > j:  # lower-triangular positions
            a[i][j] = a[j][i]  # copy the upper-triangular element across
Matrix(a)  # print the matrix after the change
