I have 3 NumPy matrices:
One contains pixel positions in X (x_pos), another pixel positions in Y (y_pos), and the last one contains pixel values (p_value).
I would like to use these 3 matrices to build a result image.
With loops I get this result:
# Resulting image
res = np.zeros((128, 128, 3), dtype=np.uint8)
for i in range(x_pos.shape[0]):
    for j in range(x_pos.shape[1]):
        # Get coordinates
        x = x_pos[i][j]
        y = y_pos[i][j]
        res[y, x] = p_value[i][j]
With large matrices (2048x2048) this code already takes a lot of time. Is it possible to optimize it without using a nested loop?
Note that the positions in the x_pos and y_pos matrices do not necessarily follow each other; there may be holes or duplicate values.
It should be possible using np.meshgrid:
i = np.arange(x_pos.shape[0])
j = np.arange(x_pos.shape[1])
i_1, j_1 = np.meshgrid(i, j, indexing='ij')
res[y_pos.ravel(), x_pos.ravel()] = p_value[i_1.ravel(), j_1.ravel()]
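For reference, a minimal self-contained sketch of this idea (the array sizes here are made up; with duplicate target coordinates, one of the duplicates wins, and which one is unspecified):
import numpy as np

rng = np.random.default_rng(0)
x_pos = rng.integers(0, 128, (64, 64))
y_pos = rng.integers(0, 128, (64, 64))
p_value = rng.integers(0, 255, (64, 64, 3), dtype=np.uint8)

res = np.zeros((128, 128, 3), dtype=np.uint8)
# One fancy-indexed assignment replaces the whole nested loop
res[y_pos.ravel(), x_pos.ravel()] = p_value.reshape(-1, 3)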
First use consistent numpy 2d array indexing:
x = x_pos[i,j]
y = y_pos[i,j]
res[y,x] = p_value[i,j]
Now, instead of scalar i, j, use arrays, broadcasting i as a column so the pair indexes the full grid:
i = np.arange(n)[:, None]; j = np.arange(m)
You didn't provide a [mcve], so I won't try to demonstrate that it works.
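To make that concrete, here is a small sketch of the broadcast-indexing idea (shapes and values are made up):
import numpy as np

n, m = 4, 6
x_pos = np.random.randint(0, m, (n, m))
y_pos = np.random.randint(0, n, (n, m))
p_value = np.random.randint(0, 255, (n, m))

res = np.zeros((n, m), dtype=p_value.dtype)
i = np.arange(n)[:, None]  # column vector: broadcasts against j to cover every (i, j) pair
j = np.arange(m)
res[y_pos[i, j], x_pos[i, j]] = p_value[i, j]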
Thanks to @hpaulj's and @ai2ys's answers, the problem is solved.
Here is a comparison of the results in terms of execution speed:
import numpy as np
import cv2
import time
m_size = 4096
m_x = np.random.randint(0,m_size,(m_size,m_size), dtype = np.uint16)
m_y = np.random.randint(0,m_size,(m_size,m_size), dtype = np.uint16)
p_value = np.ones((m_size,m_size), dtype = np.uint8)
#Meshgrid method:
out = np.zeros((m_size,m_size),dtype=np.uint8)
start = time.time()
i = np.arange(0, m_x.shape[0])
j = np.arange(0, m_x.shape[1])
i_1, j_1 = np.meshgrid(i, j, indexing='ij')
out[m_x.ravel(),m_y.ravel()] = p_value[i_1.ravel(),j_1.ravel()]
end = time.time()
print("Meshgrid: {} s".format(end - start))
#No for loop method:
out = np.zeros((m_size,m_size),dtype=np.uint8)
start = time.time()
i = np.arange(m_x.shape[0])[:, None]  # column vector: broadcasts against j to cover every cell
j = np.arange(m_x.shape[1])
x = m_x[i, j]
y = m_y[i, j]
out[x, y] = p_value[i, j]
end = time.time()
print("No loop: {} s".format(end - start))
#For loop method:
out = np.zeros((m_size,m_size),dtype=np.uint8)
start = time.time()
for i in range(m_x.shape[0]):
    for j in range(m_y.shape[1]):
        x = m_x[i, j]
        y = m_y[i, j]
        out[x, y] = p_value[i, j]
end = time.time()
print("Nested loop: {} s".format(end - start))
#Output:
Meshgrid: 0.4837045669555664 s
No loop: 0.3600656986236572 s
Nested loop: 13.10097336769104 s
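Side note: with all of the fast variants, duplicate (x, y) targets overwrite each other in an unspecified order. If duplicates should accumulate instead, np.add.at performs an unbuffered scatter-add, which is well defined but noticeably slower:
out = np.zeros((m_size, m_size), dtype=np.uint32)
np.add.at(out, (m_x.ravel(), m_y.ravel()), p_value.ravel())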
I have an array Pe of shape (5100, 5100). I am trying to find the neighbor indices using the code below, but for this shape of Pe the computational time is 100 seconds. Is there a more time-efficient way to do it?
import time

def get_neighbor_indices(position, dimensions):
    '''
    dimensions is a shape of np.array
    '''
    i, j = position
    indices = [(i+1, j), (i-1, j), (i, j+1), (i, j-1)]
    return [
        (i, j) for i, j in indices
        if i >= 0 and i < dimensions[0]
        and j >= 0 and j < dimensions[1]
    ]

def iterate_array(init_i, init_j, arr, condition_func):
    '''
    arr is an instance of np.array
    condition_func is a function (value) => boolean
    '''
    indices_to_check = [(init_i, init_j)]
    checked_indices = set()
    result = []
    t0 = None
    t1 = None
    timestamps = []
    while indices_to_check:
        pos = indices_to_check.pop()
        if pos in checked_indices:
            continue
        item = arr[pos]
        checked_indices.add(pos)
        if condition_func(item):
            result.append(item)
            t1 = time.time()
            if t0 is None:
                t0 = t1
            timestamps.append(t1 - t0)
            indices_to_check.extend(
                get_neighbor_indices(pos, arr.shape)
            )
    return result, timestamps

Visited_Elements, timestamps = iterate_array(0, 0, Pe, lambda x: x < Pin0)
With scipy, and a slight change in the way the filter condition is described, this can be made rather fast:
import numpy as np
from scipy.ndimage import label
import time

def collect_array(init_i, init_j, arr, condition_arr):
    t0 = time.time()
    if not condition_arr[init_i, init_j]:
        return [], time.time() - t0
    islands = label(condition_arr)[0]
    mask = islands != islands[init_i, init_j]
    mx = np.ma.masked_array(arr, mask)  # use the arr argument, not the global Pe
    return mx.compressed(), time.time() - t0
# Trying it out
Pe = np.random.rand(5100, 5100)
Pin0 = 0.7
Visited_Elements, timestamp = collect_array(0,0, Pe, Pe < Pin0)
print(Visited_Elements,timestamp)
The core of the code is the label function and the fact that the condition function is replaced by a boolean array.
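For intuition, here is a tiny illustration of what label does with its default 4-connectivity (the same neighborhood as get_neighbor_indices in the question):
import numpy as np
from scipy.ndimage import label

cond = np.array([[1, 1, 0, 0],
                 [0, 1, 0, 1],
                 [0, 0, 0, 1]], dtype=bool)
islands, n = label(cond)
print(n)        # 2 connected components
print(islands)  # [[1 1 0 0]
                #  [0 1 0 2]
                #  [0 0 0 2]]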
Context:
I have a function to upsample multiple arrays that I want to write as efficiently as possible (because I have to run it 370000 times).
This function takes multiple inputs and is composed of 2 for loops. To upsample my arrays, I loop over this function with a parameter k, and I would like to get rid of this loop (which sits outside of the function). I tried using a mix of map() and list comprehensions to minimize my computing time, but I can't make it work.
Question:
How do I get the map() part of my code working (see the last section of the code)? Is there a better way than map() to get rid of for loops?
Summary:
Function interpolate_and_get_results: 2 for loops. Takes 3D, 2D arrays and int as inputs
This function is run inside a for loop (over the parameter k) that I want to get rid of
I wrote some example code; the part with map() does not work because I can't think of a way to pass the k parameter both as a list to iterate over and as an input to the function.
Thank you!
P.S.: I also have code to parallelize the interpolation function, but I do not use it for this example.
import numpy as np
import time
#%% --- SETUP OF THE PROBLEM --- %%#
temperatures = np.random.rand(10,4,7)*100
precipitation = np.random.rand(10,4,7)
snow = np.random.rand(10,4,7)
# Flatten the arrays to make them iterable with map()
temperatures = temperatures.reshape(10,4*7)
precipitation = precipitation.reshape(10,4*7)
snow = snow.reshape(10,4*7)
# Array of altitudes to "adjust" the temperatures
alt = np.random.rand(4,7)*1000
# Flatten the array
alt = alt.reshape(4*7)
# Weight Matrix
w = np.random.rand(4*7, 1000, 1000)
#%% Function
def interpolate_and_get_results(temp, prec, Eprec, w, i, k):
    # Do some calculations
    factor1 = ((temperatures[i, k] - 272.15) + (-alt[k] * -6/1000))
    factor2 = precipitation[i, k]
    factor3 = snow[i, k]
    # Iterate through every cell of the upsampled arrays
    for i in range(w.shape[1]):
        for j in range(w.shape[2]):
            val = w[k, i, j]
            temp[i, j] += factor1 * val
            prec[i, j] += factor2 * val
            Eprec[i, j] += factor3 * val
#%% --- Function call without loop simplification --- %%#
# Prepare a template array
dummy = np.zeros((w.shape[1], w.shape[2]))
# Initialize the global arrays to be filled
tempYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
precYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
EprecYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
ts = time.time()
for i in range(temperatures.shape[0]):
    # Create empty host arrays
    temp = dummy.copy()
    prec = dummy.copy()
    Eprec = dummy.copy()
    for k in range(w.shape[0]):
        interpolate_and_get_results(temp, prec, Eprec, w, i, k)
print('Time: ', (time.time()-ts))
#%% --- With Map (DOES NOT WORK) --- %%#
del k
dummy = np.zeros((w.shape[1], w.shape[2]))
# Initialize the global arrays to be filled
tempYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
precYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
EprecYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
# Create a list k to be iterated through with the map() function
k = [k for k in range(0, temperatures.shape[1])]
for i in range(temperatures.shape[0]):
    # Create empty host arrays
    temp = dummy.copy()
    prec = dummy.copy()
    Eprec = dummy.copy()
    # Call the interpolate function with map() iterating through k
    map(interpolate_and_get_results(temp, prec, Eprec, w, i, k), k)
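For what it's worth, map needs a callable plus an iterable, so one way to bind the fixed arguments is functools.partial. This sketch only fixes the map call itself; it is not a performance win, since the function is still invoked once per k (and map is lazy in Python 3, hence the list()):
from functools import partial

for i in range(temperatures.shape[0]):
    temp = dummy.copy()
    prec = dummy.copy()
    Eprec = dummy.copy()
    list(map(partial(interpolate_and_get_results, temp, prec, Eprec, w, i),
             range(w.shape[0])))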
Code from @Jérôme Richard using numba, added at the request of user @ken (takes 48.81 s to run on my PC):
import numpy as np
import multiprocessing as mp
import time
#%% ------ Create data ------ ###
temperatures = np.random.rand(10,4,7)*100
precipitation = np.random.rand(10,4,7)
snow = np.random.rand(10,4,7)
# Array of altitudes to "adjust" the temperatures
alt = np.random.rand(4,7)*1000
#%% ------ IDW Interpolation ------ ###
# We create a weight matrix that we use to upsample our temperatures, precipitations and snow matrices
# This part is not that important, it works well as it is
MX,MY = np.shape(temperatures[0])
N = 300
T = np.zeros([N*MX+1, N*MY+1])
# create NxM inverse distance weight matrices based on Gaussian interpolation
x = np.arange(0,N*MX+1)
y = np.arange(0,N*MY+1)
X,Y = np.meshgrid(x,y)
k = 0
w = np.zeros([MX*MY,N*MX+1,N*MY+1])
for mx in range(MX):
    for my in range(MY):
        # Gaussian
        add_point = np.exp(-((mx*N - X.T)**2 + (my*N - Y.T)**2) / N**2)
        w[k, :, :] += add_point
        k += 1
sum_weights = np.sum(w, axis=0)
for k in range(MX*MY):
    w[k, :, :] /= sum_weights
#%% --- Function --- %%#
# Code from Jérôme Richard: https://stackoverflow.com/questions/72399050/parallelize-three-nested-loops/72399494?noredirect=1#comment127919686_72399494
import numba as nb
# get_results + interpolator
@nb.njit('void(float64[:,::1], float64[:,::1], float64[:,::1], float64[:,:,::1], int_, int_, int_, int_)', parallel=True)
def interpolate_and_get_results(temp, prec, Eprec, w, i, k, mx, my):
    factor1 = ((temperatures[i, mx, my] - 272.15) + (-alt[mx, my] * -6/1000))
    factor2 = precipitation[i, mx, my]
    factor3 = snow[i, mx, my]
    # Fill the upsampled arrays
    for i in nb.prange(w.shape[1]):
        for j in range(w.shape[2]):
            val = w[k, i, j]
            temp[i, j] += factor1 * val
            prec[i, j] += factor2 * val
            Eprec[i, j] += factor3 * val
#%% --- Main Loop --- %%#
ts = time.time()
if __name__ == '__main__':
    dummy = np.zeros((w.shape[1], w.shape[2]))
    # Initialize the permanent arrays to be filled
    tempYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    precYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    EprecYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smbYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    # Initialize semi-permanent array
    smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    # Loop over the "time" axis
    for i in range(0, temperatures.shape[0]):
        # Create empty semi-permanent arrays
        temp = dummy.copy()
        prec = dummy.copy()
        Eprec = dummy.copy()
        # Loop over the different weights
        for k in range(w.shape[0]):
            # Loop over the cells of the array to be upsampled
            for mx in range(MX):
                for my in range(MY):
                    interpolate_and_get_results(temp, prec, Eprec, w, i, k, mx, my)
        # At each timestep, update the semi-permanent array using the results from the interpolate function
        smb[np.logical_and(temp <= 0, prec > 0)] += prec[np.logical_and(temp <= 0, prec > 0)]
        # Fill the permanent arrays (equivalent of storing the results at the end of every year)
        # and reinitialize the semi-permanent array every 5th timestep
        if i % 5 == 0:
            # Permanent
            tempYEAR[int(i/5)] = temp
            precYEAR[int(i/5)] = prec
            EprecYEAR[int(i/5)] = Eprec
            smbYEAR[int(i/5)] = smb
            # Semi-permanent
            smb = np.zeros((dummy.shape[0], dummy.shape[1]))
print("Time spent:", time.time()-ts)
Note: This answer is not about how to use map, it's about "a better way".
You are doing a lot of redundant calculations: factor1, factor2 and factor3 depend only on (mx, my), while the weight slice w[k] depends only on k, so the double sum over k and (mx, my) factorizes into (sum of factors) * (sum of weight slices). Believe it or not, this code outputs the same result.
# No change in the initialization section above.
ts = time.time()
if __name__ == '__main__':
    dummy = np.zeros((w.shape[1], w.shape[2]))
    # Initialize the permanent arrays to be filled
    tempYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    precYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    EprecYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smbYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    temperatures_inter = temperatures - 272.15
    w_inter = w.sum(axis=0)
    alt_inter = (alt * (-6 / 1000)).sum()
    for i in range(0, temperatures_inter.shape[0]):
        temp_i = (temperatures_inter[i].sum() - alt_inter) * w_inter
        prec_i = precipitation[i].sum() * w_inter
        Eprec_i = snow[i].sum() * w_inter
        condition = np.logical_and(temp_i <= 0, prec_i > 0)
        smb[condition] += prec_i[condition]
        if i % 5 == 0:
            tempYEAR[i // 5] = temp_i
            precYEAR[i // 5] = prec_i
            EprecYEAR[i // 5] = Eprec_i
            smbYEAR[i // 5] = smb
            smb = np.zeros((dummy.shape[0], dummy.shape[1]))
print("Time spent:", time.time() - ts)
I verified the results by comparing them to the output of the code that uses numba. The difference is on the order of 1e-7, which is probably caused by floating-point rounding error.
print((tempYEAR_from_yours - tempYEAR_from_mine).sum()) # -8.429287845501676e-08
print((precYEAR_from_yours - precYEAR_from_mine).sum()) # 2.595697878859937e-09
print((EprecYEAR_from_yours - EprecYEAR_from_mine).sum()) # -7.430216442116944e-09
print((smbYEAR_from_yours - smbYEAR_from_mine).sum()) # -6.875431779462815e-09
On my PC, this code took 0.36 seconds. It does not use numba and is not even parallelized. It just eliminated redundancy.
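The key identity: in the original triple loop, every factor(mx, my) is multiplied by every weight slice w[k], so the total is (sum of factors) * (sum of weight slices). A tiny numeric check of that claim (made-up sizes):
import numpy as np

rng = np.random.default_rng(0)
factors = rng.random(6)     # stands in for factor1 over the (mx, my) cells
w = rng.random((6, 3, 3))   # stands in for the weight slices over k

full = sum(f * w[k] for f in factors for k in range(len(w)))
assert np.allclose(full, factors.sum() * w.sum(axis=0))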
I have a 4D numpy array of size (98,359,256,269) that I want to threshold.
Right now, I have two separate lists that keep the coordinates of the first 2 dimensions and the last 2 dimensions (mag_ang for the first 2 dimensions and indices for the last 2).
size of indices : (61821,2)
size of mag_ang : (35182,2)
Currently, my code looks like this:
inner_points = []
for k in indices:
    x = k[0]
    y = k[1]
    for i, ctr in enumerate(mag_ang):
        mag = ctr[0]
        ang = ctr[1]
        if X[mag][ang][x][y] > 10:
            inner_points.append((y, x))
This code works, but it's pretty slow, and I wonder if there's a more pythonic/faster way to do this?
(EDIT: added a second alternate method)
Use numpy multi-array indexing:
import time
import numpy as np
n_mag, n_ang, n_x, n_y = 10, 12, 5, 6
shape = n_mag, n_ang, n_x, n_y
X = np.random.random_sample(shape) * 20
nb_indices = 100 # 61821
indices = np.c_[np.random.randint(0, n_x, nb_indices), np.random.randint(0, n_y, nb_indices)]
nb_mag_ang = 50 # 35182
mag_ang = np.c_[np.random.randint(0, n_mag, nb_mag_ang), np.random.randint(0, n_ang, nb_mag_ang)]
# original method
inner_points = []
start = time.time()
for x, y in indices:
    for mag, ang in mag_ang:
        if X[mag][ang][x][y] > 10:
            inner_points.append((y, x))
end = time.time()
print(end - start)
# faster method 1:
inner_points_faster1 = []
start = time.time()
for x, y in indices:
    if np.any(X[mag_ang[:, 0], mag_ang[:, 1], x, y] > 10):
        inner_points_faster1.append((y, x))
end = time.time()
print(end - start)
# faster method 2:
start = time.time()
# note: depending on the real size of mag_ang and indices, you may wish to do this the other way round
found = X[:, :, indices[:, 0], indices[:, 1]][mag_ang[:, 0], mag_ang[:, 1], :] > 10
# 'found' shape is (nb_mag_ang x nb_indices)
assert found.shape == (nb_mag_ang, nb_indices)
matching_indices_mask = found.any(axis=0)
inner_points_faster2 = indices[matching_indices_mask, :]
end = time.time()
print(end - start)
# finally assert equality of findings
inner_points = np.unique(np.array(inner_points))
inner_points_faster1 = np.unique(np.array(inner_points_faster1))
inner_points_faster2 = np.unique(inner_points_faster2)
assert np.array_equal(inner_points, inner_points_faster1)
assert np.array_equal(inner_points, inner_points_faster2)
yields
0.04685807228088379
0.0
0.0
(of course if you increase the shape the time will not be zero for the second and third)
Final note: here I use "unique" at the end, but it would maybe be wise to do it upfront for the indices and mag_ang arrays (unless you are sure that they are already unique).
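For instance, deduplicating the coordinate rows upfront could look like this (np.unique with axis=0 keeps one copy of each row):
indices = np.unique(indices, axis=0)
mag_ang = np.unique(mag_ang, axis=0)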
Use numpy directly. If indices and mag_ang are numpy arrays of two columns each for the appropriate coordinate:
(x, y), (mag, ang) = indices.T, mag_ang.T
index_matrix = np.array(np.meshgrid(mag, ang, x, y)).T.reshape(-1, 4)
inner_mag, inner_ang, inner_x, inner_y = index_matrix[X[tuple(index_matrix.T)] > 10].T
Now the inner... variables hold arrays for each coordinate. To get a single list of pairs, you can zip inner_y and inner_x.
Here are a few vectorized ways leveraging broadcasting -
thresh = 10
mask = X[mag_ang[:,0],mag_ang[:,1],indices[:,0,None],indices[:,1,None]]>thresh
r = np.where(mask)[0]
inner_points_out = indices[r][:,::-1]
For larger arrays, we can compare first and then index to get the mask -
mask = (X>thresh)[mag_ang[:,0],mag_ang[:,1],indices[:,0,None],indices[:,1,None]]
If you are only interested in the unique coordinates of indices, use the mask directly -
inner_points_out = indices[mask.any(1)][:,::-1]
For large arrays, we can also leverage multi-cores with numexpr module.
Thus, first off import the module -
import numexpr as ne
Then, replace (X>thresh) with ne.evaluate('X>thresh') in the computation(s) listed earlier.
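As a sketch, the "compare first" variant from above then becomes:
import numexpr as ne

mask = ne.evaluate('X > thresh')[mag_ang[:, 0], mag_ang[:, 1], indices[:, 0, None], indices[:, 1, None]]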
Use np.where
inner = np.where(X > 10)
a, b, x, y = inner
inner_points = np.vstack([y, x]).T
I'm working on a neural network where I am augmenting data via rotation and varying the size of each input volume.
Let me back up: the input to the network is a 3D volume. I generate variable-size 3D volumes and then pad each volume with zeros such that the input volume is constant. Check here for an issue I was having with padding (now resolved).
I generate a variable-size 3D volume, append it to a list, and then convert the list into a numpy array. At this point, padding hasn't occurred, so converting it into a 4D tuple makes no sense...
input_augmented_matrix = []
label_augmented_matrix = []
for i in range(n_volumes):
    if i % 50 == 0:
        print("Augmenting step #" + str(i))
    slice_index = randint(0, n_input)
    z_max = randint(5, n_input)
    z_rand = randint(3, 5)
    z_min = z_max - z_rand
    x_max = randint(75, n_input_x)
    x_rand = randint(60, 75)
    x_min = x_max - x_rand
    y_max = randint(75, n_input_y)
    y_rand = randint(60, 75)
    y_min = y_max - y_rand
    random_rotation = randint(1, 4) * 90
    for j in range(2):
        temp_volume = np.empty((z_rand, x_rand, y_rand))
        k = 0
        for z in range(z_min, z_max):
            l = 0
            for x in range(x_min, x_max):
                m = 0
                for y in range(y_min, y_max):
                    if j == 0:
                        # input volume
                        try:
                            temp_volume[k][l][m] = input_matrix[z][x][y]
                        except:
                            pdb.set_trace()
                    else:
                        # ground truth volume
                        temp_volume[k][l][m] = label_matrix[z][x][y]
                    m = m + 1
                l = l + 1
            k = k + 1
        temp_volume = np.asarray(temp_volume)
        temp_volume = np.rot90(temp_volume, random_rotation)
        if j == 0:
            input_augmented_matrix.append(temp_volume)
        else:
            label_augmented_matrix.append(temp_volume)
input_augmented_matrix = np.asarray(input_augmented_matrix)
label_augmented_matrix = np.asarray(label_augmented_matrix)
The dimensions of input_augmented_matrix at this point are (N,)
Then I pad with the following code...
for i in range(n_volumes):
    print("Padding volume #" + str(i))
    input_augmented_matrix[i] = np.lib.pad(
        input_augmented_matrix[i],
        ((0, n_input_z - int(input_augmented_matrix[i][:, 0, 0].shape[0])),
         (0, n_input_x - int(input_augmented_matrix[i][0, :, 0].shape[0])),
         (0, n_input_y - int(input_augmented_matrix[i][0, 0, :].shape[0]))),
        'constant', constant_values=0)
    label_augmented_matrix[i] = np.lib.pad(
        label_augmented_matrix[i],
        ((0, n_input_z - int(label_augmented_matrix[i][:, 0, 0].shape[0])),
         (0, n_input_x - int(label_augmented_matrix[i][0, :, 0].shape[0])),
         (0, n_input_y - int(label_augmented_matrix[i][0, 0, :].shape[0]))),
        'constant', constant_values=0)
At this point, the dimensions are still (N,) even though every element of the array now has the same constant shape (for example, input_augmented_matrix[0].shape == input_augmented_matrix[1].shape).
Currently I just loop through and create a new array, but it takes too long and I would prefer some sort of method that automates this. I do it with the following code...
input_4d = np.empty((n_volumes, n_input_z, n_input_x, n_input_y))
label_4d = np.empty((n_volumes, n_input_z, n_input_x, n_input_y))
for i in range(n_volumes):
    print("Converting to 4D tuple #" + str(i))
    for j in range(n_input_z):
        for k in range(n_input_x):
            for l in range(n_input_y):
                input_4d[i][j][k][l] = input_augmented_matrix[i][j][k][l]
                label_4d[i][j][k][l] = label_augmented_matrix[i][j][k][l]
Is there a cleaner and faster way to do this?
As I understand it, this part:
k = 0
for z in range(z_min, z_max):
    l = 0
    for x in range(x_min, x_max):
        m = 0
        for y in range(y_min, y_max):
            if j == 0:
                # input volume
                try:
                    temp_volume[k][l][m] = input_matrix[z][x][y]
                except:
                    pdb.set_trace()
            else:
                # ground truth volume
                temp_volume[k][l][m] = label_matrix[z][x][y]
            m = m + 1
        l = l + 1
    k = k + 1
You just want to do this
temp_input = input_matrix[z_min:z_max, x_min:x_max, y_min:y_max]
temp_label = label_matrix[z_min:z_max, x_min:x_max, y_min:y_max]
and then
# note: np.rot90's second argument counts 90-degree turns (k), not degrees
temp_input = np.rot90(temp_input, random_rotation)
temp_label = np.rot90(temp_label, random_rotation)
input_augmented_matrix.append(temp_input)
label_augmented_matrix.append(temp_label)
Here
input_augmented_matrix[i] = np.lib.pad(
    input_augmented_matrix[i],
    ((0, n_input_z - int(input_augmented_matrix[i][:, 0, 0].shape[0])),
     (0, n_input_x - int(input_augmented_matrix[i][0, :, 0].shape[0])),
     (0, n_input_y - int(input_augmented_matrix[i][0, 0, :].shape[0]))),
    'constant', constant_values=0)
Better to do this, because the shape property gives you the size of the array along all dimensions:
ia_shape = input_augmented_matrix[i].shape
input_augmented_matrix[i] = np.lib.pad(
    input_augmented_matrix[i],
    ((0, n_input_z - ia_shape[0]),
     (0, n_input_x - ia_shape[1]),
     (0, n_input_y - ia_shape[2])),
    'constant',
    constant_values=0)
I guess now you're ready to refactor the last part of your code with NumPy's fancy indexing.
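Concretely, once every padded volume has the same shape, one simple option (a sketch, assuming the padded list from above) is np.stack, which builds the 4D arrays in one call and replaces the whole quadruple loop:
input_4d = np.stack(input_augmented_matrix)  # shape (n_volumes, n_input_z, n_input_x, n_input_y)
label_4d = np.stack(label_augmented_matrix)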
My common suggestions:
use functions for repeated parts of code to avoid such deep indents as in your cascade of loops;
if you really need that many nested loops, think about recursion, if you can't do without them;
explore the abilities of NumPy in the official documentation: they're really exciting ;) For example, indexing is helpful for this task;
use the PyLint and Flake8 packages to inspect the quality of your code.
Do you want to write the neural network yourself, or do you just want to solve some pattern recognition task? The SciPy library may contain what you need, and it's based on NumPy.
I am trying to perform image correlation to find which frame out of a set of 20 frames (the set is stored in a 3D array, x) matches best with a given frame (stored as a 2D array, y). This step has to be performed 1000 times.
I tried to vectorize the code to make it run faster. But somehow the vectorization is making the code take twice as long. I am probably doing something wrong in the vectorization process which is making it slower.
Here is the code
import numpy as np
import time

def corr2(a, b):
    # Getting shapes and preallocating the auxiliary variables
    k = np.shape(a)
    # Calculating mean values
    AM = np.mean(a)
    BM = np.mean(b)
    # Calculate vectors
    c_vect = (a - AM) * (b - BM)
    d_vect = (a - AM)**2
    e_vect = (b - BM)**2
    # The formula itself
    r_out = np.sum(c_vect) / float(np.sqrt(np.sum(d_vect) * np.sum(e_vect)))
    return r_out
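Side note: corr2 is just the Pearson correlation coefficient of the flattened arrays, so a quick sanity check against np.corrcoef is possible (made-up data, assuming the corr2 above is in scope):
a = np.random.rand(8, 8)
b = np.random.rand(8, 8)
assert np.isclose(corr2(a, b), np.corrcoef(a.ravel(), b.ravel())[0, 1])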
def ZZ_1X_v1_MCC(R, RefImage):
    Cor = np.zeros(R.shape[2])
    for t in range(R.shape[2]):
        Cor[t] = corr2(RefImage, R[:, :, t])  # Correlation
    # Report
    max_correlationvalue_intermediate = np.amax(Cor)
    max_correlatedframe_intermediate = np.argmax(Cor)
    max_correlatedframeandvalue = [max_correlatedframe_intermediate, max_correlationvalue_intermediate]
    return max_correlatedframeandvalue
def ZZ_1X_v1_MCC_vectorized(R, RefImage):
    R_shape = np.asarray(np.shape(R))
    R_flattened = R.swapaxes(0, 2).reshape(R_shape[2], R_shape[0]*R_shape[1])
    AA = np.transpose(R_flattened)
    RefImageflattened = RefImage.transpose().ravel()
    # Calculating mean-subtracted values
    AAM = AA - np.mean(AA, axis=0)
    BM = RefImageflattened - np.mean(RefImageflattened)
    # Calculate vectors
    DD_vect = AAM**2
    E_vect = BM**2
    EE_vect = np.transpose(np.tile(np.transpose(E_vect), (R_shape[2], 1)))
    CC_vect = AAM * np.transpose(np.tile(BM, (R_shape[2], 1)))
    # The formula itself
    Cor = np.sum(CC_vect, axis=0) / np.sqrt((np.sum(DD_vect, axis=0) * np.sum(EE_vect, axis=0)).astype(float))
    # Report
    max_correlationvalue_intermediate = np.amax(Cor)
    max_correlatedframe_intermediate = np.argmax(Cor)
    max_correlatedframeandvalue = [max_correlatedframe_intermediate, max_correlationvalue_intermediate]
    return max_correlatedframeandvalue
x = np.arange(400000).reshape((20,200,100)).swapaxes(0,2) #3D array with 20 frames
y = np.transpose(np.arange(20000).reshape((200,100))) #2D array with 1 frame
# using for loop
tic = time.time()
for i in range(500):
    [a, b] = ZZ_1X_v1_MCC(x, y)
print(time.time() - tic)
# using vectorization
tic = time.time()
for i in range(500):
    [a, b] = ZZ_1X_v1_MCC_vectorized(x, y)
print(time.time() - tic)