I have N NumPy arrays of shape data[n, m, 3] (n varies per array). I want to repack them into N' arrays new_data_# of shape [1000, m, 3], where # indexes the new arrays. The problem is that n can be smaller or bigger than 1000. When it is smaller, the remaining capacity of the current 1000-row block should somehow be filled from the next array; when it is bigger, the overflow should start a new new_data_#. I don't know how to manage this. Here is pseudo-code, but it can't be done this way; the while, for example, may not be necessary. The output can be written to disk or returned in a new data structure.
def array2blocks(array_files):
    for each N in array_files:
        N = data = np.random.rand(n, m, 3)
        new_data = np.zeros((1000, m, 3), dtype=np.float32)
        j = 0
        index = 0
        while j <= new_data.shape[0]:
            for i in range(data.shape[0]):
                print("--->", data[i,:,:])
                print(i)
                if i <= new_data.shape[0]:
                    # here first we should check the left capacity of new_data and then insert data into it
                    # new_data[i, :, :] = data[i, :, :]  # this overrides previous items so not correct
                    print(new_data)
                else:
                    print('n>1000')
                    new_data_name = 'new_data' + '_' + str(index)
                    # here fill rest of the data in the new_data
                    ...
                    index += 1
        # when capacity is full write it to the disk
        print(new_data)
UPDATE with Aaron's old answer:
I replaced 1000 with batch_size = 5 to make it simple.
def numpyarrays2blocks(array_files):
    N1 = np.random.rand(7, 4, 3)
    N2 = np.random.rand(7, 4, 3)
    N3 = np.random.rand(4, 4, 3)
    # array_files = []
    array_files.append(N1)
    array_files.append(N2)
    array_files.append(N3)
    for N in array_files:
        n = N.shape[0]
        m = N.shape[1]
        batch_size = 5
        # N = data = np.random.rand(n, m, 3)
        data = N
        # print(data)
        new_arrays = []
        i = 0  # the current row index to insert
        while i < n:
            new_data = np.zeros((batch_size, m, 3), dtype=np.float32)
            j = min(i + batch_size, n)  # the last row (exclusive) to copy to new_data
            # j - i is the number of rows to copy
            new_data[:j - i, :, :] = data[i:j, :, :]
            print('NEW DATA: ', new_data)
            i = j  # update the index
            new_arrays.append(new_data)
        print(new_arrays)
Here, data stores the temporary (partially filled) block, and data_start is the row index at which the next rows are inserted into data. data is allocated whenever it is None, and yielded as soon as it is completely filled. merge_and_split is a generator, so the memory demand stays low.
import random
from typing import Iterator
import numpy as np
def merge_and_split(arrays, batch_size) -> Iterator:
    arrays = tuple(arrays)
    dtype = arrays[0].dtype
    data_shape = (batch_size,) + arrays[0].shape[1:]
    assert all(a.shape[1:] == data_shape[1:] for a in arrays), "Shape mismatch"
    data = None
    data_start = 0
    for src in arrays:
        src_index = 0
        src_avail = src.shape[0]
        while src_avail >= 1:
            if data is None:
                # allocate if None
                data = np.zeros(data_shape, dtype=dtype)
                data_start = 0
            num_moved = min(batch_size - data_start, src_avail)
            data[data_start:data_start + num_moved, ...] = src[src_index:src_index + num_moved, ...]
            data_start += num_moved
            src_index += num_moved
            src_avail -= num_moved
            if data_start >= batch_size:
                yield data
                data = None
    if data is not None:
        yield data

def input_arrays():
    number = 10
    r = random.Random(13)
    return [np.random.randint(0, 10, size=(r.randint(1, 5), 4, 3)) for _ in range(number)]

def main():
    # Testing input and output
    arrays = input_arrays()
    # for i, item in enumerate(arrays):
    #     print('input', i, item.shape)
    #     print(item)
    result = list(merge_and_split(arrays, 5))
    # for i, item in enumerate(result):
    #     print('result', i, item.shape)
    #     print(item)
    src_concat = np.vstack(arrays)
    row_number = sum(s.shape[0] for s in arrays)
    print('concatenated', src_concat.shape, row_number)
    out_concat = np.vstack(result)
    print(out_concat.shape)
    print((out_concat[0:row_number, ...] == src_concat).all())  # They are indeed the same

if __name__ == '__main__':
    main()
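If, as in the original question, each 1000-row block should go straight to disk instead of being collected in memory, a minimal sketch on top of the generator could look like this (the np.save call and the new_data_<index>.npy naming are illustrative assumptions, not part of the answer above):

import numpy as np

# Hedged sketch: stream each full (or final partial) block from the generator to disk.
# The file naming scheme is an assumption for illustration.
for index, block in enumerate(merge_and_split(arrays, 1000)):
    np.save(f'new_data_{index}.npy', block)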
You can concatenate all your original arrays and then split them:
ars = ... # list of N arrays
ars = np.concatenate(ars, axis=0)
ars = np.split(ars, np.arange(1000, ars.shape[0], 1000))
The last line could instead use an integer section count, e.g. ars = np.split(ars, ars.shape[0] // 1000), but only if you're sure that the total number of rows is a multiple of 1000, since np.split will barf otherwise (an integer argument means "this many equal sections", not "sections of this size"). Specifying explicit split points, as with np.arange, allows you to have a shorter final segment.
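As a quick illustration of the concatenate-and-split approach (the shapes below are made up for the example; m = 4 is arbitrary):

import numpy as np

# Three arrays totalling 2200 rows -> split points at 1000 and 2000
ars = [np.random.rand(700, 4, 3), np.random.rand(900, 4, 3), np.random.rand(600, 4, 3)]
ars = np.concatenate(ars, axis=0)                             # shape (2200, 4, 3)
blocks = np.split(ars, np.arange(1000, ars.shape[0], 1000))
print([b.shape for b in blocks])                              # [(1000, 4, 3), (1000, 4, 3), (200, 4, 3)]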
Context:
I have a function to upsample multiple arrays that I want to write as efficiently as possible (because I have to run it 370000 times).
This function takes multiple inputs and is composed of 2 for loops. To upsample my arrays, I loop over this function with a parameter k, and I would like to get rid of this loop (which sits outside of the function). I tried using a mix of map() and list-comprehension to minimize my computing time but I can't make it work.
Question:
How do I get the map() part of my code working (see the last section of the code)? Is there a better way than map() to get rid of for loops?
Summary:
Function interpolate_and_get_results: 2 for loops. Takes 3D and 2D arrays and an int as inputs.
This function is run inside a for loop (over the parameter k) that I want to get rid of.
I wrote some example code; the part with map() does not work because I can't think of a way to pass the k parameter both as the list being iterated over and as an input to the function.
Thank you!
PS: the code also contains pieces to parallelize the interpolation function, which I do not use in this example.
import numpy as np
import time
#%% --- SETUP OF THE PROBLEM --- %%#
temperatures = np.random.rand(10,4,7)*100
precipitation = np.random.rand(10,4,7)
snow = np.random.rand(10,4,7)
# Flatten the arrays to make them iterable with map()
temperatures = temperatures.reshape(10,4*7)
precipitation = precipitation.reshape(10,4*7)
snow = snow.reshape(10,4*7)
# Array of altitudes to "adjust" the temperatures
alt = np.random.rand(4,7)*1000
# Flatten the array
alt = alt.reshape(4*7)
# Weight Matrix
w = np.random.rand(4*7, 1000, 1000)
#%% Function
def interpolate_and_get_results(temp, prec, Eprec, w, i, k):
    # Do some calculations
    factor1 = ((temperatures[i,k]-272.15) + (-alt[k] * -6/1000))
    factor2 = precipitation[i,k]
    factor3 = snow[i,k]
    # Iterate through every cell of the upsampled arrays
    for i in range(w.shape[1]):
        for j in range(w.shape[2]):
            val = w[k, i, j]
            temp[i, j] += factor1 * val
            prec[i, j] += factor2 * val
            Eprec[i, j] += factor3 * val
#%% --- Function call without loop simplification --- ##%
# Prepare a template array
dummy = np.zeros((w.shape[1], w.shape[2]))
# Initialize the global arrays to be filled
tempYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
precYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
EprecYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
ts = time.time()
for i in range(temperatures.shape[0]):
    # Create empty host arrays
    temp = dummy.copy()
    prec = dummy.copy()
    Eprec = dummy.copy()
    for k in range(w.shape[0]):
        interpolate_and_get_results(temp, prec, Eprec, w, i, k)
print('Time: ', (time.time()-ts))
#%% --- With Map (DOES NOT WORK) --- %%#
del k
dummy = np.zeros((w.shape[1], w.shape[2]))
# Initialize the global arrays to be filled
tempYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
precYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
EprecYEAR2 = np.zeros((9, dummy.shape[0], dummy.shape[1]))
# Create a list k to be iterated through with the map() function
k = [k for k in range(0, temperatures.shape[1])]
for i in range(temperatures.shape[0]):
    # Create empty host arrays
    temp = dummy.copy()
    prec = dummy.copy()
    Eprec = dummy.copy()
    # Call the interpolate function with map() iterating through k
    map(interpolate_and_get_results(temp, prec, Eprec, w, i, k), k)
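For the narrow question of passing k both as the iterable and as a function argument, one standard-library pattern is functools.partial, which binds the fixed arguments so that map() only supplies k. This is just a sketch of that pattern under the setup above (the name ks is mine); note that map() is lazy, so it has to be consumed, e.g. with list(), and it will not be faster than the explicit loop since the function is still called once per k:

from functools import partial

ks = range(w.shape[0])  # the k values to iterate over
# Bind temp, prec, Eprec, w and i; map() then supplies k one value at a time.
list(map(partial(interpolate_and_get_results, temp, prec, Eprec, w, i), ks))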
Code from @Jérôme Richard using numba, added at the request of user @ken (takes 48.81 s to run on my PC):
import numpy as np
import multiprocessing as mp
import time
#%% ------ Create data ------ ###
temperatures = np.random.rand(10,4,7)*100
precipitation = np.random.rand(10,4,7)
snow = np.random.rand(10,4,7)
# Array of altitudes to "adjust" the temperatures
alt = np.random.rand(4,7)*1000
#%% ------ IDW Interpolation ------ ###
# We create a weight matrix that we use to upsample our temperatures, precipitations and snow matrices
# This part is not that important, it works well as it is
MX,MY = np.shape(temperatures[0])
N = 300
T = np.zeros([N*MX+1, N*MY+1])
# create NxM inverse distance weight matrices based on Gaussian interpolation
x = np.arange(0,N*MX+1)
y = np.arange(0,N*MY+1)
X,Y = np.meshgrid(x,y)
k = 0
w = np.zeros([MX*MY,N*MX+1,N*MY+1])
for mx in range(MX):
    for my in range(MY):
        # Gaussian
        add_point = np.exp(-((mx*N-X.T)**2+(my*N-Y.T)**2)/N**2)
        w[k,:,:] += add_point
        k += 1
sum_weights = np.sum(w, axis=0)
for k in range(MX*MY):
    w[k,:,:] /= sum_weights
#%% --- Function --- %%#
# Code from Jérôme Richard: https://stackoverflow.com/questions/72399050/parallelize-three-nested-loops/72399494?noredirect=1#comment127919686_72399494
import numba as nb
# get_results + interpolator
@nb.njit('void(float64[:,::1], float64[:,::1], float64[:,::1], float64[:,:,::1], int_, int_, int_, int_)', parallel=True)
def interpolate_and_get_results(temp, prec, Eprec, w, i, k, mx, my):
    factor1 = ((temperatures[i,mx,my]-272.15) + (-alt[mx, my] * -6/1000))
    factor2 = precipitation[i,mx,my]
    factor3 = snow[i,mx,my]
    # Fill the upsampled arrays
    for i in nb.prange(w.shape[1]):
        for j in range(w.shape[2]):
            val = w[k, i, j]
            temp[i, j] += factor1 * val
            prec[i, j] += factor2 * val
            Eprec[i, j] += factor3 * val
#%% --- Main Loop --- %%#
ts = time.time()
if __name__ == '__main__':
    dummy = np.zeros((w.shape[1], w.shape[2]))
    # Initialize the permanent arrays to be filled
    tempYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    precYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    EprecYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smbYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    # Initialize semi-permanent array
    smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    # Loop over the "time" axis
    for i in range(0, temperatures.shape[0]):
        # Create empty semi-permanent arrays
        temp = dummy.copy()
        prec = dummy.copy()
        Eprec = dummy.copy()
        # Loop over the different weights
        for k in range(w.shape[0]):
            # Loops over the cells of the array to be upsampled
            for mx in range(MX):
                for my in range(MY):
                    interpolate_and_get_results(temp, prec, Eprec, w, i, k, mx, my)
        # At each timestep, update the semi-permanent array using the results from the interpolate function
        smb[np.logical_and(temp <= 0, prec > 0)] += prec[np.logical_and(temp <= 0, prec > 0)]
        # Fill the permanent arrays (equivalent of storing the results at the end of every year)
        # and reinitialize the semi-permanent array every 5th timestep
        if i%5 == 0:
            # Permanent
            tempYEAR[int(i/5)] = temp
            precYEAR[int(i/5)] = prec
            EprecYEAR[int(i/5)] = Eprec
            smbYEAR[int(i/5)] = smb
            # Semi-permanent
            smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    print("Time spent:", time.time()-ts)
Note: This answer is not about how to use map, it's about "a better way".
You are doing a lot of redundant calculations: factor1, factor2 and factor3 depend only on the coarse cell (mx, my) and not on k, while the weight w[k] does not depend on (mx, my), so the whole k/mx/my triple loop collapses into (sum of the factors over the coarse cells) times (sum of w over k). Believe it or not, this code outputs the same result.
# No change in the initialization section above.
ts = time.time()
if __name__ == '__main__':
    dummy = np.zeros((w.shape[1], w.shape[2]))
    # Initialize the permanent arrays to be filled
    tempYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    precYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    EprecYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smbYEAR = np.zeros((9, dummy.shape[0], dummy.shape[1]))
    smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    temperatures_inter = temperatures - 272.15
    w_inter = w.sum(axis=0)
    alt_inter = (alt * (-6 / 1000)).sum()
    for i in range(0, temperatures_inter.shape[0]):
        temp_i = (temperatures_inter[i].sum() - alt_inter) * w_inter
        prec_i = precipitation[i].sum() * w_inter
        Eprec_i = snow[i].sum() * w_inter
        condition = np.logical_and(temp_i <= 0, prec_i > 0)
        smb[condition] += prec_i[condition]
        if i % 5 == 0:
            tempYEAR[i // 5] = temp_i
            precYEAR[i // 5] = prec_i
            EprecYEAR[i // 5] = Eprec_i
            smbYEAR[i // 5] = smb
            smb = np.zeros((dummy.shape[0], dummy.shape[1]))
    print("Time spent:", time.time() - ts)
I verified the results by comparing them to the output of the code that uses numba. The difference is about 0.0000001, which is probably caused by rounding error.
print((tempYEAR_from_yours - tempYEAR_from_mine).sum()) # -8.429287845501676e-08
print((precYEAR_from_yours - precYEAR_from_mine).sum()) # 2.595697878859937e-09
print((EprecYEAR_from_yours - EprecYEAR_from_mine).sum()) # -7.430216442116944e-09
print((smbYEAR_from_yours - smbYEAR_from_mine).sum()) # -6.875431779462815e-09
On my PC, this code took 0.36 seconds. It does not use numba and is not even parallelized. It just eliminated redundancy.
I need X[0].shape to be (N, 120, 160, 3), which works, but I also need X[1].shape = (N, 1, 1), and I only get (N,).
I have tried reshaping it but I didn't manage to make it work.
I am working on a data generator. I am trying to load inputs from a camera on a car into X and Y to be able to train a model on it (the goal is to make a self-driving car).
I am struggling with it.
Here is the part that makes my head hurt.
image_data = {
"image": np.zeros((120, 160, 3), np.float32),
"speed": 3.4,
"throttle": 0.4,
"steering": 0.14,
}
inputs=["image", "speed"]
outputs=["steering", "throttle"]
batch_size = 64
X = []
Y = []
for j in range(len(inputs)):
    L = []
    for i in range(batch_size):
        data = np.array(list(image_data.items()))
        L.append(data[j][1])
    X.append(np.array(L))
Based on your current question, if you want to have a (64,1) shape, you can manually reshape it with np.reshape
X[1]=np.reshape(X[1],(64,1))
X[1].shape
Out[59]: (64, 1)
Again, it's inefficient to convert a list to a NumPy array and back to a list again. Do everything with lists first and only then convert your X[0] and X[1] into NumPy arrays (a sketch of that final conversion follows the loop below).
for j in range(len(inputs)):
    L = []
    for i in range(batch_size):
        data = list(image_data.items())
        L.append(data[j][1])
    X.append(L)
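A minimal sketch of the final conversion step suggested above, assuming X was re-initialized to an empty list before this loop; the reshape of the scalar "speed" column is my addition, matching the (64, 1) shape shown earlier (use (batch_size, 1, 1) if three dimensions are really needed):

# Convert the accumulated lists to arrays only once, at the end.
X = [np.array(x) for x in X]
X[1] = X[1].reshape(batch_size, 1)     # scalar input as a (64, 1) column
print(X[0].shape, X[1].shape)          # (64, 120, 160, 3) (64, 1)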
Here is the fix I found; I wasn't searching for the right thing before.
def __data_generation(self):
    X = [[] for _ in range(len(self.inputs))]
    Y = [[] for _ in range(len(self.outputs))]
    rdm_paths = np.random.choice(self.paths, size=self.batch_size)
    for path in rdm_paths:
        try:
            image_data = io.load_image_data(path)
            self.augm(image_data)
            for i, inp in enumerate(self.inputs):
                data = np.array(image_data[inp])
                if len(data.shape) < 2:
                    data = np.expand_dims(data, axis=0)
                X[i].append(data)
            for i, out in enumerate(self.outputs):
                data = np.array(image_data[out])
                if len(data.shape) < 2:
                    data = np.expand_dims(data, axis=0)
                Y[i].append(data)
        except Exception:
            logging.debug(f"Error processing {path}")
    X = [np.array(x) for x in X]
    Y = [np.array(y) for y in Y]
    return X, Y
scipy's generic_filter can be very slow when it is applied to a large N-dimensional array. I am wondering if it can be parallelized across multiple cores, since the filtering of each moving window is an independent operation.
I wrote a function that runs generic_filter in parallel using the multiprocessing library. It divides the big array into blocks along the rows and processes each block in parallel.
import numpy as np
from functools import partial
from multiprocessing import Pool
from scipy import ndimage

def generic_filter_parallel(in_array, num_arrays, function_generic_filter, size):
    """
    Parameters
    ----------
    in_array : 2D numpy array
    num_arrays : int
        number of blocks into which the big array will be split
    function_generic_filter : function
        function to use in the generic filter
    size : tuple
        size of the filter

    Returns
    -------
    filter_array : 2D numpy array
        filtered array
    """
    # get the window size for each dimension
    window_size_rows = size[0]
    window_size_cols = size[1]

    # split the array into blocks along the rows
    array_split = np.array_split(in_array, num_arrays)

    # record nrows per block, as it can differ depending on how the array splits
    nrows = []
    for i_array in array_split:
        nrows.append(i_array.shape[0])

    # add borders to the upper and lower part of each split block
    list_split_arrays = []
    for n_array in range(num_arrays):
        if n_array == 0:
            aux = np.vstack((array_split[n_array], array_split[n_array + 1][0:window_size_rows, :]))
            list_split_arrays.append(aux)
        elif n_array < num_arrays - 1:
            aux = np.vstack((array_split[n_array - 1][-window_size_rows::, :], array_split[n_array], array_split[n_array + 1][0:window_size_rows, :]))
            list_split_arrays.append(aux)
        else:
            aux = np.vstack((array_split[n_array - 1][-window_size_rows::, :], array_split[n_array]))
            list_split_arrays.append(aux)

    # process each padded block in parallel
    pool = Pool(processes=num_arrays)
    res_pool = pool.imap(partial(ndimage.generic_filter, function=function_generic_filter, size=(window_size_rows, window_size_cols)), list_split_arrays)
    pool.close()

    # collect the results
    list_filters_parallel = []
    for i in res_pool:
        list_filters_parallel.append(i)

    # compose the final image from the individual blocks processed in parallel
    filter_array = np.zeros(in_array.shape)
    ini_row = 0
    fin_row = 0
    for n_array in range(num_arrays):
        if n_array == 0:
            ini_row = 0
            fin_row = nrows[n_array]
        else:
            ini_row = ini_row + nrows[n_array - 1]
            fin_row = fin_row + nrows[n_array]
        if n_array == 0:
            aux = list_filters_parallel[n_array][ini_row:fin_row, :]
            filter_array[ini_row:fin_row, :] = aux
        elif n_array < num_arrays - 1:
            aux = list_filters_parallel[n_array][window_size_rows:-window_size_rows, :]
            filter_array[ini_row:fin_row, :] = aux
        else:
            aux = list_filters_parallel[n_array][window_size_rows::, :]
            filter_array[ini_row:fin_row, :] = aux
    return filter_array
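A small usage sketch (the array size, the number of blocks, the 5x5 window and the np.nanmean reducer are arbitrary choices for illustration; the call needs to sit under an if __name__ == '__main__' guard so the worker processes can safely import the module):

import numpy as np
from scipy import ndimage

if __name__ == '__main__':
    big = np.random.rand(2000, 500)
    filtered = generic_filter_parallel(big, num_arrays=4,
                                       function_generic_filter=np.nanmean,
                                       size=(5, 5))
    # Single-core reference for comparison; results should agree up to floating point.
    reference = ndimage.generic_filter(big, np.nanmean, size=(5, 5))
    print(np.allclose(filtered, reference))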
I have been trying to implement k-means clustering with a heatmap, but have been unsuccessful.
Here is the initial data set:
https://raw.githubusercontent.com/gsprint23/cpts215/master/progassignments/files/simple.csv
And here is my code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import random
#%matplotlib inline
def truncate(f, n):
    return math.floor(f * 10 ** n) / 10 ** n

def chooseCenter(data, centers):
    length = data.shape
    cent = []
    while len(cent) < centers:
        x = random.randrange(0, length[0])
        y = random.randrange(0, length[1])
        if data.iloc[x][y] not in cent:
            d = truncate(data.iloc[x][y], 2)
            cent.append(d)
    return cent

def distance(val, center):
    return math.sqrt((val - center)**2)

def getDistances(centers, data):
    length = data.shape
    dist = []
    for i in range(length[0]):
        for j in range(length[1]):
            y = []
            for k in range(len(centers)):
                val = distance(data.iloc[i][j], centers[k])
                y.append(truncate(val, 3))
            dist.append(y)
    return dist

def findClosest(data, dist):
    close = data.copy()
    length = close.shape
    indexes = []
    for i in range(len(dist)):
        pt = min(dist[i])
        idx = dist[i].index(pt)
        indexes.append(idx)
    #print(indexes)
    length = data.shape
    n = np.array(indexes)
    n = pd.DataFrame(np.reshape(n, (length[0], length[1])))
    #reshape this data frame into the same shape as the data
    #keep running the find closest until there is no change
    #try heatmap on this?
    #this should cluster it, but to make sure test it
    #might need to do some tweaking to this
    return n
    # for i in range(length[0]):
    #     for j in range(length[1]):
    #         print('dist[i]', dist[j])
    #         pt = min(dist[j])
    #         print(pt)
    #         idx = dist[j].index(pt)
    #         close.iloc[i][j] = int(idx)
    #return close

def computeNewCenter(data, close):
    d = dict()
    for i in range(len(close)):
        for j in range(len(close[0])):
            d[close.iloc[i][j]] = []
    for i in range(len(data)):
        for j in range(len(data[0])):
            if close.iloc[i][j] in d:
                d[close.iloc[i][j]].append(data.iloc[i][j])
    newCenters = []
    for key, value in d.items():
        m = np.mean(value)
        newCenters.append(truncate(m, 3))
    return newCenters
    # lst = [[] * numcenters]
    # for i in range(len(close)):
    #     for j in range(len(close[0])):
    #         if close.iloc[i][j]

def main():
    data = np.array(pd.read_csv('https://raw.githubusercontent.com/gsprint23/cpts215/master/progassignments/files/simple.csv', header=None))
    data = data.T
    #print(data)
    df = pd.DataFrame(data[1:], columns=data[0], dtype=float).T
    df = df.iloc[::-1]
    # print(df)
    # print(df.iloc[1][9])
    # print(df)
    # print(df.iloc[0][1])
    # heatmap = plt.pcolor(df, cmap=plt.cm.bwr)
    # plt.colorbar(heatmap)
    c = chooseCenter(df, 3)
    print(c)
    #print(len(c))
    dist = getDistances(c, df)
    #print(dist)
    y = findClosest(df, dist)
    # q = []
    # for i in range(len(c)):
    #     q.append([])
    # #print(q)
    j = computeNewCenter(df, y)
    #print(j)
    length = df.shape
    oldFrame = pd.DataFrame(np.ndarray((length[0], length[1])))
    oldFrame = oldFrame.fillna(0)
    ct = 0
    while y.equals(oldFrame) == False:
        ct += 1
        oldFrame = y.copy()
        c = computeNewCenter(df, oldFrame)
        #print(c)
        dist = getDistances(c, df)
        #print(dist)
        y = findClosest(df, dist)
        #print(y)
    #plt.pcolor(df, cmap=plt.cm.bwr)
    l = []
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 1:
                l.append(df.iloc[i][j])
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 2:
                l.append(df.iloc[i][j])
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 0:
                l.append(df.iloc[i][j])
    l = np.ndarray((length[0], length[1]))
    l = pd.DataFrame(l)
    print(l)
    hm = plt.pcolor(l, cmap=plt.cm.bwr)
    plt.colorbar(hm)
    # print(y)
    # print(c)
    # print(ct)
    #plt.pcolor(y, cmap=plt.cm.bwr)

if __name__ == '__main__':
    main()
My line of thinking was this:
1. First, randomly choose the centers.
2. Then create a list of lists holding, for each point, the distance to each center.
3. Find the index of the minimum distance for each point across the centers.
4. Create a data frame of the same size as the data set and fill each element with the index of the center that point is closest to.
5. Recompute each center by taking the mean of the points assigned to that center index.
6. Repeat this process until the index data frame no longer changes.
7. Create a new data frame and place points that share the same center close together in the frame.
8. Then create the heatmap.
This did not seem to work though.
Just wondering: am I on the right track, or am I completely off? If I am on the right track, which parts would I need to change to fix the issue? If not, could you please point me in the right direction?
Here is a comparison of the maps (images not included here): the first one is the map my program generated, while the second is the way it is supposed to look.
I know my problem lies in some part of the k-means clustering algorithm. My guess is it is either in the reassignment stage, where the points are reassigned to the centroids and the new centroids are calculated, or in the stopping condition, in that the algorithm does not run long enough. In the back of my head, something also tells me that I am not doing this as efficiently as I could and that I am missing something key. I have watched several videos on k-means clustering and understand it conceptually; I'm just having a hard time implementing it.
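For reference, here is a minimal 1-D k-means over the flattened cell values (the same setup as above: clustering individual values rather than rows), written as a comparison baseline rather than a fix of the code above; the random seed, iteration cap and convergence test are arbitrary choices, and empty clusters are not handled:

import numpy as np

def kmeans_1d(values, k=3, iters=100, seed=0):
    # Cluster a flat array of scalars into k groups; returns labels and centers.
    rng = np.random.default_rng(seed)
    centers = rng.choice(values, size=k, replace=False)
    for _ in range(iters):
        labels = np.argmin(np.abs(values[:, None] - centers[None, :]), axis=1)
        new_centers = np.array([values[labels == j].mean() for j in range(k)])
        if np.allclose(new_centers, centers):
            break
        centers = new_centers
    return labels, centers

# Usage sketch, assuming df is the numeric DataFrame built in main() above:
# labels, centers = kmeans_1d(df.to_numpy(dtype=float).ravel(), k=3)
# label_grid = labels.reshape(df.shape)  # same shape as the data, usable with plt.pcolor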
I'm trying to multiply 2 big matrices under a memory limit using hdf5 (pytables),
but numpy.dot gives me an error:
ValueError: array is too big
Do I need to do the matrix multiplication myself, maybe blockwise, or is there another Python function similar to numpy.dot?
import numpy as np
import time
import tables
import cProfile
import numexpr as ne
n_row=10000
n_col=100
n_batch=10
rows = n_row
cols = n_col
batches = n_batch
atom = tables.UInt8Atom() #?
filters = tables.Filters(complevel=9, complib='blosc') # tune parameters
fileName_a = 'C:\carray_a.h5'
shape_a = (rows*batches, cols) # predefined size
h5f_a = tables.open_file(fileName_a, 'w')
ca_a = h5f_a.create_carray(h5f_a.root, 'carray', atom, shape_a, filters=filters)
for i in range(batches):
    data = np.random.rand(rows,cols)
    ca_a[i*rows:(i+1)*rows]= data[:]
#h5f_0.close()
rows = n_col
cols = n_row
batches = n_batch
fileName_b = 'C:\carray_b.h5'
shape_b = (rows, cols*batches) # predefined size
h5f_b = tables.open_file(fileName_b, 'w')
ca_b = h5f_b.create_carray(h5f_b.root, 'carray', atom, shape_b, filters=filters)
#need to batch by cols
sz = rows // batches  # integer number of rows per batch
for i in range(batches):
    data = np.random.rand(sz, cols*batches)
    ca_b[i*sz:(i+1)*sz]= data[:]
#h5f_1.close()
rows = n_batch*n_row
cols = n_batch*n_row
fileName_c = 'C:\carray_c.h5'
shape_c = (rows, cols) # predefined size
h5f_c = tables.open_file(fileName_c, 'w')
ca_c = h5f_c.create_carray(h5f_c.root, 'carray', atom, shape_c, filters=filters)
a= h5f_a.root.carray#[:]
b= h5f_b.root.carray#[:]
c= h5f_c.root.carray
t0= time.time()
c= np.dot(a,b)  # error if the array is big
print (time.time()-t0)
Update: so here is the code. It's interesting, but using hdf5 it works even faster.
import numpy as np
import tables
import time
sz= 100 #chunk size
n_row=10000 #m
n_col=1000 #n
#for arbitrary size
A=np.random.rand(n_row,n_col)
B=np.random.rand(n_col,n_row)
# A=np.random.randint(5, size=(n_row,n_col))
# B=np.random.randint(5, size=(n_col,n_row))
#using numpy array
#C= np.zeros((n_row,n_row))
#using hdf5
fileName_C = 'CArray_C.h5'
atom = tables.Float32Atom()
shape = (A.shape[0], B.shape[1])
Nchunk = 128 # ?
chunkshape = (Nchunk, Nchunk)
chunk_multiple = 1
block_size = chunk_multiple * Nchunk
h5f_C = tables.open_file(fileName_C, 'w')
C = h5f_C.create_carray(h5f_C.root, 'CArray', atom, shape, chunkshape=chunkshape)
sz= block_size
t0= time.time()
for i in range(0, A.shape[0], sz):
    for j in range(0, B.shape[1], sz):
        for k in range(0, A.shape[1], sz):
            C[i:i+sz,j:j+sz] += np.dot(A[i:i+sz,k:k+sz],B[k:k+sz,j:j+sz])
print (time.time()-t0)
t0= time.time()
res= np.dot(A,B)
print (time.time()-t0)
print (C== res)
h5f_C.close()
I don't know of a np.dot that works without loading the arrays into memory. I think blocking would work pretty well. Create an output array (called "c" below) as a pytables CArray and fill it in blocks. You should choose the chunkshape when you create it to match your blocking scheme. Something like:
atom = tables.Float32Atom() # you have UInt8Atom() above. do you mean that?
shape = (a.shape[0], b.shape[1])
# you can vary block_size and chunkshape independently, but I would
# aim to have block_size an integer multiple of chunkshape
# your mileage may vary and depends on the array size and how you'll
# access it in the future.
Nchunk = 128 # ?
chunkshape = (Nchunk, Nchunk)
chunk_multiple = 1
block_size = chunk_multiple * Nchunk
c = h5f.create_carray(h5f.root, 'c', atom, shape, chunkshape=chunkshape)
for i_start in range(0, a.shape[0], block_size):
    for j_start in range(0, b.shape[1], block_size):
        for k_start in range(0, a.shape[1], block_size):
            c[i_start:i_start + block_size, j_start:j_start + block_size] += \
                np.dot(a[i_start:i_start + block_size, k_start:k_start + block_size],
                       b[k_start:k_start + block_size, j_start:j_start + block_size])