Perform summation at indices saved as x and y arrays - Python

I have a basic question regarding indexing. I have two arrays of length 9 million holding vectorized image coordinates that were extracted by a previous function. Now I want to decrement a heatmap using this vectorized data. I could use a for loop and zip the coordinates, but I would prefer a faster solution, something like
T = [L[i] +=1 for i in zip(X,Y)]
or something. Is this possible?
coord = [x_coords,y_coords]
Heatmap[coord[0],coord[1]] -= 1
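One caveat (not from the original answers): fancy-index assignment like Heatmap[coord[0], coord[1]] -= 1 applies at most one update per cell because the operation is buffered, so repeated (x, y) pairs are silently collapsed. If your 9 million coordinates contain duplicates, np.subtract.at performs the unbuffered equivalent. A minimal sketch using the sample data from the answers below:
import numpy as np

heat_map = np.arange(32).reshape(8, 4)
x = [2, 3, 0, 5, 6, 2, 3, 4, 3]
y = [3, 1, 2, 0, 3, 3, 1, 1, 1]
# unbuffered in-place subtraction: every (x, y) pair decrements its cell,
# including repeated pairs such as (2, 3) and (3, 1) above
np.subtract.at(heat_map, (x, y), 1)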

This is one solution using collections. I have also added a performance comparison against @Piinthesky's pandas solution.
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict

# your pre-existing heatmap as a numpy array
heat_map = np.arange(32).reshape(8, 4)
# your x and y pairs as lists
x = [2, 3, 0, 5, 6, 2, 3, 4, 3]
y = [3, 1, 2, 0, 3, 3, 1, 1, 1]

def jp_data_analysis(heat_map, x, y):
    # count occurrences of x, y pairs
    c = OrderedDict(Counter(zip(x, y)))
    # create numpy array with count as value at position x, y
    pic_occur = np.zeros(heat_map.shape, dtype=int)
    x_c, y_c = list(zip(*c))
    pic_occur[x_c, y_c] = list(c.values())
    # subtract this from heatmap
    heat_map -= pic_occur
    return heat_map

def piinthesky(heat_map, x, y):
    # count occurrences of x, y pairs
    df = pd.DataFrame({"x": x, "y": y}).groupby(["x", "y"]).size().reset_index(name='count')
    # create numpy array with count as value at position x, y
    pic_occur = np.zeros([heat_map.shape[0], heat_map.shape[1]], dtype=int)
    pic_occur[df["x"], df["y"]] = df["count"]
    # and subtract this from heatmap
    heat_map -= pic_occur
    return heat_map
%timeit jp_data_analysis(heat_map, x, y)
# 10000 loops, best of 3: 43.8 µs per loop
%timeit piinthesky(heat_map, x, y)
# 100 loops, best of 3: 4.45 ms per loop

This is a solution using numpy/pandas. The x, y convention follows the usual one for images, but you should check this against your dataset.
import numpy as np
import pandas as pd

# your pre-existing heatmap as a numpy array
heat_map = np.arange(32).reshape(8, 4)
# your x and y pairs as lists
x = [2, 3, 0, 5, 6, 2, 3, 4, 3]
y = [3, 1, 2, 0, 3, 3, 1, 1, 1]
# count occurrences of x, y pairs
df = pd.DataFrame({"x": x, "y": y}).groupby(["x", "y"]).size().reset_index(name='count')
# create numpy array with count as value at position x, y
pic_occur = np.zeros([heat_map.shape[0], heat_map.shape[1]], dtype=int)
pic_occur[df["x"], df["y"]] = df["count"]
# and subtract this from heatmap
heat_map -= pic_occur

Related

Creating Density/Heatmap Plot from Coordinates and Magnitude in Python

I have some data giving the number of readings at each point on a 5x10 grid, in the following format:
X = [1, 2, 3, 4,..., 5]
Y = [1, 1, 1, 1,...,10]
Z = [9,8,14,0,89,...,0]
I would like to plot this as a heatmap/density map from above, but all of the matplotlib graphs (incl. contourf) that I have found require a 2D array for Z, and I don't understand why.
EDIT:
I have now collected the actual coordinates that I want to plot, which are not as regular as the ones above. They are:
X = [8,7,7,7,8,8,8,9,9.5,9.5,9.5,11,11,11,10.5,
10.5,10.5,10.5,9,9,8, 8,8,8,6.5,6.5,1,2.5,4.5,
4.5,2,2,2,3,3,3,4,4.5,4.5,4.5,4.5,3.5,2.5,2.5,
1,1,1,2,2,2]
Y = [5.5,7.5,8,9,9,8,7.5,6,6.5,8,9,9,8,6.5,5.5,
5,3.5,2,2,1,2,3.5,5,1,1,2,4.5,4.5,4.5,4,3,
2,1,1,2,3,4.5,3.5,2.5,1.5,1,5.5,5.5,6,7,8,9,
9,8,7]
z = [286,257,75,38,785,3074,1878,1212,2501,1518,419,33,
3343,1808,3233,5943,10511,3593,1086,139,565,61,61,
189,155,105,120,225,682,416,30632,2035,165,6777,
7223,465,2510,7128,2296,1659,1358,204,295,854,7838,
122,5206,6516,221,282]
From what I understand, you can't use floats in a np.array, so I have tried multiplying all values by 10 so that they are all integers, but I am still running into some issues. Am I trying to do something that will not work?
They expect a 2D array because they use the "row" and "column" to set the position of the value. For example, if array[2, 3] = 5, then when x is 2 and y is 3, the heatmap will use the value 5.
So, let's try transforming your current data into a single array:
>>> array = np.empty((len(set(X)), len(set(Y))))
>>> for x, y, z in zip(X, Y, Z):
...     array[x-1, y-1] = z
If X and Y are np.arrays, you could do this too (SO answer):
>>> array = np.empty((len(np.unique(X)), len(np.unique(Y))))
>>> array[X - 1, Y - 1] = Z
And now just plot the array as you prefer:
>>> plt.imshow(array, cmap="hot", interpolation="nearest")
>>> plt.show()
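For the irregular float coordinates in the EDIT, here is a minimal sketch under the assumption that all values really do fall on a 0.5 grid, so scaling by 2 gives valid integer indices (np.zeros also avoids the uninitialized cells that np.empty would leave):
import numpy as np
import matplotlib.pyplot as plt

# X, Y, z are the lists from the EDIT above
Xi = (np.asarray(X) * 2).astype(int)   # 0.5 grid -> integer indices
Yi = (np.asarray(Y) * 2).astype(int)
grid = np.zeros((Xi.max() + 1, Yi.max() + 1))
np.add.at(grid, (Xi, Yi), z)           # accumulates if a coordinate repeats
plt.imshow(grid.T, origin="lower", cmap="hot", interpolation="nearest")
plt.show()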

Numpy sum of minimums of two arrays like dot product efficiently

I would like to get two arrays' sum of minimums efficiently with numpy. For example:
X=np.array([[1,2,3],[1,2,0]])
Y=np.array([[0,2,0],[1,3,1]])
My result should be:
result = array([[2, 4],[2, 3]])
The calculation for the first cell:
result[0,0] = min(X[0,0],Y[0,0])+ min(X[0,1],Y[0,1])+min(X[0,2],Y[0,2])
In general, the result should be:
res[i,j] = sum(np.minimum(X[i, :], Y[j, :]))
but I am looking for the fastest way.
dot is the equivalent of taking outer products, and summing on the appropriate axis.
The equivalent in your case is:
In [291]: np.minimum(X[:,None,:], Y[None,:,:])
Out[291]:
array([[[0, 2, 0],
        [1, 2, 1]],

       [[0, 2, 0],
        [1, 2, 0]]])
In [292]: np.sum(np.minimum(X[:,None,:], Y[None,:,:]), axis=-1)
Out[292]:
array([[2, 4],
       [2, 3]])
Best I could do:
import numpy as np

def sum_mins(x, y):
    mask = (x - y) < 0
    return np.sum(x*mask + y*np.logical_not(mask))

X = np.array([1, 2, 3])
Y = np.array([0, 2, 0])
print(sum_mins(X, Y))
One naive approach, staying close to the definition:
result = np.array([[np.sum(np.minimum(v_x, v_y)) for v_y in Y] for v_x in X])
A combination of @hpaulj's answer and my former (deleted) one, for the case where you would otherwise run out of memory:
import numpy as np

# maximum number of float32s in memory - determining a max. chunk size
MAX_CHUNK_MEM_SIZE = 1000 * 1024 * 1024 / 4

def _fast_small(x, y):
    """Process a case with small size of x and y."""
    # see the answer of @hpaulj
    return np.sum(np.minimum(x[:, None, :], y[None, :, :]), axis=-1)

def fast(x, y):
    """Process a case with potentially large size of x and y."""
    assert len(x.shape) == len(y.shape) == 2
    assert x.shape[1] == y.shape[1]
    # the broadcast intermediate holds x.shape[0] * y.shape[0] * x.shape[1] values
    num_chunks = int(np.ceil(x.shape[0] * y.shape[0] * x.shape[1] / MAX_CHUNK_MEM_SIZE))
    result_blocks = []
    for x_block in np.array_split(x, num_chunks):
        result_blocks_row = []
        for y_block in np.array_split(y, num_chunks):
            result_blocks_row.append(_fast_small(x_block, y_block))
        result_blocks.append(result_blocks_row)
    return np.block(result_blocks)
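A hypothetical usage sketch (the shapes are arbitrary), spot-checking one cell of the chunked result against the definition:
import numpy as np

X = np.random.rand(3000, 10).astype(np.float32)
Y = np.random.rand(2000, 10).astype(np.float32)
result = fast(X, Y)   # shape (3000, 2000)
assert np.isclose(result[5, 7], np.sum(np.minimum(X[5], Y[7])))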

Pythonic way of finding indexes of unique elements in two arrays

I have two sorted numpy arrays similar to these:
x = np.array([1, 2, 8, 11, 15])
y = np.array([1, 8, 15, 17, 20, 21])
Elements never repeat within the same array. I want a Pythonic way of producing a list of index pairs marking the locations at which the same element exists in both arrays.
For instance, 1 exists in x and y at index 0. Element 2 in x doesn't exist in y, so I don't care about that item. However, 8 does exist in both arrays - at index 2 in x but index 1 in y. Similarly, 15 exists in both, at index 4 in x but index 2 in y. So the outcome of my function would in this case be the list [[0, 0], [2, 1], [4, 2]].
So far what I'm doing is:
def get_indexes(x, y):
    indexes = []
    for i in range(len(x)):
        # Find index where item x[i] is in y:
        j = np.where(x[i] == y)[0]
        # If it exists, save it:
        if len(j) != 0:
            indexes.append([i, j[0]])
    return indexes
But the problem is that arrays x and y are very large (millions of items), so it takes quite a while. Is there a better pythonic way of doing this?
Without Python loops
Code
def get_indexes_darrylg(x, y):
    ' darrylg answer '
    # Use intersect1d to find the common elements of the two arrays
    overlap = np.intersect1d(x, y)
    # Indexes of the common elements in each array
    loc1 = np.searchsorted(x, overlap)
    loc2 = np.searchsorted(y, overlap)
    # Zip the two 1d numpy arrays into a 2d array
    return np.dstack((loc1, loc2))[0]
Usage
x = np.array([1, 2, 8, 11, 15])
y = np.array([1, 8, 15, 17, 20, 21])
result = get_indexes_darrylg(x, y)
# result: array([[0, 0],
#                [2, 1],
#                [4, 2]], dtype=int64)
Timing Posted Solutions
Results show that darrylg's code has the fastest run time.
Code Adjustment
Each posted solution is wrapped as a function.
Slight modification so that each solution outputs a numpy array.
Each curve is named after its poster.
Code
import numpy as np
import perfplot

def create_arr(n):
    ' Creates pair of 1d numpy arrays with half the elements equal '
    max_val = 100000   # one more than the largest value in the output arrays
    arr1 = np.random.randint(0, max_val, (n,))
    arr2 = arr1.copy()
    # Change half the elements in arr2
    all_indexes = np.arange(0, n, dtype=int)
    indexes = np.random.choice(all_indexes, size=n//2, replace=False)    # locations to make changes
    np.put(arr2, indexes, np.random.randint(0, max_val, (n//2,)))        # assign new random values at change locations
    arr1 = np.sort(arr1)
    arr2 = np.sort(arr2)
    return (arr1, arr2)

def get_indexes_lllrnr101(x, y):
    ' lllrnr101 answer '
    ans = []
    i = 0
    j = 0
    while (i < len(x) and j < len(y)):
        if x[i] == y[j]:
            ans.append([i, j])
            i += 1
            j += 1
        elif (x[i] < y[j]):
            i += 1
        else:
            j += 1
    return np.array(ans)

def get_indexes_joostblack(x, y):
    ' joostblack answer '
    indexes = []
    for idx, val in enumerate(x):
        idy = np.searchsorted(y, val)
        try:
            if y[idy] == val:
                indexes.append([idx, idy])
        except IndexError:
            continue   # ignore index errors
    return np.array(indexes)

def get_indexes_mustafa(x, y):
    ' mustafa answer '
    indices_in_x = np.flatnonzero(np.isin(x, y))                  # array([0, 2, 4])
    indices_in_y = np.flatnonzero(np.isin(y, x[indices_in_x]))    # array([0, 1, 2])
    return np.array(list(zip(indices_in_x, indices_in_y)))

def get_indexes_darrylg(x, y):
    ' darrylg answer '
    # Use intersect1d to find the common elements of the two arrays
    overlap = np.intersect1d(x, y)
    # Indexes of the common elements in each array
    loc1 = np.searchsorted(x, overlap)
    loc2 = np.searchsorted(y, overlap)
    # Zip the two 1d numpy arrays into a 2d array
    return np.dstack((loc1, loc2))[0]

def get_indexes_akopcz(x, y):
    ' akopcz answer '
    return np.array([
        [i, j]
        for i, nr in enumerate(x)
        for j in np.where(nr == y)[0]
    ])

perfplot.show(
    setup=create_arr,    # tuple of two 1D random arrays
    kernels=[
        lambda a: get_indexes_lllrnr101(*a),
        lambda a: get_indexes_joostblack(*a),
        lambda a: get_indexes_mustafa(*a),
        lambda a: get_indexes_darrylg(*a),
        lambda a: get_indexes_akopcz(*a),
    ],
    labels=["lllrnr101", "joostblack", "mustafa", "darrylg", "akopcz"],
    n_range=[2 ** k for k in range(5, 21)],
    xlabel="Array Length",
    # More optional arguments with their default values:
    # logx="auto",               # set to True or False to force scaling
    # logy="auto",
    equality_check=None,         # set to np.allclose to enable a "correctness" assertion
    # show_progress=True,
    # target_time_per_measurement=1.0,
    # time_unit="s",             # one of ("auto", "s", "ms", "us", "ns") to force plot units
    # relative_to=1,             # plot the timings relative to one of the measurements
    # flops=lambda n: 3*n,       # FLOPS plots
)
What you are doing compares every element of x against all of y (np.where scans the whole array), which is O(n*m).
Since both arrays are sorted, you can do it in O(n + m) by iterating over them with two pointers and advancing the pointer of the array whose current element is smaller.
See below:
x = [1, 2, 8, 11, 15]
y = [1, 8, 15, 17, 20, 21]
def get_indexes(x, y):
    ans = []
    i = 0
    j = 0
    while (i < len(x) and j < len(y)):
        if x[i] == y[j]:
            ans.append([i, j])
            i += 1
            j += 1
        elif (x[i] < y[j]):
            i += 1
        else:
            j += 1
    return ans
print(get_indexes(x,y))
which gives me:
[[0, 0], [2, 1], [4, 2]]
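The question mentions arrays with millions of items; at that size the pure-Python loop carries heavy interpreter overhead despite its O(n + m) scan. If numba is an option (an assumption, it is not mentioned in the question), the same two-pointer idea compiles to native code with minimal changes; a sketch:
import numpy as np
from numba import njit   # assumption: numba is installed

@njit
def get_indexes_nb(x, y):
    # preallocate the worst-case number of matches, then trim
    out = np.empty((min(len(x), len(y)), 2), dtype=np.int64)
    n = 0
    i = 0
    j = 0
    while i < len(x) and j < len(y):
        if x[i] == y[j]:
            out[n, 0] = i
            out[n, 1] = j
            n += 1
            i += 1
            j += 1
        elif x[i] < y[j]:
            i += 1
        else:
            j += 1
    return out[:n]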
Note that this function searches for all occurrences of x[i] in the y array; since duplicates are not allowed in y, it will find x[i] at most once.
def get_indexes(x, y):
    return [
        [i, j]
        for i, nr in enumerate(x)
        for j in np.where(nr == y)[0]
    ]
You can use numpy.searchsorted:
def get_indexes(x, y):
    indexes = []
    for idx, val in enumerate(x):
        idy = np.searchsorted(y, val)
        # guard: searchsorted returns len(y) when val is larger than every element of y
        if idy < len(y) and y[idy] == val:
            indexes.append([idx, idy])
    return indexes
One solution is to first look from x's side and find which of its values are included in y, getting their indices through np.isin and np.flatnonzero, and then do the same from the other side; but instead of passing x entirely, we pass only the (already found) intersected elements, to save time:
indices_in_x = np.flatnonzero(np.isin(x, y)) # array([0, 2, 4])
indices_in_y = np.flatnonzero(np.isin(y, x[indices_in_x])) # array([0, 1, 2])
Now you can zip them to get the result (the pairing works because both arrays are sorted, so the matches appear in the same order on each side):
result = list(zip(indices_in_x, indices_in_y)) # [(0, 0), (2, 1), (4, 2)]

kNN feature should be passed through as a list

My data looks like this:
sample1 = [[1, 0, 3, 5, 0, 9], 0, 1.5, 0]
sample2 = [[0, 4, 0, 6, 2, 0], 2, 1.9, 1]
sample3 = [[9, 7, 6, 0, 0, 0], 0, 1.3, 1]
paul = pd.DataFrame(data=[sample1, sample2, sample3], columns=['list', 'cat', 'metr', 'target'])
On this data, a scikit-learn kNN regression with a specific distance function should be performed.
The distance function is:
def my_distance(X, Y, **kwargs):
    if len(X) > 1:
        x = X
        y = Y
        all_minima = []
        for k in range(len(x)):
            one_minimum = min(x[k], y[k])
            all_minima.append(one_minimum)
        sum_all_minima = sum(all_minima)
        distance = (sum(x) + sum(y) - sum_all_minima) * kwargs["Para_list"]
    elif X.dtype == 'int64':
        x = X
        y = Y
        if x == y and x != -1:
            distance = 0
        elif x == -1 or y == -1 or x is None or y is None:
            distance = kwargs["Para_minus1"] * 1
        else:
            distance = kwargs["Para_nominal"] * 1
    else:
        x = X
        y = Y
        if x == y:
            distance = 0
        elif x == -1 or y == -1 or x is None or y is None:
            distance = kwargs["Para_minus1"] * 1
        else:
            distance = abs(x - y) * kwargs["Para_metrisch"]
    return distance
and it should be registered as a valid distance function by
DistanceMetric.get_metric('pyfunc',func=my_distance)
If I am right, the scikit-learn code should look like this:
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor

train, test = train_test_split(paul, test_size=0.3)
# x_train should contain only the independent variables; everything else is dropped:
x_train = train.drop('target', axis=1)
y_train = train['target']
x_test = test.drop('target', axis=1)
y_test = test['target']
knn = KNeighborsRegressor(n_neighbors=2,
                          algorithm='ball_tree',
                          metric=my_distance,
                          metric_params={"Para_list": 2,
                                         "Para_minus1": 3,
                                         "Para_metrisch": 2,
                                         "Para_nominal": 4})
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
I get
ValueError: setting an array element with a sequence.
I guess scikit-learn cannot handle a single feature item being a list? Is there a way to make that happen?

I guess scikit-learn cannot handle a single feature item being a list? Is there a way to make that happen?
No, there is no way I know of to make this happen. You need to convert this feature into a 2D matrix and concatenate it with the other 1D features to form the data appropriately. This is standard sklearn behavior.
Unless you have some very narrow use case, making a 2D array from the list feature is totally fine. I assume all lists have the same length.
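For the data above, a minimal sketch of that conversion (one possible layout, assuming all lists have the same length, 6 in the samples):
import numpy as np
import pandas as pd

# stack the per-row lists into an (n_samples, 6) block ...
list_block = np.vstack(paul["list"].to_numpy())
# ... and append the scalar features as extra columns
X = np.hstack([list_block, paul[["cat", "metr"]].to_numpy()])
y = paul["target"].to_numpy()
# X now has shape (3, 8); note that a custom metric then receives one flat
# 8-element vector per sample, so my_distance would need to be adapted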

Indices that intersect and sort two numpy arrays

I have two numpy arrays of integers, both of length several hundred million. Within each array values are unique, and each is initially unsorted.
I would like the indices into each array that yield their sorted intersection. For example:
x = np.array([4, 1, 10, 5, 8, 13, 11])
y = np.array([20, 5, 4, 9, 11, 7, 25])
Then the sorted intersection of these is [4, 5, 11], so we want the indices that turn each of x and y into that array; the function should return:
mx = np.array([0, 3, 6])
my = np.array([2, 1, 4])
since then x[mx] == y[my] == np.intersect1d(x, y)
The only solution we have so far involves three different argsorts, so it seems that is unlikely to be optimal.
Each value represents a galaxy, in case that makes the problem more fun.
Here's an option based on intersect1d's implementation, which is fairly straightforward. It requires one call to argsort.
The admittedly simplistic test passes.
import numpy as np

def my_intersect(x, y):
    """my_intersect(x, y) -> xm, ym
    x, y: 1-d arrays of unique values
    xm, ym: indices into x and y giving sorted intersection
    """
    # basic idea taken from numpy.lib.arraysetops.intersect1d
    aux = np.concatenate((x, y))
    sidx = aux.argsort()
    # Note: intersect1d uses aux[:-1][aux[1:]==aux[:-1]] here - I don't know
    # why the first [:-1] is necessary
    inidx = aux[sidx[1:]] == aux[sidx[:-1]]
    # quicksort is not stable, so we must do some work to extract the indices
    # (if it were stable, sidx[inidx.nonzero()] would give the x indices)
    # interlace the two sets of indices, and check against the lengths
    xym = np.vstack((sidx[inidx.nonzero()],
                     sidx[1:][inidx.nonzero()])).T.flatten()
    xm = xym[xym < len(x)]
    ym = xym[xym >= len(x)] - len(x)
    return xm, ym

def check_my_intersect(x, y):
    mx, my = my_intersect(x, y)
    assert (x[mx] == np.intersect1d(x, y)).all()
    # not really necessary: np.intersect1d returns a sorted list
    assert (x[mx] == sorted(x[mx])).all()
    assert (x[mx] == y[my]).all()

def random_unique_unsorted(n):
    while True:
        x = np.unique(np.random.randint(2*n, size=n))
        if len(x):
            break
    np.random.shuffle(x)
    return x

x = np.array([4, 1, 10, 5, 8, 13, 11])
y = np.array([20, 5, 4, 9, 11, 7, 25])
check_my_intersect(x, y)

for i in range(20):
    x = random_unique_unsorted(100 + i)
    y = random_unique_unsorted(200 + i)
    check_my_intersect(x, y)
Edit: "Note" comment was confusing (Used ... as speech ellipsis, forgot it was a Python operator too).
You could also use np.searchsorted, like so -
def searchsorted_based(x, y):
    # Get argsort for both x and y
    xsort_idx = x.argsort()
    ysort_idx = y.argsort()
    # Sort x and y and store them
    X = x[xsort_idx]
    Y = y[ysort_idx]
    # Find positions of Y in X; the matches are the positions that
    # shift between 'left'- and 'right'-based searches.
    # Use the match positions to get the corresponding argsort indices for X.
    x1 = np.searchsorted(X, Y, 'left')
    x2 = np.searchsorted(X, Y, 'right')
    out1 = xsort_idx[x1[x2 != x1]]
    # Repeat for X in Y findings
    y1 = np.searchsorted(Y, X, 'left')
    y2 = np.searchsorted(Y, X, 'right')
    out2 = ysort_idx[y1[y2 != y1]]
    return out1, out2
Sample run -
In [100]: x = np.array([4, 1, 10, 5, 8, 13, 11])
     ...: y = np.array([20, 5, 4, 9, 11, 7, 25])

In [101]: searchsorted_based(x, y)
Out[101]: (array([0, 3, 6]), array([2, 1, 4]))
For a pure numpy solution you could do something like this:
Use np.unique to get the unique values and corresponding indices in x and y separately:
# sorted unique values in x and y and the indices corresponding to their first
# occurrences, such that u_x == x[u_idx_x]
u_x, u_idx_x = np.unique(x, return_index=True)
u_y, u_idx_y = np.unique(y, return_index=True)
Find the intersection of the unique values using np.intersect1d:
# we can assume_unique, which can be faster for large arrays
i_xy = np.intersect1d(u_x, u_y, assume_unique=True)
Finally, use np.in1d to select only the indices that correspond to unique values in x or y that also happen to be in the intersection of x and y:
# it is also safe to assume_unique here
i_idx_x = u_idx_x[np.in1d(u_x, i_xy, assume_unique=True)]
i_idx_y = u_idx_y[np.in1d(u_y, i_xy, assume_unique=True)]
To pull all that together into a single function:
def intersect_indices(x, y):
    u_x, u_idx_x = np.unique(x, return_index=True)
    u_y, u_idx_y = np.unique(y, return_index=True)
    i_xy = np.intersect1d(u_x, u_y, assume_unique=True)
    i_idx_x = u_idx_x[np.in1d(u_x, i_xy, assume_unique=True)]
    i_idx_y = u_idx_y[np.in1d(u_y, i_xy, assume_unique=True)]
    return i_idx_x, i_idx_y
For example:
x = np.array([4, 1, 10, 5, 8, 13, 11])
y = np.array([20, 5, 4, 9, 11, 7, 25])
i_idx_x, i_idx_y = intersect_indices(x, y)
print(i_idx_x, i_idx_y)
# (array([0, 3, 6]), array([2, 1, 4]))
Speed test:
In [1]: k = 1000000
In [2]: %%timeit x, y = np.random.randint(k, size=(2, k))
intersect_indices(x, y)
....:
1 loops, best of 3: 597 ms per loop
Update:
I initially missed the fact that in your case both x and y contain only unique values. Taking that into account, it's possible to do slightly better by using an indirect sort:
def intersect_indices_unique(x, y):
    u_idx_x = np.argsort(x)
    u_idx_y = np.argsort(y)
    i_xy = np.intersect1d(x, y, assume_unique=True)
    i_idx_x = u_idx_x[x[u_idx_x].searchsorted(i_xy)]
    i_idx_y = u_idx_y[y[u_idx_y].searchsorted(i_xy)]
    return i_idx_x, i_idx_y
Here's a more realistic test case, where x and y both contain unique (but partially overlapping) values:
In [1]: n, k = 10000000, 1000000
In [2]: %%timeit x, y = (np.random.choice(n, size=k, replace=False) for _ in range(2))
intersect_indices(x, y)
....:
1 loops, best of 3: 593 ms per loop
In [3]: %%timeit x, y = (np.random.choice(n, size=k, replace=False) for _ in range(2))
intersect_indices_unique(x, y)
....:
1 loops, best of 3: 453 ms per loop
@Divakar's solution is very similar in terms of performance:
In [4]: %%timeit x, y = (np.random.choice(n, size=k, replace=False) for _ in range(2))
searchsorted_based(x, y)
....:
1 loops, best of 3: 472 ms per loop
Maybe a pure Python solution using a dict works for you:
def indices_from_values(a, intersect):
    idx = {value: index for index, value in enumerate(a)}
    return np.array([idx[x] for x in intersect])

intersect = np.intersect1d(x, y)
mx = indices_from_values(x, intersect)
my = indices_from_values(y, intersect)
np.allclose(x[mx], y[my]) and np.allclose(x[mx], np.intersect1d(x, y))
