Numpy: How to replace each pixel with a single value using vectorization? - python

I have an image saved as a numpy array of shape [Height, Width, 3], and I want to replace every pixel with another value based on the color of the pixel, so the final array will have a shape [Height, Width].
My solution with for loop works but it's pretty slow. How can I use Numpy vectorization to make it more efficient?
image = cv2.imread("myimage.png")
result = np.zeros(shape=(image.shape[0], image.shape[1]))
for h in range(0, result.shape[0]):
    for w in range(0, result.shape[1]):
        result[h, w] = get_new_value(image[h, w])
Here is the get_new_value function:
def get_new_value(array: np.ndarray) -> int:
    mapping = {
        (0, 0, 0): 0,
        (0, 0, 255): 5,
        (0, 100, 200): 8,
        # ...
    }
    return mapping[tuple(array)]

You can use np.select() as shown below:
img = np.array(
    [[[123, 123, 123],
      [130, 130, 130]],
     [[129, 128, 128],
      [162, 162, 162]]])
condlist = [(img == [123, 123, 123]).all(axis=-1),
            (img == [130, 130, 130]).all(axis=-1),
            (img == [129, 129, 129]).all(axis=-1),
            (img == [162, 162, 162]).all(axis=-1)]
choicelist = [0, 5, 8, 9]
final = np.select(condlist, choicelist)
print('final')
print(final)
condlist is your list of colour values and choicelist is the list of replacements. Each condition uses .all(axis=-1) so that a pixel only counts as a match when all three of its channels agree; a bare img == [123, 123, 123] compares channel by channel, and np.select would then act on partial matches such as [129, 128, 128] vs [129, 129, 129]. With whole-pixel conditions, np.select returns the 2D array 'final' directly, which is the format you want I believe; pixels that match no colour get the default value 0.
output is:
final
[[0 5]
 [0 9]]
so code specific to your example and shown colour mappings would be:
image = cv2.imread("myimage.png")
condlist = [(image == [0, 0, 0]).all(axis=-1),
            (image == [0, 0, 255]).all(axis=-1),
            (image == [0, 100, 200]).all(axis=-1)]
choicelist = [0, 5, 8]
result = np.select(condlist, choicelist)
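If the real mapping has many colours, condlist and choicelist can also be built from the dict used in get_new_value instead of being written out by hand; a minimal sketch, using just the three colours listed (the remaining entries are elided in the question):
mapping = {
    (0, 0, 0): 0,
    (0, 0, 255): 5,
    (0, 100, 200): 8,
}
condlist = [(image == color).all(axis=-1) for color in mapping]
choicelist = list(mapping.values())
result = np.select(condlist, choicelist)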

Related

How can I calculate histograms with merged bins?

I want to ask you about calculating the histogram in Python using OpenCV. I used this code:
hist = cv2.calcHist(im, [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
The result gave me the histogram of each color channel with 8 bins, but what I want to get is:
1st bin (R=0-32,G=0-32,B=0-32),
2nd bin (R=33-64,G=0-32,B=0-32),
and so on,
so I will have 512 bins in total.
From my point of view, your cv2.calcHist call isn't correct:
hist = cv2.calcHist(im, [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
The first parameter should be a list of images:
hist = cv2.calcHist([im], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
Let's see this small example:
import cv2
import numpy as np
# Red blue square of size [4, 4], i.e. eight pixels (255, 0, 0) and eight pixels (0, 0, 255); Attention: BGR ordering!
image = np.zeros((4, 4, 3), dtype=np.uint8)
image[:, 0:2, 2] = 255
image[:, 2:4, 0] = 255
# Calculate histogram with two bins [0 - 127] and [128 - 255] per channel:
# Result should be hist["bin 0", "bin 0", "bin 1"] = 8 (red) and hist["bin 1", "bin 0", "bin 0"] = 8 (blue)
# Original cv2.calcHist call with two bins [0 - 127] and [128 - 255]
hist = cv2.calcHist(image, [0, 1, 2], None, [2, 2, 2], [0, 256, 0, 256, 0, 256])
print(hist, '\n') # Not correct
# Correct cv2.calcHist call
hist = cv2.calcHist([image], [0, 1, 2], None, [2, 2, 2], [0, 256, 0, 256, 0, 256])
print(hist, '\n') # Correct
[[[8. 0.]
  [0. 0.]]

 [[0. 0.]
  [0. 4.]]]

[[[0. 8.]
  [0. 0.]]

 [[8. 0.]
  [0. 0.]]]
As you can see, your version only counts 12 values in total, whereas there are 16 pixels in the image! Also, it's not clear what "bins" (if any) are represented.
So, having the proper cv2.calcHist call, your general idea/approach is correct! Maybe you just need a little hint on "how to read" the resulting hist:
import cv2
import numpy as np
# Colored rectangle of size [32, 16] with one "color" per bin for eight bins per channel,
# i.e. 512 pixels, such that each of the resulting 512 bins has value 1
x = np.linspace(16, 240, 8, dtype=np.uint8)
image = np.reshape(np.moveaxis(np.array(np.meshgrid(x, x, x)), [0, 1, 2, 3], [3, 0, 1, 2]), (32, 16, 3))
# Correct cv2.calcHist call
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
# Lengthy output of each histogram bin
for B in np.arange(hist.shape[0]):
    for G in np.arange(hist.shape[1]):
        for R in np.arange(hist.shape[2]):
            r = 'R=' + str(R*32).zfill(3) + '-' + str((R+1)*32-1).zfill(3)
            g = 'G=' + str(G*32).zfill(3) + '-' + str((G+1)*32-1).zfill(3)
            b = 'B=' + str(B*32).zfill(3) + '-' + str((B+1)*32-1).zfill(3)
            print('(' + r + ', ' + g + ', ' + b + '): ', int(hist[B, G, R]))
(R=000-031, G=000-031, B=000-031): 1
(R=032-063, G=000-031, B=000-031): 1
(R=064-095, G=000-031, B=000-031): 1
[... 506 more lines ...]
(R=160-191, G=224-255, B=224-255): 1
(R=192-223, G=224-255, B=224-255): 1
(R=224-255, G=224-255, B=224-255): 1
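A side note: if you want those counts as one flat 512-element vector in exactly the order from your question (R varying fastest, then G, then B), that is simply a ravel of hist, since its last axis is the R axis:
bins512 = hist.ravel()
print(bins512.shape)  # (512,)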
Hope that helps!

numpy: combine image mask with RGB to get colored image mask

How can I combine a binary mask image array (this_mask, shape (4, 4)) with a predefined color array (mask_color, shape (3,))
this_mask = np.array([
    [0,1,0,0],
    [0,0,0,0],
    [0,0,0,0],
    [0,0,0,0],
])
this_mask.shape # (4, 4)
mask_color = np.array([128, 128, 64])
mask_color.shape # (3,)
to get a new color mask image array (this_mask_colored, shape:4,4,3)?
this_mask_colored = # do something with `this_mask` and `mask_color`
# [
# [
# [0,128,0],
# [0,0,0],
# [0,0,0],
# [0,0,0]
# ],
# [
# [0,128,0],
# [0,0,0],
# [0,0,0],
# [0,0,0]
# ],
# [
# [0,64,0],
# [0,0,0],
# [0,0,0],
# [0,0,0]
# ],
# ]
this_mask_colored.shape # (4,4,3)
I tried looping through it pixel by pixel, but it is slow when the image is 225x225. What is the best way to do this?
For each image, I have multiple layers of mask, and each mask layer needs to have a different predefined color.
This might work:
this_mask = np.array([
    [0,1,0,0],
    [0,0,0,0],
    [0,0,0,0],
    [0,0,0,0],
])
mask_color = np.array([128, 128, 64])

res = []
for row in this_mask:
    tmp = []
    for col in row:
        tmp.append(np.array([1, 1, 1]) * col)
    res.append(np.array(tmp))
res = np.array(res) * mask_color
For each entry, 1 will be converted to [1, 1, 1] and 0 to [0, 0, 0].
I do this because I want to take advantage of the element-wise multiplication operator *.
This works:
test = np.array([[0, 0, 0],
                 [1, 1, 1],
                 [0, 0, 0],
                 [0, 0, 0]])
test * np.array([128, 128, 64])
We'll get
array([[  0,   0,   0],
       [128, 128,  64],
       [  0,   0,   0],
       [  0,   0,   0]])
We want to push all the calculation to NumPy's side, so we loop through the array only for this conversion and leave the rest to NumPy.
This takes about 0.2 seconds for a 255x255 mask with one mask_color, and about 2 seconds for 1000x1000.
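For reference, the same conversion can be done without any Python loop by broadcasting a trailing axis of the mask against the colour; a minimal sketch with the same this_mask and mask_color as above:
import numpy as np

this_mask = np.array([
    [0, 1, 0, 0],
    [0, 0, 0, 0],
    [0, 0, 0, 0],
    [0, 0, 0, 0],
])
mask_color = np.array([128, 128, 64])

# (4, 4, 1) * (3,) broadcasts to (4, 4, 3)
this_mask_colored = this_mask[:, :, np.newaxis] * mask_color
print(this_mask_colored.shape)  # (4, 4, 3)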
The following function should do what you want.
def apply_mask_color(mask, mask_color):
    return np.concatenate([mask[..., np.newaxis] * color for color in mask_color], axis=2)
Given the following code:
this_mask = np.array([
    [0,1,0,0],
    [0,0,0,0],
    [0,0,0,0],
    [0,0,0,0],
])
mask_color = np.array([128, 128, 64])

applied = apply_mask_color(this_mask, mask_color)
print(applied.shape) # (4, 4, 3)
It is important to note that the output isn't QUITE what you expected. Rather, every element is now a 3-element array holding the R, G, B values detailed in mask_color.
print(applied)
Output:
[[[  0   0   0]
  [128 128  64]
  [  0   0   0]
  [  0   0   0]]

 [[  0   0   0]
  [  0   0   0]
  [  0   0   0]
  [  0   0   0]]

 [[  0   0   0]
  [  0   0   0]
  [  0   0   0]
  [  0   0   0]]

 [[  0   0   0]
  [  0   0   0]
  [  0   0   0]
  [  0   0   0]]]
I think this is more what you're looking for.
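Since the question mentions several mask layers per image, each with its own predefined colour, the same broadcasting idea extends naturally; a minimal sketch, assuming masks is a list of (H, W) binary arrays and colors a matching list of RGB triples (both names are hypothetical):
import numpy as np

def colorize_layers(masks, colors):
    # Sum one (H, W, 3) coloured layer per mask; assumes the layers don't overlap
    out = np.zeros(masks[0].shape + (3,), dtype=np.int64)
    for m, c in zip(masks, colors):
        out += m[:, :, np.newaxis] * np.asarray(c)
    return out

layers = [np.eye(4, dtype=int), np.zeros((4, 4), dtype=int)]
palette = [(128, 128, 64), (255, 0, 0)]
print(colorize_layers(layers, palette).shape)  # (4, 4, 3)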

Convert multi-dimensional Numpy array to 2-dimensional array based on color values

I have an image which is read as a uint8 array with the shape (512, 512, 3).
Now I would like to convert this array to a uint8 array of shape (512, 512, 1), where each pixel value in the third axis is converted from a color value (e.g. [255, 0, 0]) to a single class label value (e.g. 3), based on the following color/class encoding:
1 : [0, 0, 0],
2 : [0, 0, 255],
3 : [255, 0, 0],
4 : [150, 30, 150],
5 : [255, 65, 255],
6 : [150, 80, 0],
7 : [170, 120, 65],
8 : [125, 125, 125],
9 : [255, 255, 0],
10 : [0, 255, 255],
11 : [255, 150, 0],
12 : [255, 225, 120],
13 : [255, 125, 125],
14 : [200, 100, 100],
15 : [0, 255, 0],
16 : [0, 150, 80],
17 : [215, 175, 125],
18 : [220, 180, 210],
19 : [125, 125, 255]
What is the most efficient way to do this? I thought of looping through all classes and using numpy.where, but this is obviously time-consuming.
You could use a giant lookup table. Let cls be [[0,0,0], [0,0,255], ...] of dtype=np.uint8.
LUT = np.zeros((256, 256, 256), dtype='u1')
LUT[cls[:, 0], cls[:, 1], cls[:, 2]] = np.arange(cls.shape[0]) + 1
img_as_cls = LUT[img[..., 0], img[..., 1], img[..., 2]]
This solution is O(1) per pixel. It is also quite cache efficient, because only a small fraction of the entries in LUT is actually used. It takes about 10 ms to process a 1000x1000 image on my machine.
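For reference, cls can be built straight from the colour/class table above; a minimal sketch (only the first entries are written out, and the encoding dict here is a hypothetical name for that table):
import numpy as np

encoding = {
    1: [0, 0, 0],
    2: [0, 0, 255],
    3: [255, 0, 0],
    # ... entries 4-19 as listed in the question
}
cls = np.array([encoding[k] for k in sorted(encoding)], dtype=np.uint8)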
The solution can be slightly improved by packing the three colour channels into a single 24-bit integer. Here is the code:
def scalarize(x):
    # compute x[...,2]*65536 + x[...,1]*256 + x[...,0] in an efficient way
    y = x[..., 2].astype('u4')
    y <<= 8
    y += x[..., 1]
    y <<= 8
    y += x[..., 0]
    return y
LUT = np.zeros(2**24, dtype='u1')
LUT[scalarize(cls)] = 1 + np.arange(cls.shape[0])
simg = scalarize(img)
img_to_cls = LUT[simg]
After this optimization, it takes about 5 ms to process a 1000x1000 image.
One way: separately create the boolean arrays with True values where the input's pixel value matches one of the palette values, and then use arithmetic to combine them. Thus:
palette = [
    [0, 0, 0],
    [0, 0, 255],
    [255, 0, 0],
    # etc.
]

def palettized(data, palette):
    # Initialize result array
    shape = list(data.shape)
    shape[-1] = 1
    result = np.zeros(shape)
    # Loop and add each palette index component; keepdims keeps the
    # (H, W, 1) shape so the in-place addition broadcasts correctly.
    for value, colour in enumerate(palette, 1):
        result += (data == colour).all(axis=2, keepdims=True) * value
    return result
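Usage would then be a single call; assuming img is the (512, 512, 3) uint8 image from the question:
labels = palettized(img, palette)  # shape (512, 512, 1); pixels matching no palette entry stay 0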
Here's one based on views -
# https://stackoverflow.com/a/45313353/ #Divakar
def view1D(a, b):  # a, b are arrays
    # This function gets 1D view into 2D input arrays
    a = np.ascontiguousarray(a)
    b = np.ascontiguousarray(b)
    void_dt = np.dtype((np.void, a.dtype.itemsize * a.shape[-1]))
    return a.view(void_dt).ravel(), b.view(void_dt).ravel()

def img2label(a, maps):
    # Get one-dimension reduced view into input image and map arrays.
    # We need to reshape the image to 2D, then feed it to view1D to get 1D
    # outputs, and then reshape the 1D image back to 2D.
    A, B = view1D(a.reshape(-1, a.shape[-1]), maps)
    A = A.reshape(a.shape[:2])
    # Trace back positions of A in B using searchsorted. This gives us
    # the original order, which is the final output.
    sidx = B.argsort()
    return sidx[np.searchsorted(B, A, sorter=sidx)]
Given that your labels start from 1, you might want to add 1 to the output.
Sample run -
In [100]: # Mapping array
     ...: maps = np.array([[0, 0, 0], [0, 0, 255],
     ...:                  [255, 0, 0], [150, 30, 150]], dtype=np.uint8)
     ...:
     ...: # Setup random image array
     ...: idx = np.array([[0, 2, 1, 3], [1, 3, 2, 0]])
     ...: img = maps[idx]

In [101]: img2label(img, maps)  # should retrieve back idx
Out[101]:
array([[0, 2, 1, 3],
       [1, 3, 2, 0]])
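And, per the note above, add 1 to get labels that start from 1:
labels = img2label(img, maps) + 1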

How to Generate Adjacent Indices w/ NumPy

So I'm trying to generate a list of possible adjacent movements within a 3d array (preferably n-dimensional).
What I have works as it's supposed to, but I was wondering if there's a more numpythonic way to do so.
def adjacents(loc, bounds):
    adj = []
    bounds = np.array(bounds) - 1
    if loc[0] > 0:
        adj.append((-1, 0, 0))
    if loc[1] > 0:
        adj.append((0, -1, 0))
    if loc[2] > 0:
        adj.append((0, 0, -1))
    if loc[0] < bounds[0]:
        adj.append((1, 0, 0))
    if loc[1] < bounds[1]:
        adj.append((0, 1, 0))
    if loc[2] < bounds[2]:
        adj.append((0, 0, 1))
    return np.array(adj)
Here are some example outputs:
adjacents((0, 0, 0), (10, 10, 10))
= [[1 0 0]
   [0 1 0]
   [0 0 1]]

adjacents((9, 9, 9), (10, 10, 10))
= [[-1  0  0]
   [ 0 -1  0]
   [ 0  0 -1]]

adjacents((5, 5, 5), (10, 10, 10))
= [[-1  0  0]
   [ 0 -1  0]
   [ 0  0 -1]
   [ 1  0  0]
   [ 0  1  0]
   [ 0  0  1]]
Here's an alternative which is vectorized and uses a constant, prepopulated array:
# all possible moves
_moves = np.array([
    [-1,  0,  0],
    [ 0, -1,  0],
    [ 0,  0, -1],
    [ 1,  0,  0],
    [ 0,  1,  0],
    [ 0,  0,  1]])

def adjacents(loc, bounds):
    loc = np.asarray(loc)
    bounds = np.asarray(bounds)
    mask = np.concatenate((loc > 0, loc < bounds - 1))
    return _moves[mask]
This uses asarray() instead of array() because it avoids copying if the input happens to be an array already. Then mask is constructed as an array of six bools corresponding to the original six if conditions. Finally, the appropriate rows of the constant data _moves are returned.
But what about performance?
The vectorized approach above, while it will appeal to some, actually runs only half as fast as the original. If it's performance you're after, the best simple change you can make is to remove the line bounds = np.array(bounds) - 1 and subtract 1 inside each of the last three if conditions, as sketched below. That gives you a 2x speedup (because it avoids creating an unnecessary array).
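For reference, here is the original function with just that change applied:
def adjacents(loc, bounds):
    adj = []
    if loc[0] > 0:
        adj.append((-1, 0, 0))
    if loc[1] > 0:
        adj.append((0, -1, 0))
    if loc[2] > 0:
        adj.append((0, 0, -1))
    if loc[0] < bounds[0] - 1:
        adj.append((1, 0, 0))
    if loc[1] < bounds[1] - 1:
        adj.append((0, 1, 0))
    if loc[2] < bounds[2] - 1:
        adj.append((0, 0, 1))
    return np.array(adj)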

Improving performance of numpy mapping operation

I have a numpy array of size (4, X, Y), where the first dimension stands for an (R, G, B, A) quadruplet.
My aim is to map each of the X*Y RGBA quadruplets to a single floating-point value, given a dictionary that matches them.
My current code is as follows:
codeTable = {
    (255, 255, 255, 127): 5.5,
    (128, 128, 128, 255): 6.5,
    (0, 0, 0, 0): 7.5,
}

for i in range(0, rows):
    for j in range(0, cols):
        new_data[i, j] = codeTable.get(tuple(data[:, i, j]), -9999)
Where data is a numpy array of size (4, rows, cols), and new_data is of size (rows, cols).
The code is working fine, but takes quite a long time. How should I optimize that piece of code?
Here is a full example:
import numpy

codeTable = {
    (253, 254, 255, 127): 5.5,
    (128, 129, 130, 255): 6.5,
    (0, 0, 0, 0): 7.5,
}

# test data
rows = 3
cols = 2
data = numpy.array([
    [[253, 0], [128, 0], [128, 0]],
    [[254, 0], [129, 144], [129, 0]],
    [[255, 0], [130, 243], [130, 5]],
    [[127, 0], [255, 120], [255, 5]],
])
new_data = numpy.zeros((rows, cols), numpy.float32)
for i in range(0, rows):
    for j in range(0, cols):
        new_data[i, j] = codeTable.get(tuple(data[:, i, j]), -9999)

# expected result for `new_data`:
# array([[ 5.50000000e+00,  7.50000000e+00],
#        [ 6.50000000e+00, -9.99900000e+03],
#        [ 6.50000000e+00, -9.99900000e+03]], dtype=float32)
Here's an approach that returns your expected result, but with such a small amount of data it's hard to know if this will be faster for you. Since I've avoided the double for loop, however, I imagine you'll see a pretty decent speedup.
import numpy
import pandas as pd

codeTable = {
    (253, 254, 255, 127): 5.5,
    (128, 129, 130, 255): 6.5,
    (0, 0, 0, 0): 7.5,
}

# test data
rows = 3
cols = 2
data = numpy.array([
    [[253, 0], [128, 0], [128, 0]],
    [[254, 0], [129, 144], [129, 0]],
    [[255, 0], [130, 243], [130, 5]],
    [[127, 0], [255, 120], [255, 5]],
])
new_data = numpy.zeros((rows, cols), numpy.float32)
for i in range(0, rows):
    for j in range(0, cols):
        new_data[i, j] = codeTable.get(tuple(data[:, i, j]), -9999)

def create_output(data):
    # Reshape your two data sources to be a bit more sane
    reshaped_data = data.reshape((4, -1))
    df = pd.DataFrame(reshaped_data).T

    reshaped_codeTable = []
    for key in codeTable.keys():
        reshaped = list(key) + [codeTable[key]]
        reshaped_codeTable.append(reshaped)
    ct = pd.DataFrame(reshaped_codeTable)

    # Merge on the data, replace missing merges with -9999
    result = df.merge(ct, how='left')
    newest_data = result[4].fillna(-9999)

    # Reshape (a pandas Series has no reshape, so convert to numpy first)
    output = newest_data.to_numpy().reshape(rows, cols)
    return output

output = create_output(data)
print(output)
# [[ 5.500e+00  7.500e+00]
#  [ 6.500e+00 -9.999e+03]
#  [ 6.500e+00 -9.999e+03]]

print(numpy.array_equal(new_data, output))
# True
The numpy_indexed package (disclaimer: I am its author) contains a vectorized nd-array capable variant of list.index, which can be used to solve your problem efficiently and concisely:
import numpy as np
import numpy_indexed as npi

map_keys = np.array(list(codeTable.keys()))
map_values = np.array(list(codeTable.values()))
indices = npi.indices(map_keys, data.reshape(4, -1).T, missing='mask')
remapped = np.where(indices.mask, -9999, map_values[indices.data]).reshape(data.shape[1:])
