I have a VTK file output from a program, and what I need is a 3D data set. The VTK file contains an ROI that I drew in another program, and I would like to convert it into a mask of the ROI (i.e., 1s where the ROI is, 0s everywhere else), with the same dimensions as the original 3D image the ROI was drawn on. I'm working in Python. Does anyone know how I can convert this VTK file into, essentially, a 3D NumPy array?
Here is a function that rasterizes VTK polydata into a NumPy array and wraps it as a SimpleITK image.
You can use this blog/documentation website for VTK examples: https://lorensen.github.io/VTKExamples/site/
import math

import numpy as np
import SimpleITK as sitk
import vtk
from vtk.util import numpy_support

def vtk2sitk(polydata, spacing, inval=1, outval=0, cast_float32=True):
    # compute dimensions from the polydata bounds
    bounds = polydata.GetBounds()
    dim = [0] * 3
    for i in range(3):
        dim[i] = int(math.ceil((bounds[i * 2 + 1] - bounds[i * 2]) / spacing[i])) + 1
        if dim[i] < 1:
            dim[i] = 1
    origin = [0] * 3
    # NOTE: I am not sure whether or not we had to add some offset!
    origin[0] = bounds[0]  # + spacing[0] / 2
    origin[1] = bounds[2]  # + spacing[1] / 2
    origin[2] = bounds[4]  # + spacing[2] / 2
    # create a blank vtkImageData of the right size
    whiteImage = vtk.vtkImageData()
    whiteImage.SetDimensions(dim)
    whiteImage.SetExtent(0, dim[0] - 1, 0, dim[1] - 1, 0, dim[2] - 1)
    whiteImage.SetSpacing(spacing)
    whiteImage.SetOrigin(origin)
    whiteImage.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
    # fill the image with foreground voxels
    count = whiteImage.GetNumberOfPoints()
    for i in range(count):
        whiteImage.GetPointData().GetScalars().SetTuple1(i, inval)
    # polygonal data -> image stencil
    pol2stenc = vtk.vtkPolyDataToImageStencil()
    pol2stenc.SetTolerance(0)  # important if extruder.SetVector(0, 0, 1) !!!
    pol2stenc.SetInputData(polydata)
    pol2stenc.SetOutputOrigin(origin)
    pol2stenc.SetOutputSpacing(spacing)
    pol2stenc.SetOutputWholeExtent(whiteImage.GetExtent())
    pol2stenc.Update()
    # cut the corresponding white image and set the background
    imgstenc = vtk.vtkImageStencil()
    imgstenc.SetInputData(whiteImage)
    imgstenc.SetStencilConnection(pol2stenc.GetOutputPort())
    imgstenc.ReverseStencilOff()
    imgstenc.SetBackgroundValue(outval)
    imgstenc.Update()
    # pull the voxels out as a NumPy array and rebuild a SimpleITK image
    np_array = numpy_support.vtk_to_numpy(imgstenc.GetOutput().GetPointData().GetScalars())
    sitk_img = sitk.GetImageFromArray(np_array.reshape(dim[2], dim[1], dim[0]))  # reversed dimension order here
    sitk_img.SetSpacing(spacing)
    sitk_img.SetOrigin(origin)
    if cast_float32:
        cast_filter = sitk.CastImageFilter()
        cast_filter.SetOutputPixelType(sitk.sitkFloat32)
        sitk_img = cast_filter.Execute(sitk_img)
    return sitk_img
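As a usage sketch (untested; the file name and spacing are placeholders, and I'm assuming the ROI was saved as legacy .vtk polydata that vtkPolyDataReader can load):

# Hypothetical usage: read the ROI polydata, rasterize it,
# and pull the voxels out as a 3D NumPy array.
reader = vtk.vtkPolyDataReader()
reader.SetFileName("roi.vtk")               # placeholder file name
reader.Update()

mask_img = vtk2sitk(reader.GetOutput(), spacing=[1.0, 1.0, 1.0],
                    cast_float32=False)
mask = sitk.GetArrayFromImage(mask_img)     # 3D array of 1s and 0s

To make the mask line up with the original 3D image, you would pass that image's spacing here and copy its origin onto the result instead of the bounds-derived one.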
TLDR:
Need help trying to calculate overlap region between 2 graphs.
So I'm trying to stitch these 2 images:
Since I know that the images I will be stitching definitely come from the same image, I feel that I should be able to code this up myself. Using a library like OpenCV feels like overkill for this task.
My current idea is that I can simplify this task by doing the following steps for each image:
Load image using PIL
Convert image to black and white (PIL image mode “L”)
[Optional: crop images to overlapping region by inspection by eye]
Create vector row_sum, which is a sum of each row
[Optional: log row_sum, to reduce the size of values we're working with]
Plot row_sum.
This would reduce the problem from (potentially) 3x2 dimensions, with three RGB channels for each pixel of the 2D image, to a 1x2-dimensional one with a single grayscale value per pixel. Summing across the rows then reduces it to a 1D problem.
I used the following code to implement the above:
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

class Stitcher():
    def combine_2(self, img1, img2):
        # thr1, thr2 = self.get_cropped_bw(img1, 115, img2, 80)
        thr1, thr2 = self.get_cropped_bw(img1, 0, img2, 0)
        row_sum1 = np.log(thr1.sum(1))
        row_sum2 = np.log(thr2.sum(1))
        self.plot_4x4(thr1, thr2, row_sum1, row_sum2)

    def get_cropped_bw(self, img1, img1_keep_from, img2, img2_keep_till):
        im1 = Image.open(img1).convert("L")
        im2 = Image.open(img2).convert("L")
        data1 = (np.array(im1)[img1_keep_from:]
                 if img1_keep_from != 0 else np.array(im1))
        data2 = (np.array(im2)[:img2_keep_till]
                 if img2_keep_till != 0 else np.array(im2))
        return data1, data2

    def plot_4x4(self, thr1, thr2, row_sum1, row_sum2):
        fig, ax = plt.subplots(2, 2, sharey="row", constrained_layout=True)
        ax[0, 0].imshow(thr1, cmap="Greys")
        ax[0, 1].imshow(thr2, cmap="Greys")
        ax[1, 0].plot(row_sum1, "k.")
        ax[1, 1].plot(row_sum2, "r.")
        ax[1, 0].set(
            xlabel="Index Value",
            ylabel="Row Sum",
        )
        plt.show()

imgs = (r"combine\imgs\test_image_part_1.jpg",
        r"combine\imgs\test_image_part_2.jpg")
s = Stitcher()
s.combine_2(*imgs)
This gave me this graph:
(I've added in those yellow boxes, to indicate the overlap regions.)
This is the bit I'm stuck at. I want to find exactly:
the index value of the left-side of the yellow box for the 1st image and
the index value of the right-side of the yellow box for the 2nd image.
I define the overlap region as the longest range for which the end of the 1st graph 'matches' the start of the 2nd graph. But what should the matching method do if the row-sum values aren't exactly the same (e.g. if one is the other scaled by some constant factor)?
I feel like dot products could measure the similarity between the two graphs here, but I can't quite work out how to implement it.
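The rough, untested idea I have is to slide the start of the 2nd graph's row sums along the end of the 1st graph's, scoring each candidate overlap length with a normalized dot product (cosine similarity), so that a constant scale factor between the two graphs cancels out:

def find_overlap(row_sum1, row_sum2, min_len=10):
    # try every candidate overlap length and keep the best cosine score
    best_len, best_score = 0, -1.0
    for n in range(min_len, min(len(row_sum1), len(row_sum2)) + 1):
        a = row_sum1[-n:]   # end of the 1st graph
        b = row_sum2[:n]    # start of the 2nd graph
        score = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
        if score > best_score:
            best_score, best_len = score, n
    return best_len, best_score

Is something like this a sensible way to go about it?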
I had a lot more fun with this than I expected. I wrote this using opencv, but that's just to load and show the image. Everything else is done with numpy so swapping this to PIL shouldn't be too difficult.
I'm using a brute-force matcher. I also wrote a random-start hillclimber that runs in much less time, but I can't guarantee it'll find the correct answer since the gradient space isn't smooth. I won't include it in my code since it's long and janky, but if you really need the time efficiency I can add it back in later.
I added a random crop and some salt and pepper noise to the images to test for robustness.
The brute-force matcher operates on the idea that we don't know which section of the two images overlap, so we need to convolve the smaller image over the larger image from left to right, top to bottom. This means our search space is:
horizontal = small_width + big_width
vertical = small_height + big_height
area = horizontal * vertical
This will grow very quickly with image size. I motivate the algorithm by giving it points for having a larger overlap, but it loses more points for having differences in color for the overlapped area.
Here are some pictures from an execution of this program
import cv2
import numpy as np
import random

# randomly snips edges
def randCrop(image, maxMargin):
    # margins start at 1 so the negative slice indices never collapse to empty
    c = [random.randint(1, maxMargin) for a in range(4)]
    return image[c[0]:-c[1], c[2]:-c[3]]

# adds noise to image
def saltPepper(image, minNoise, maxNoise):
    h, w = image.shape
    randNum = random.randint(minNoise, maxNoise)
    for a in range(randNum):
        x = random.randint(0, w - 1)
        y = random.randint(0, h - 1)
        image[y, x] = random.randint(0, 255)
    return image

# evaluate layout: count pixels that differ between the two overlays
def getScore(one, two):
    # do raw subtraction (uint8 wraps around, so keep the smaller difference)
    left = one - two
    right = two - one
    sub = np.minimum(left, right)
    return np.count_nonzero(sub)

# return 2d random position within range
def randPos(img, big_shape):
    th, tw = big_shape
    h, w = img.shape
    x = random.randint(0, tw - w)
    y = random.randint(0, th - h)
    return [x, y]

# overlays small image onto big image
def overlay(small, big, pos):
    # unpack
    h, w = small.shape
    x, y = pos
    # copy and place
    copy = big.copy()
    copy[y:y+h, x:x+w] = small
    return copy

# calculates overlap region
def overlap(one, two, pos_one, pos_two):
    # unpack
    h1, w1 = one.shape
    h2, w2 = two.shape
    x1, y1 = pos_one
    x2, y2 = pos_two
    # set edges
    l1 = x1
    l2 = x2
    r1 = x1 + w1
    r2 = x2 + w2
    t1 = y1
    t2 = y2
    b1 = y1 + h1
    b2 = y2 + h2
    # go
    left = max(l1, l2)
    right = min(r1, r2)
    top = max(t1, t2)
    bottom = min(b1, b2)
    return [left, right, top, bottom]

# wrapper for overlay + getScore
def fullScore(one, two, pos_one, pos_two, big_empty):
    # check positions
    x, y = pos_two
    h, w = two.shape
    th, tw = big_empty.shape
    if y+h > th or x+w > tw or x < 0 or y < 0:
        return -99999999
    # overlay
    temp_one = overlay(one, big_empty, pos_one)
    temp_two = overlay(two, big_empty, pos_two)
    # get overlap
    l, r, t, b = overlap(one, two, pos_one, pos_two)
    temp_one = temp_one[t:b, l:r]
    temp_two = temp_two[t:b, l:r]
    # score: reward overlap area, penalize color differences twice as hard
    diff = getScore(temp_one, temp_two)
    score = (r-l) * (b-t)
    score -= diff*2
    return score

# do brute force
def bruteForce(one, two):
    # calculate search space
    # unpack size
    h, w = one.shape
    one_size = h*w
    h, w = two.shape
    two_size = h*w
    # small and big
    if one_size < two_size:
        small = one
        big = two
    else:
        small = two
        big = one
    # unpack size
    sh, sw = small.shape
    bh, bw = big.shape
    total_width = bw + sw * 2
    total_height = bh + sh * 2
    # set up empty images
    empty = np.zeros((total_height, total_width), np.uint8)
    # set global best
    best_score = -999999
    best_pos = None
    # start scrolling
    ybound = total_height - sh
    xbound = total_width - sw
    for y in range(ybound):
        print("y: " + str(y) + " || " + str(empty.shape))
        for x in range(xbound):
            # get score
            score = fullScore(big, small, [sw, sh], [x, y], empty)
            # show
            # prog = overlay(big, empty, [sw, sh])
            # prog = overlay(small, prog, [x, y])
            # cv2.imshow("prog", prog)
            # cv2.waitKey(1)
            # compare
            if score > best_score:
                best_score = score
                best_pos = [x, y]
                print("best_score: " + str(best_score))
    return best_pos, [sw, sh], small, big, empty

# do a step of hill climber
def hillStep(one, two, best_pos, big_empty, step):
    # make a step
    new_pos = best_pos[1][:]
    new_pos[0] += step[0]
    new_pos[1] += step[1]
    # get score
    return fullScore(one, two, best_pos[0], new_pos, big_empty), new_pos

# hunt around for good position
# let's do a random-start hillclimber
def randHill(one, two, shape):
    # set up empty images
    big_empty = np.zeros(shape, np.uint8)
    # set global best
    g_best_score = -999999
    g_best_pos = None
    # lets do 200 iterations
    iters = 200
    for a in range(iters):
        # progress check
        print(str(a) + " of " + str(iters))
        # start with random position
        h, w = two.shape[:2]
        pos_one = [w, h]
        pos_two = randPos(two, shape)
        # get score
        best_score = fullScore(one, two, pos_one, pos_two, big_empty)
        best_pos = [pos_one, pos_two]
        # hill climb (only on second image)
        while True:
            # end condition: no step improves score
            end_flag = True
            # 8-way
            for y in range(-1, 1+1):
                for x in range(-1, 1+1):
                    if x != 0 or y != 0:
                        # get score and update
                        score, new_pos = hillStep(one, two, best_pos, big_empty, [x, y])
                        if score > best_score:
                            best_score = score
                            best_pos[1] = new_pos[:]
                            end_flag = False
            # end
            if end_flag:
                break
            else:
                # show
                # prog = overlay(one, big_empty, best_pos[0])
                # prog = overlay(two, prog, best_pos[1])
                # cv2.imshow("prog", prog)
                # cv2.waitKey(1)
                pass
        # check for new global best
        if best_score > g_best_score:
            g_best_score = best_score
            g_best_pos = best_pos[:]
            print("top score: " + str(g_best_score))
    return g_best_score, g_best_pos

# load both images
top = cv2.imread("top.jpg")
bottom = cv2.imread("bottom.jpg")
top = cv2.cvtColor(top, cv2.COLOR_BGR2GRAY)
bottom = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY)

# randomly crop
top = randCrop(top, 20)
bottom = randCrop(bottom, 20)

# randomly add noise
saltPepper(top, 200, 1000)
saltPepper(bottom, 200, 1000)

# set up max image (assume no overlap whatsoever)
tw = 0
th = 0
h, w = top.shape
tw += w
th += h
h, w = bottom.shape
tw += w*2
th += h*2

# do random-start hill climb
_, best_pos = randHill(top, bottom, (th, tw))

# show
empty = np.zeros((th, tw), np.uint8)
pos1, pos2 = best_pos
image = overlay(top, empty, pos1)
image = overlay(bottom, image, pos2)

# do brute force
# small_pos, big_pos, small, big, empty = bruteForce(top, bottom)
# image = overlay(big, empty, big_pos)
# image = overlay(small, image, small_pos)

# recolor overlap
h, w = empty.shape
color = np.zeros((h, w, 3), np.uint8)
l, r, t, b = overlap(top, bottom, pos1, pos2)
color[:, :, 0] = image
color[:, :, 1] = image
color[:, :, 2] = image
color[t:b, l:r, 0] += 100

# show images
cv2.imshow("top", top)
cv2.imshow("bottom", bottom)
cv2.imshow("overlayed", image)
cv2.imshow("Color", color)
cv2.waitKey(0)
Edit: I added in the random-start hillclimber
import cv2
import numpy as np
from scipy import ndimage

cap = cv2.VideoCapture("video.mp4")  # placeholder: whatever source is in use
borderType = cv2.BORDER_CONSTANT
k = 1  # flag: first frame not yet processed

n = 3
array = np.ones((n, n)) / (n * n)
n = array.shape[0] * array.shape[1]

while True:
    ret, frame = cap.read()
    if ret is True:
        print("newframe")
        gframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        dst = cv2.copyMakeBorder(gframe, 1, 1, 1, 1, borderType, None, None)
        blur = cv2.blur(dst, (3, 3))
        if k == 1:
            lastframe = gframe
            curframe = gframe
            nextframe = gframe
            newFrame = gframe
            k = 0
        else:
            lf = ndimage.convolve(lastframe, array, mode='constant', cval=0.0)
            cf = ndimage.convolve(curframe, array, mode='constant', cval=0.0)
            nf = ndimage.convolve(nextframe, array, mode='constant', cval=0.0)
            lastframe = curframe
            curframe = nextframe
            nextframe = gframe
            b = np.zeros((3, 528, 720))
            b[0] = lf
            b[1] = cf
            b[2] = nf
            result = np.mean(b, axis=0)
            cv2.imshow('frame', result)
            cv2.imshow('frame2', gframe)
            cv2.waitKey(1)  # needed so imshow actually renders
I am trying to add all the pixel values of a 3x3 neighborhood and then average them. I need to do that for every pixel and every frame, and replace the primary pixel with the averaged one. However, the way I am trying to do it makes it really slow and not very accurate.
This sounds like a convolution.
import numpy as np
from scipy import ndimage
a = np.random.random((5, 5))
a
[[0.14742615 0.83548453 0.67433445 0.59162829 0.21160044]
[0.1700598 0.89074466 0.84155171 0.65092969 0.3842437 ]
[0.22662423 0.2266929 0.47757456 0.34480112 0.06261333]
[0.89402116 0.00101947 0.90503461 0.93112109 0.44817247]
[0.21788789 0.3338606 0.07323461 0.28944439 0.91217591]]
Convolution operation with window size 3x3
n = 3
k = np.ones((n, n)) / (n * n)
n = k.shape[0] * k.shape[1]
b = ndimage.convolve(a, k, mode='constant', cval=0.0)
b
[[0.22707946 0.39551126 0.49829704 0.3726987 0.2042669 ]
[0.27744803 0.49894366 0.61486021 0.47103081 0.24953517]
[0.26768469 0.51481368 0.58549664 0.56067136 0.31354238]
[0.21112292 0.37288334 0.39808704 0.4937969 0.33203648]
[0.16075435 0.26945093 0.28152386 0.39546479 0.28676821]]
Now you just have to do it for the current frame, and the two prior frames.
-------- EDIT: For three frames -----------
For 3D you could write a convolution function like in this post, but it's quite complex as it uses FFTs.
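If you do want a genuine 3D convolution without writing the FFT machinery yourself, scipy.signal.fftconvolve already handles N-dimensional arrays. A minimal sketch, assuming the frames are stacked along the first axis:

from scipy.signal import fftconvolve

stack = np.random.random((3, 5, 5))   # (frames, height, width)
kernel3d = np.ones((3, 3, 3)) / 27    # 3x3 window averaged over 3 frames
smoothed = fftconvolve(stack, kernel3d, mode='same')
result = smoothed[1]                  # centre frame of the 3-frame window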
If you just want to average across three frames, you could do:
f1 = np.random.random((5, 5)) # Frame1
f2 = np.random.random((5, 5)) # Frame2
f3 = np.random.random((5, 5)) # Frame3
n = 3
k = np.ones((n, n)) / (n * n)
n = k.shape[0] * k.shape[1]
b0 = ndimage.convolve(f1, k, mode='constant', cval=0.0)
b1 = ndimage.convolve(f2, k, mode='constant', cval=0.0)
b2 = ndimage.convolve(f3, k, mode='constant', cval=0.0)
# Create a 3D Matrix, with each fame placed along the first dimension
b = np.zeros((3, 5, 5))
b[0] = b0
b[1] = b1
b[2] = b2
# Take the average across the first dimension (across frames)
result = np.mean(b, axis=0)
There probably is a more elegant solution than this, but it gets the job done.
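One arguably more elegant variant (a sketch, assuming the frames are stacked along the first axis): scipy.ndimage.uniform_filter with a 3x3x3 window does the spatial and temporal averaging in a single call, and the centre slice should match the result above apart from edge handling:

stack = np.stack([f1, f2, f3])   # shape (3, 5, 5)
# average over a 3x3x3 neighbourhood; the centre slice covers all 3 frames
result = ndimage.uniform_filter(stack, size=3, mode='constant')[1]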
-------- EDIT: For Movies -----------
Based on all the questions in the comments I've decided to attempt to add some more code to help with implementation.
Firstly I'm starting out with these 7 consecutive stills from a movie:
I have not verified that the following code is bug proof or actually returns the correct result.
import cv2
import numpy as np
from scipy import ndimage

# this is a function to do the previous code
def mean_frames(frames, kernel):
    b = np.zeros(frames.shape)
    for i in range(frames.shape[0]):
        b[i] = ndimage.convolve(frames[i], kernel, mode='constant', cval=0.0)
    b = np.mean(b, axis=0)  # np.mean already divides by the number of frames
    return b

root = 'stills/'   # placeholder: folder holding the numbered input stills
path = 'output/'   # placeholder: folder for the averaged output frames

mean_N = 3  # frames to average

# read in 1 file to get dimensions
im = cv2.imread(f'{root}1.png', cv2.IMREAD_GRAYSCALE)

# setup numpy matrix that will hold mean_N frames at a time
frames = np.zeros((mean_N, im.shape[0], im.shape[1]))
avg_frames = []  # list to store our averaged frames
count = 0  # counter to position frames in 1st dim of 3D matrix for avg
k = np.ones((3, 3)) / (3 * 3)  # kernel for 2D convolution

for j in range(1, 8):  # 7 images
    file_name = root + str(j) + '.png'
    im = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
    frames[count, ::] = im  # store in 3D matrix
    # if loaded more than min req. for avg, we average
    if j >= mean_N:
        # average and store to list
        avg_frames.append(mean_frames(frames, k))
    # if the count is mean_N - 1, that means we need to replace
    # the 0th matrix in frames so that we are doing a 'moving avg'
    if count == (mean_N - 1):
        count = 0
    else:
        count += 1  # increase position in 0th dim for 3D matrix storage

# output averaged frames
for i, f in enumerate(avg_frames):
    cv2.imwrite(f'{path}output{i}.jpg', f)
Then looking at the folder, there are 5 files (as expected if we do a moving average of 3 frames over 7 stills):
looking at before and after:
Image 3:
and averaged image #1:
The image not only is in gray scale (as expected) but seems quite dark. Perhaps some brightening would make things look better/more apparent.
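One hedged option for that brightening would be to stretch each averaged frame to the full 8-bit range before writing it out:

for i, f in enumerate(avg_frames):
    # rescale so the darkest pixel maps to 0 and the brightest to 255
    f_bright = cv2.normalize(f, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    cv2.imwrite(f'{path}bright{i}.jpg', f_bright)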
Your question is very interesting.
I see that you are using many loops to implement this. Let's analyze the processing, one frame at a time.
You want to add all the pixel values of a 3x3 neighborhood, so I think image interpolation is well suited to this case. In OpenCV, resize() is used to interpolate pixels when resampling an image, and INTER_NEAREST is the best fit for this situation.
This is the formula for INTER_NEAREST.
Now you have the pixel-added image.
Then you want to do that for every pixel in every frame, replacing each primary pixel with the averaged one. For that, I think average (box) filtering is the better solution; the filter visits every pixel.
Here is a quick example:
Interpolation
img = cv2.resize(img, (img.shape[1]*3, img.shape[0]*3), interpolation=cv2.INTER_NEAREST)
Filter
img = cv2.blur(img, (3, 3))
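Putting the filter into a per-frame loop might look like this (an untested sketch; the video source is a placeholder):

import cv2

cap = cv2.VideoCapture("video.mp4")        # placeholder source
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    averaged = cv2.blur(gray, (3, 3))      # mean of each 3x3 neighbourhood
    cv2.imshow("averaged", averaged)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()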
I'm trying to implement Reinhard's method to use the color distribution of a target image to color normalize a passed in image for a research project. I've gotten the code to work and it outputs correctly but it's pretty slow. It takes about 20 minutes to iterate through 300 images. I'm pretty sure the bottleneck is how I'm handling applying the function to each image. I'm currently iterating through each pixel of the image and applying the functions below to each channel.
def reinhard(target, img):
    # converts image and target from BGR colorspace to l alpha beta
    lAB_img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lAB_tar = cv2.cvtColor(target, cv2.COLOR_BGR2Lab)
    # finds mean and standard deviation for each color channel across the entire image
    (mean, std) = cv2.meanStdDev(lAB_img)
    (mean_tar, std_tar) = cv2.meanStdDev(lAB_tar)
    # iterates over image implementing formula to map color normalized pixels to target image
    for y in range(512):
        for x in range(512):
            lAB_tar[x, y, 0] = (lAB_img[x, y, 0] - mean[0]) / std[0] * std_tar[0] + mean_tar[0]
            lAB_tar[x, y, 1] = (lAB_img[x, y, 1] - mean[1]) / std[1] * std_tar[1] + mean_tar[1]
            lAB_tar[x, y, 2] = (lAB_img[x, y, 2] - mean[2]) / std[2] * std_tar[2] + mean_tar[2]
    mapped = cv2.cvtColor(lAB_tar, cv2.COLOR_Lab2BGR)
    return mapped
My supervisor told me that I could try using a matrix to apply the function all at once to improve the runtime but I'm not exactly sure how to go about doing that.
The original and the target:
Color transfer results using Reinhard's method in 5 ms:
I prefer to implement the formula with NumPy vectorized operations rather than Python loops.
# implementing the formula
#(Io - mo)/so*st + mt = Io * (st/so) + mt - mo*(st/so)
ratio = (std_tar/std_ori).reshape(-1)
offset = (mean_tar - mean_ori*std_tar/std_ori).reshape(-1)
lab_tar = cv2.convertScaleAbs(lab_ori*ratio + offset)
Here is the code:
# 2019/02/19 by knight-金
# https://stackoverflow.com/a/54757659/3547485
import numpy as np
import cv2

def reinhard(target, original):
    # cvtColor: COLOR_BGR2Lab
    lab_tar = cv2.cvtColor(target, cv2.COLOR_BGR2Lab)
    lab_ori = cv2.cvtColor(original, cv2.COLOR_BGR2Lab)
    # meanStdDev: calculate mean and standard deviation
    mean_tar, std_tar = cv2.meanStdDev(lab_tar)
    mean_ori, std_ori = cv2.meanStdDev(lab_ori)
    # implementing the formula
    # (Io - mo)/so*st + mt = Io * (st/so) + mt - mo*(st/so)
    ratio = (std_tar/std_ori).reshape(-1)
    offset = (mean_tar - mean_ori*std_tar/std_ori).reshape(-1)
    lab_tar = cv2.convertScaleAbs(lab_ori*ratio + offset)
    # convert back
    mapped = cv2.cvtColor(lab_tar, cv2.COLOR_Lab2BGR)
    return mapped

if __name__ == "__main__":
    ori = cv2.imread("ori.png")
    tar = cv2.imread("tar.png")

    mapped = reinhard(tar, ori)
    cv2.imwrite("mapped.png", mapped)

    mapped_inv = reinhard(ori, tar)
    cv2.imwrite("mapped_inv.png", mapped_inv)
I managed to figure it out after looking at the numpy documentation. I just needed to replace my nested for loop with proper array accessing. It took less than a minute to iterate through all 300 images with this.
lAB_tar[:,:,0] = (lAB_img[:,:,0] - mean[0])/std[0] * std_tar[0] + mean_tar[0]
lAB_tar[:,:,1] = (lAB_img[:,:,1] - mean[1])/std[1] * std_tar[1] + mean_tar[1]
lAB_tar[:,:,2] = (lAB_img[:,:,2] - mean[2])/std[2] * std_tar[2] + mean_tar[2]
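If I'm not mistaken, the three lines can even collapse into a single broadcast expression: cv2.meanStdDev returns 3x1 arrays, so reshaping them to (1, 1, 3) lets them broadcast over the channel axis:

m, s = mean.reshape(1, 1, 3), std.reshape(1, 1, 3)
m_t, s_t = mean_tar.reshape(1, 1, 3), std_tar.reshape(1, 1, 3)
# uint8 input is promoted to float by the arithmetic, then clipped back
lAB_tar = cv2.convertScaleAbs((lAB_img - m) / s * s_t + m_t)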
In my code, I am trying to import a grayscale image (2D array) and then solve for the optical density (OD) based on an empirical formula I came up with. The optical density relates to the grayscale value by OD = 0.51*((f-22.08)/(176.09-f))**(1./-1.519), where f is the grayscale value of each element in the array. Then I converted it into an RGB image.
My problem is that I am trying to run each individual element of the image array through an if statement, but it never enters the statement. What I want to do is increase the intensity of each individual element or pixel value in R, G, and B based on which condition its optical density meets. Say, if its OD value falls between b and c, [128, 0, 0] is added to each element that satisfies that criterion.
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

t = Image.open("IMG_1.jpg").convert('L')  # grayscale image
f = np.array(t)  # convert test image into an array
OD = 0.51*((f-22.08)/(176.09-f))**(1./-1.519)  # empirical Rodbard formula
OD[np.isnan(OD)] = 0

def to_rgb5(im):
    OD.resize((OD.shape[0], OD.shape[1], 1))
    return np.repeat(OD.astype(np.uint8), 3, 2)

cmap = plt.get_cmap('jet')
rgba_img = cmap(OD)
rgb_img = np.delete(rgba_img, 3, 2)

a = 0.08
b = 0.11
c = 0.15
if np.all(OD < a):
    background_noise = rgb_img
if np.all(OD < b):
    small = rgb_img + [128, 0, 0]
elif np.all(OD >= c):
    large = rgb_img + [0, 0, 128]

Red = f + small
Green = f
Blue = f + large
I was able to get it to work using np.where, as suggested by @Stuart.
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.stats import threshold  # note: removed from newer SciPy releases

t = Image.open("IMG_1.jpg").convert('L')  # grayscale image
f = np.array(t)  # convert test image into an array
OD = 0.51*((f-22.08)/(176.09-f))**(1./-1.519)  # empirical Rodbard formula
OD[np.isnan(OD)] = 0

thB = 0.02
ththin = 0.11
ththick = 0.15
GSb = 162
GSthin = 150
GSthick = 145

if np.where(OD < ththin):
    thresholded = threshold(f, GSthin, GSb)
    def to_rgb1(thresholded):
        thresholded.resize((thresholded.shape[0], thresholded.shape[1], 1))
        return np.repeat(thresholded.astype(np.uint8), 3, 2)
    cmap = plt.get_cmap('jet')
    rgba_img1 = cmap(thresholded)
    rgb_img1 = np.delete(rgba_img1, 3, 2)
    view = rgb_img1[:, :, 2]
    view[view < 0.1] += 128
    thin = rgb_img1

if np.where(OD > ththick):
    thresholded2 = threshold(f, threshmax=GSthick)
    def to_rgb2(thresholded2):
        thresholded2.resize((thresholded2.shape[0], thresholded2.shape[1], 1))
        return np.repeat(thresholded2.astype(np.uint8), 3, 2)
    cmap = plt.get_cmap('jet')
    rgba_img2 = cmap(thresholded2)
    rgb_img2 = np.delete(rgba_img2, 3, 2)
    view2 = rgb_img2[:, :, 0]
    view2[view2 > 0] += 128
    thick = rgb_img2
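As an aside, "if np.where(...)" always evaluates truthy, so both branches above run unconditionally; a per-pixel boolean mask may be closer to the original intent. A hedged sketch:

# build an RGB image from the grayscale array, then add intensity
# only where each OD band condition is actually met per pixel
rgb = np.repeat(f[:, :, None], 3, axis=2).astype(np.int32)
rgb[..., 0][OD < ththin] += 128     # red boost for the 'thin' band
rgb[..., 2][OD > ththick] += 128    # blue boost for the 'thick' band
rgb = np.clip(rgb, 0, 255).astype(np.uint8)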