What is wrong with my focus stacking algorithm?

You can see in the final focus-stacked image that the whole image is in focus. However, pieces of the image are missing and I have no clue why. The basic steps of my algorithm are:
Access the images. Convert each image to grayscale, blur the gray image a bit, then take its Laplacian. I store all the Laplacian images in a list.
Cycle through the pixels of a blank image using for loops. Each iteration builds a list of the intensities of the Laplacian images at that pixel, finds the maximum intensity, then looks up the BGR value of the ORIGINAL image that the maximum came from and assigns it to the blank pixel.
Here is my code:
import glob2
import numpy as np
import cv2 as cv

images = glob2.glob("Pics\\step*")  # Accesses images in the Pics folder
laps = []  # A list to contain the Laplacians of the images in Pics
i = 0
for image in images:
    img = cv.imread(image)  # Reads an image from Pics
    images[i] = img  # glob only returns file NAMES (i.e. strings), so this replaces each name with the actual image
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # Converts the image to grayscale
    gauss = cv.GaussianBlur(img, (3, 3), 0)  # Blurs the grayed image a bit
    lap = cv.Laplacian(gauss, cv.CV_64F)  # Takes the Laplacian of the blurred, gray image
    lap = np.uint8(np.absolute(lap))  # Takes the absolute value and converts back to uint8
    laps.append(lap)  # Adds the Laplacian to laps
    i += 1

sample = laps[0]  # Arbitrarily accesses the first image in laps so that we can get the output dimensions
fs = np.zeros((sample.shape[0], sample.shape[1], 3), dtype='uint8')  # Creates a blank image with the dimensions of sample

for x in range(sample.shape[0]):  # The for loops go through every x and y value
    for y in range(sample.shape[1]):
        intensities = [lap[x, y] for lap in laps]  # Intensities of the Laplacian images at this pixel
        color = images[intensities.index(max(intensities))][x, y]  # BGR value of the x,y pixel in the ORIGINAL image with the highest Laplacian intensity
        fs[x, y] = color  # Sets the pixel of the blank fs image to the color of the sharpest source pixel
cv.imshow('FS', fs)
Here is what the code produces:
Broken Focus Stacked Image

I was inspired by your code and made this simple script, which seems to work fine. (I do not need to align the images.) Using a mask to select the in-focus pixels may be faster, but I haven't compared the two versions. I would appreciate any advice on how to improve it.
from pathlib import Path
from imageio import imread, imwrite
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2hsv, rgb2gray
from skimage import img_as_float, img_as_ubyte
from scipy.ndimage import gaussian_filter
from skimage.filters.rank import gradient
from skimage.morphology import disk
im_dir = Path("test")
sigma = 3
print("_____ load images _____")
fps = [f for f in im_dir.glob("*.jpg")]
print([f.name for f in fps])
images_rgb = [imread(f) for f in fps]
images_rgb_cube = np.array(images_rgb)
print("images_rgb_cube", images_rgb_cube.shape, images_rgb_cube.dtype)
print("_____ images to grey _____")
#images_grey = [rgb2hsv(im)[:, :, 2] for im in images_rgb]  # slow
images_grey = [rgb2gray(im) for im in images_rgb]  # faster
print("_____ get gradients _____")
selection_element = disk(sigma) # matrix of n pixels with a disk shape
grads = [gradient(im, selection_element) for im in images_grey]
grads = np.array(grads)
print("grads", grads.shape, grads.dtype)
print("_____ get mask _____")
mask_grey = grads.max(axis=0, keepdims=True) == grads  # https://stackoverflow.com/questions/47678252/mask-from-max-values-in-numpy-array-specific-axis
mask_rgb = np.repeat(mask_grey[:, :, :, np.newaxis], 3, axis=3)
print("mask_rgb", mask_rgb.shape, mask_rgb.dtype)
print("_____ apply mask _____")
image_sharp = images_rgb_cube * mask_rgb
image_sharp = image_sharp.max(axis=0)
print("image_sharp", image_sharp.shape, image_sharp.dtype)
print("_____ save image _____")
imwrite(im_dir / "stacked.jpeg", image_sharp)
plt.imshow(image_sharp)
plt.show()
print("_____ save masks _____")
print("mask_grey", mask_grey.shape, mask_grey.dtype)
for i in range(mask_grey.shape[0]):
    mask = mask_grey[i]
    fp = im_dir / "{}_mask.jpeg".format(fps[i].stem)
    imwrite(fp, img_as_ubyte(mask))
    print("saved", fp, mask.shape, mask.dtype)

Related

creating a neon-glow with python numpy

I'm trying to create a neon effect with a source image. I have included three images: the source, my current attempt, and a target. The program takes the image, finds the white edges, and calculates the distance from each pixel to the nearest white edge (these parts both work fine); from there, I am struggling to find the right saturation and value parameters to create the neon glow.
From the target image, what I need is basically for the saturation to be 0 on a white edge, then to increase dramatically the further away it gets from an edge; for value, I need it to be 1 on a white edge, then to decrease dramatically. I can't figure out the best way to manipulate distance_image (which holds each pixel's distance from the nearest white edge) so as to achieve these two results with saturation and value.
from PIL import Image
import cv2
import numpy as np
from scipy.ndimage import binary_erosion
from scipy.spatial import KDTree

def find_closest_distance(img):
    # Coordinates of all white pixels, then a KD-tree for nearest-neighbour queries
    white_pixel_points = np.array(np.where(img))
    tree = KDTree(white_pixel_points.T)
    img_meshgrid = np.array(np.meshgrid(np.arange(img.shape[0]),
                                        np.arange(img.shape[1]))).T
    distances, _ = tree.query(img_meshgrid)
    return distances

def find_edges(img):
    img_np = np.array(img)
    kernel = np.ones((3, 3))
    return img_np - binary_erosion(img_np, kernel)*255

img = Image.open('a.png').convert('L')
edge_image = find_edges(img)
distance_image = find_closest_distance(edge_image)
max_dist = np.max(distance_image)
distance_image = distance_image / max_dist

hue = np.full(distance_image.shape, 0.44*180)
saturation = distance_image * 255
value = np.power(distance_image, 0.2)
value = 255 * (1 - value**2)
new_tups = np.dstack((hue, saturation, value)).astype('uint8')
new_tups = cv2.cvtColor(new_tups, cv2.COLOR_HSV2BGR)
new_img = Image.fromarray(new_tups, 'RGB').save('out.png')
The following images show the source data (left), the current result (middle), and the desired result (right).
I think I would do this with convolution instead. Convolving an image with a Gaussian kernel is a common way to blur an image. You can do it in various ways, but maybe the easiest to use is scipy.ndimage.gaussian_filter. Here's one way to implement all this, see if you like the result.
from PIL import Image
from io import BytesIO
import requests
import numpy as np
r = requests.get('https://i.stack.imgur.com/MhUQZ.png')
img = Image.open(BytesIO(r.content))
imarray = np.asarray(img)[..., 0] / 255
This is your first image, the white rectangles.
Now I'll make those outlines, do the blur, create the colour images, and combine them:
from scipy.ndimage import binary_erosion
from scipy.ndimage import gaussian_filter
eroded = binary_erosion(imarray, iterations=3)
# Make the outlined rectangles.
outlines = imarray - eroded
# Convolve with a Gaussian to effect a blur.
blur = gaussian_filter(outlines, sigma=11)
# Make binary images into neon green.
neon_green_rgb = [0.224, 1.0, 0.0784]
outlines = outlines[:, :, None] * neon_green_rgb
blur = blur[:, :, None] * neon_green_rgb
# Combine the images and constrain to [0, 1].
blur_strength = 3
glow = np.clip(outlines + blur_strength*blur, 0, 1)
And look at it:
import matplotlib.pyplot as plt
plt.imshow(glow)
You'll want to adjust the sigma of the Gaussian (its width), the colours, blur strength, and so on. Hope it helps.
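If you want to experiment with the glow width, only the blur step needs recomputing. A small sketch of a parameter sweep, assuming the coloured outlines array and blur_strength from above (the output filenames are placeholders):
from scipy.ndimage import gaussian_filter
for sigma in (5, 11, 21):
    blur = gaussian_filter(outlines, sigma=(sigma, sigma, 0))  # blur only the spatial axes
    glow = np.clip(outlines + blur_strength * blur, 0, 1)
    plt.imsave('glow_sigma{}.png'.format(sigma), glow)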
Here is one way to do that in Python/OpenCV.
Read the input
Convert to grayscale
Threshold to binary
Get edges of desired thickness using morphology gradient
Invert the edges so black on white background
Do distance transform
Stretch to full dynamic range
Invert
Normalize to range 0 to 1 by dividing by the maximum value
Attenuate using a power law to control distance roll-off (ramping)
Create a color image of the size of the input and the desired color
Multiply the attenuated image by the color image
Save results
Input:
import cv2
import numpy as np
import skimage.exposure
# read input
img = cv2.imread('rectangles.png')
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# do morphology gradient to get edges and invert so black edges on white background
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
edges = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
edges = 255 - edges
# get distance transform
dist = edges.copy()
distance = cv2.distanceTransform(dist, distanceType=cv2.DIST_L2, maskSize=3)
print(np.amin(distance), np.amax(distance))
# stretch to full dynamic range
stretch = skimage.exposure.rescale_intensity(distance, in_range='image', out_range=(0, 255))

# invert
stretch = 255 - stretch
max_stretch = np.amax(stretch)

# normalize to range 0 to 1 by dividing by max_stretch
stretch = (stretch / max_stretch)

# attenuate with power law
power = 4
attenuate = np.power(stretch, power)
attenuate = cv2.merge([attenuate, attenuate, attenuate])
# create a green image the size of the input
color_img = np.full_like(img, (0,255,0), dtype=np.float32)
# multiply the color image with the attenuated distance image
glow = (color_img * attenuate).clip(0,255).astype(np.uint8)
# save results
cv2.imwrite('rectangles_edges.png', edges)
cv2.imwrite('rectangles_stretch.png', (255*stretch).clip(0,255).astype(np.uint8))
cv2.imwrite('rectangles_attenuate.png', (255*attenuate).clip(0,255).astype(np.uint8))
cv2.imwrite('rectangles_glow.png', glow)
# view results
cv2.imshow("EDGES", edges)
cv2.imshow("STRETCH", stretch)
cv2.imshow("ATTENUATE", attenuate)
cv2.imshow("RESULT", glow)
cv2.waitKey(0)
Edges (inverted):
Stretched Distance Transform:
Attenuated Distance Transform:
Glow Result:

saving image using matplotlib in python

I'm trying to write a simple program that loads an image, divides the value of each pixel by 2, and stores the resulting image. The image is stored in an array of shape [1280][720][3]. After changing the value of each pixel I checked that the values were as expected. For some reason the values are correct in memory, but when I store the new image and read it back, the pixel values are not the same as before...
The image is 1280x720 pixels and each pixel has 3 bytes (one for each color, RGB).
import matplotlib.image as mpimg

img = mpimg.imread('image.jpg')  # (1280, 720, 3)
myImg = []
for row in img:
    myRow = []
    for pixel in row:
        myPixel = []
        for color in pixel:
            myPixel.append(color // 2)
        myRow.append(myPixel)
    myImg.append(myRow)
mpimg.imsave("foo.jpg", myImg)
img is a NumPy array, so you can just use img // 2 (floor division keeps the uint8 dtype; plain img / 2 would give floats, which imsave expects in the 0-1 range). It's also much faster than the nested list loops.
myImg = img // 2
mpimg.imsave("foo.jpg", myImg)

convert IR image to RGB with python

The code below is intended to take an infrared image (B&W) and convert it to RGB. It does so successfully, but with significant noise. I have included a few lines for noise reduction but they don't seem to help. I've included the starting/resulting photos below. Any advice/corrections are welcome and thank you in advance!
from skimage import io
import numpy as np
import glob, os
from tkinter import Tk
from tkinter.filedialog import askdirectory
import cv2
path = askdirectory(title='Select PNG Folder')  # shows dialog box and returns the path
outpath = askdirectory(title='Select SAVE Folder')

# wavelength in microns
MWIR = 4.5

R = .642
G = .532
B = .44

vector = [R, G, B]
vectorsum = np.sum(vector)
vector = vector / vectorsum  # normalize
vector = vector * 255 / MWIR  # changing this value changes the outcome significantly, so I
# have been messing with it in the hopes of fixing it, but no luck so far
vector = np.power(vector, 4)

for file in os.listdir(path):
    if file.endswith(".png"):
        imIn = io.imread(os.path.join(path, file))
        imOut = imIn * vector
        ret, thresh = cv2.threshold(imOut, 64, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        erode = cv2.erode(thresh, kernel, iterations=1)
        result = cv2.bitwise_or(imOut, erode)
        io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut.astype(np.uint8))
Your noise looks like completely random values, so I suspect you have an error in your conversion from float to uint8. But instead of rolling everything yourself, why don't you just use:
imOut = cv2.cvtColor(imIn,cv2.COLOR_GRAY2BGR)
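If you do keep your manual weighting, clip before casting to uint8; values above 255 wrap around modulo 256 when cast, which looks exactly like random noise. A minimal sketch, assuming imOut is the float image from your loop:
imOut8 = np.clip(imOut, 0, 255).astype(np.uint8)  # clip first, then cast, to avoid wrap-around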
Here is one way to do that in Python/OpenCV.
Your issue is likely that your channel values are exceeding the 8-bit range.
Sorry, I do not understand the relationship between your R,G,B weights and your MWIR. Dividing by MWIR will do nothing if your weights are properly normalized.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('car.jpg')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# make color channels
red = gray.copy()
green = gray.copy()
blue = gray.copy()
# set weights
R = .642
G = .532
B = .44
MWIR = 4.5
# get sum of weights and normalize them by the sum
R = R**4
G = G**4
B = B**4
sum = R + G + B
R = R/sum
G = G/sum
B = B/sum
print(R,G,B)
# combine channels with weights
red = (R*red)
green = (G*green)
blue = (B*blue)
result = cv2.merge([red,green,blue])
# scale by the ratio 255/max to stretch to the full dynamic range
max_val = np.amax(result)
result = ((255 / max_val) * result).clip(0, 255).astype(np.uint8)
# write result to disk
cv2.imwrite("car_colored.png", result)
# display it
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result
If the noise is coming from the sensor itself, like grainy noise, you'll need to look into denoising algorithms; scikit-image and OpenCV both provide denoising functions you can try (see their respective denoising documentation and tutorials).
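For example, OpenCV's non-local means denoiser is a reasonable first try. A minimal sketch, where the filename and the strength parameters are placeholders to tune:
import cv2
img = cv2.imread('noisy.png')  # hypothetical noisy colour image
# h / hColor set the luminance / colour filter strength; 7 and 21 are the usual
# template and search window sizes
denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
cv2.imwrite('denoised.png', denoised)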
I recently learned about matplotlib.cm, which handles colormaps. I've been using those to artificially color IR images, and made a brief example using the same black & white car image used above. Basically, I create a colormap .csv file locally, then refer to it for RGB weights. You may have to pick and choose which colormap you prefer, but that's up to personal preference.
Input image:
Python:
import os
import numpy as np
import cv2
from matplotlib import cm

# Multiple colormap options are available; I've hardcoded viridis for this example.
colormaps = ["viridis", "plasma", "inferno", "magma", "cividis"]

def CreateColormap():
    if not os.path.exists("viridis_colormap.csv"):
        # Get 256 entries from "viridis" or any other Matplotlib colormap
        colormap = cm.get_cmap("viridis", 256)
        # Make a Numpy array of the 256 RGB values
        # Each line corresponds to an RGB colour for a greyscale level
        np.savetxt("viridis_colormap.csv", (colormap.colors[..., 0:3]*255).astype(np.uint8), fmt='%d', delimiter=',')

def RecolorInfraredImageToRGB(ir_image):
    # Load RGB lookup table from CSV file
    lookup_table = np.loadtxt("viridis_colormap.csv", dtype=np.uint8, delimiter=",")
    # Make output image, same height and width as IR image, but 3-channel RGB
    result = np.zeros((*ir_image.shape, 3), dtype=np.uint8)
    # Take entries from RGB LUT according to greyscale values in image
    np.take(lookup_table, ir_image, axis=0, out=result)
    return result

if __name__ == "__main__":
    CreateColormap()
    img = cv2.imread("bwcar.jpg")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    recolored = RecolorInfraredImageToRGB(gray)
    cv2.imwrite("car_recolored.png", recolored)
    cv2.imshow("Viridis recolor", recolored)
    cv2.waitKey(0)
Output:
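As an aside, recent OpenCV builds ship several of these colormaps directly, so the CSV round trip can be skipped when a stock map is fine. A minimal sketch, assuming an OpenCV version that includes COLORMAP_VIRIDIS:
colored = cv2.applyColorMap(gray, cv2.COLORMAP_VIRIDIS)  # 3-channel BGR output from the grayscale image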

How can I soften just the edges of this image

I am using Python and OpenCV to cut out an image using a mask. The mask itself is quite jagged, so the resulting image ends up a bit jagged around the edges, like below:
Jagged image
Is there a way I can smooth out the edges so they look more like this without affecting the rest of the image?
Smoothed edge
Thanks
SoS
** UPDATE **
Added the original jagged image without the annotation
Original Jagged image
Here is one way using OpenCV, Numpy and Skimage. I assume you actually have an image with a transparent background and not just checkerboard pattern.
Input:
import cv2
import numpy as np
import skimage.exposure
# load image with alpha channel
img = cv2.imread('lena_circle.png', cv2.IMREAD_UNCHANGED)
# extract only bgr channels
bgr = img[:, :, 0:3]
# extract alpha channel
a = img[:, :, 3]
# blur alpha channel
ab = cv2.GaussianBlur(a, (0,0), sigmaX=2, sigmaY=2, borderType = cv2.BORDER_DEFAULT)
# stretch so that 255 -> 255 and 127.5 -> 0
aa = skimage.exposure.rescale_intensity(ab, in_range=(127.5,255), out_range=(0,255))
# replace alpha channel in input with new alpha channel
out = img.copy()
out[:, :, 3] = aa
# save output
cv2.imwrite('lena_circle_antialias.png', out)
# Display various images to see the steps
# NOTE: In and Out show heavy aliasing. This seems to be an artifact of imshow(), which did not display transparency for me. However, the saved image looks fine
cv2.imshow('In',img)
cv2.imshow('BGR', bgr)
cv2.imshow('A', a)
cv2.imshow('AB', ab)
cv2.imshow('AA', aa)
cv2.imshow('Out', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
I am by no means an expert with OpenCV. I looked at cv2.normalize(), but it did not look like I could provide my own sets of input and output values. So I also tried using the following adding the clipping to be sure there were no over-flows or under-flows:
aa = a*2.0 - 255.0
aa[aa<0] = 0
aa[aa>0] = 255  # note: this sets EVERY positive value to 255, binarizing rather than clipping; a true clip would be aa[aa>255] = 255
where A and B come from solving simultaneous equations so that in=255 becomes out=255 and in=127.5 becomes out=0, with a linear stretch in between:
C = A*X+B
255 = A*255+B
0 = A*127.5+B
Thus A=2 and B=-127.5
But that does not work nearly as well as skimage rescale_intensity.
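For reference, the intended clipped version of the same mapping can be written in one line with np.clip. A minimal sketch, assuming the alpha channel a from above:
aa = np.clip(a.astype(np.float32) * 2.0 - 255.0, 0, 255).astype(np.uint8)  # linear stretch with real clipping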
These are some effects you can do with the PIL image library:
from PIL import Image, ImageFilter
im_1 = Image.open("/constr/pics1/russian_doll.png")
im_2 = im_1.filter(ImageFilter.BLUR)
im_3 = im_1.filter(ImageFilter.CONTOUR)
im_4 = im_1.filter(ImageFilter.DETAIL)
im_5 = im_1.filter(ImageFilter.EDGE_ENHANCE)
im_6 = im_1.filter(ImageFilter.EDGE_ENHANCE_MORE)
im_7 = im_1.filter(ImageFilter.EMBOSS)
im_8 = im_1.filter(ImageFilter.FIND_EDGES)
im_9 = im_1.filter(ImageFilter.SMOOTH)
im_10 = im_1.filter(ImageFilter.SMOOTH_MORE)
im_11 = im_1.filter(ImageFilter.SHARPEN)
# now save the images
im_2.save("/constr/picsx/russian_doll_BLUR.png")
im_3.save("/constr/picsx/russian_doll_CONTOUR.png")
im_4.save("/constr/picsx/russian_doll_DETAIL.png")
im_5.save("/constr/picsx/russian_doll_EDGE_ENHANCE.png")
im_6.save("/constr/picsx/russian_doll_EDGE_ENHANCE_MORE.png")
im_7.save("/constr/picsx/russian_doll_EMBOSS.png")
im_8.save("/constr/picsx/russian_doll_FIND_EDGES.png")
im_9.save("/constr/picsx/russian_doll_SMOOTH.png")
im_10.save("/constr/picsx/russian_doll_SMOOTH_MORE.png")
im_11.save("/constr/picsx/russian_doll_SHARPEN.png")
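For softening edges specifically, ImageFilter.GaussianBlur takes a radius argument, unlike the fixed-size BLUR filter above. A minimal sketch:
im_soft = im_1.filter(ImageFilter.GaussianBlur(radius=3))  # larger radius = stronger blur
im_soft.save("/constr/picsx/russian_doll_GAUSS3.png")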

How to get histogram of intensity of individual masked cells in an image?

OK, so newbie here who has been working on a set of homework problems; the original post is here: How do I make a mask from one image and then transfer it to another?
. The original idea was to take the DAPI image (grey image) and apply it as a mask to the NPM1 (green) image. After implementing the suggested code from HansHirse (thanks!) along with some other code I had been writing for the homework problem, I finally got a working histogram of all compatible cells in the image. The "compatibility" bit is that any cells touching the border aren't supposed to be counted. However, I still need a way to get a histogram of each individual cell as well. I've attached the original images from that post too:
To do this, I tried blob_doh and one other method to get segmented regions of each cell, but I have no idea how to apply those coordinates to the image to compute per-cell histograms.
PS. The code is a bit messy. I arranged it so that the blob_doh part is near the bottom and the other method is its own separate piece at the very bottom. Sorry!
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import cv2
import mahotas as mh
import scipy
from scipy import ndimage
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
# Read image into numpy array
image = cv2.imread("NOTREATDAPI.jpg",0)
dna = np.array(image) # must be gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
epsilon = np.array(cleared) #final masked DAPI product
print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Load and reset original images
image = cv2.imread("NOTREATDAPI.jpg",0) #The DAPI Image
image1 = cv2.imread("NOTREATNPM1.jpg",0) #The NPM1 Image
print("Original DAPI Image");plt.imshow(image);plt.show() #The DAPI Image
print("Original NPM1 Image");plt.imshow(image1);plt.show() #The NPM1 Image
# Create an array of bool of same shape as image
maskAboveThreshold = epsilon > 0 #Use mask array from above - include only values above non-masked zeros
print("Final Masked Image of NPM1"); plt.imshow(image1 *
maskAboveThreshold, cmap='gray')
plt.show()
True_NPM1= image1 * maskAboveThreshold # Final masked version of NPM1 set back to grayscale
# Create a mask from the masked NPM1 image using binary thresholding
_, mask = cv2.threshold(True_NPM1, 1, 255, cv2.THRESH_BINARY)

# Do some morphological opening to get rid of small artifacts
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
                        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))

# Calculate the histogram using the NPM1 image and the obtained binary mask
hist = cv2.calcHist([image1], [0], mask, [256], [0, 256])
# Show bar plot of calculated histogram
plt.bar(np.arange(256), np.squeeze(hist))
plt.show()
# Show mask image
plt.imshow(mask)
plt.show()
#blob_doh way of segmenting the cells ------
import cv2 as cv
from PIL import Image, ImageDraw
image10 = np.array(Image.open("OXALIDAPI.jpg"))
plt.imshow(image10)
#Convert to gaussian image with thresholds
image10 = cv2.imread("OXALIDAPI.jpg",0)
dna = np.array(image10) # gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
image = np.array(cleared) #final masked DAPI product
#print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Convert image to grayscale
image_gray = rgb2gray(image)
plt.imshow(image_gray,cmap="gray")
def plot_blobs(img, blobs):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(img, interpolation='nearest')
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r*1.25, color="red", linewidth=1, fill=False)
        ax.add_patch(c)
# blob_doh
blobs_doh = blob_doh(image_gray, min_sigma=10, max_sigma=256, threshold=.025)
plot_blobs(image, blobs_doh)
#get blob coordinates
def filter_blobs(blobs, r_cutoff=5):
    new_blobs = []
    for b in blobs:
        if b[2] > r_cutoff:
            new_blobs.append(b)
    return new_blobs
new_blobs = filter_blobs(blobs_doh)
#plot_blobs(image,new_blobs)
print(new_blobs)
#Other method of segmenting cells. maybe useful?
yeta = cv2.imread("NOTREATDAPI.jpg",0)
image = np.array(yeta)
# apply threshold
dnaf = ndimage.gaussian_filter(image, 8)
T = mh.thresholding.otsu(dnaf) # set threshold
plt.imshow(dnaf > T)
epsilon=np.array(dnaf > T)
plt.show()
# remove artifacts connected to image border
cleared = clear_border(epsilon)
# label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # take regions with large enough areas
    if region.area >= 50:
        # draw rectangle around individual cells
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=0.5)
        ax.add_patch(rect)

#ax.set_axis_off()
#plt.tight_layout()
plt.show()
howzer=np.array(image_label_overlay)
What you are looking for is cv2.connectedComponents. Basically, once you have the binary mask that separates the cells, you label each connected component of the mask as one cell:
# I chose OTSU instead of plain binary, but they are not much different in this case
_, mask = cv2.threshold(dapi, 25, 255, cv2.THRESH_OTSU)

# compute the connected components; the first return value is the number of labels
num_labels, markers = cv2.connectedComponents(mask)

# load the 2nd image in grayscale,
# as your 2nd image is only green/black
npm1 = cv2.imread('npm1.jpg', cv2.IMREAD_GRAYSCALE)

# label 0 is the background (for your image, and usually)
for label in range(1, num_labels):
    # compute the histogram over the entire 256 levels of intensity
    hist, bins = np.histogram(npm1[markers == label], bins=range(256))
    # do whatever you like to hist
    # note that bins has 256 edges, so hist has 255 values
    plt.bar(bins[1:], hist)
    plt.title('cell number: {:}'.format(label))
So for example the histogram of the first and second cells:
And the cell markers are:
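To sanity-check the segmentation itself, the marker image can be displayed with a categorical colormap. A minimal sketch, assuming markers from above:
plt.imshow(markers, cmap='nipy_spectral')  # each labelled cell gets its own colour
plt.show()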
