Creating a neon glow with Python/NumPy

I'm trying to create a neon effect from a source image. I have included three images: the source, my current attempt, and a target. The program takes the image, finds the white edges, and calculates the distance from each pixel to the nearest white edge (these parts both work fine); from there, I am struggling to find the right saturation and value parameters to create the neon glow.
From the target image, what I need is basically for the saturation to be 0 on a white edge and then increase dramatically the further away it gets from an edge; for value, I need it to be 1 on a white edge and then decrease dramatically. I can't figure out the best way to manipulate distance_image (which holds each pixel's distance from the nearest white edge) so as to achieve these two results for saturation and value.
from PIL import Image
import cv2
import numpy as np
from scipy.ndimage import binary_erosion
from scipy.spatial import KDTree

def find_closest_distance(img):
    white_pixel_points = np.array(np.where(img))
    tree = KDTree(white_pixel_points.T)
    img_meshgrid = np.array(np.meshgrid(np.arange(img.shape[0]),
                                        np.arange(img.shape[1]))).T
    distances, _ = tree.query(img_meshgrid)
    return distances

def find_edges(img):
    img_np = np.array(img)
    kernel = np.ones((3,3))
    return img_np - binary_erosion(img_np, kernel)*255

img = Image.open('a.png').convert('L')
edge_image = find_edges(img)
distance_image = find_closest_distance(edge_image)
max_dist = np.max(distance_image)
distance_image = distance_image / max_dist

hue = np.full(distance_image.shape, 0.44*180)
saturation = distance_image * 255
value = np.power(distance_image, 0.2)
value = 255 * (1 - value**2)

new_tups = np.dstack((hue, saturation, value)).astype('uint8')
new_tups = cv2.cvtColor(new_tups, cv2.COLOR_HSV2BGR)
new_img = Image.fromarray(new_tups, 'RGB').save('out.png')
The following images show the source data (left), the current result (middle), and the desired result (right).
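One mapping with roughly the shape I describe (just a sketch on my part, using the normalized distance_image from the code above and an arbitrary decay constant k) would be an exponential falloff:

k = 0.05  # hypothetical decay constant, to be tuned by eye
falloff = np.exp(-distance_image / k)   # 1 on a white edge, approaching 0 far away
saturation = 255 * (1 - falloff)        # 0 on an edge, rising sharply with distance
value = 255 * falloff                   # bright on an edge, falling sharply with distance

But I don't know whether that is the right shape for the target look.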

I think I would do this with convolution instead. Convolving an image with a Gaussian kernel is a common way to blur an image. You can do it in various ways, but maybe the easiest to use is scipy.ndimage.gaussian_filter. Here's one way to implement all this; see if you like the result.
from PIL import Image
from io import BytesIO
import requests
import numpy as np
r = requests.get('https://i.stack.imgur.com/MhUQZ.png')
img = Image.open(BytesIO(r.content))
imarray = np.asarray(img)[..., 0] / 255
This is your first image, the white rectangles.
Now I'll make those outlines, do the blur, create the colour images, and combine them:
from scipy.ndimage import binary_erosion
from scipy.ndimage import gaussian_filter
eroded = binary_erosion(imarray, iterations=3)
# Make the outlined rectangles.
outlines = imarray - eroded
# Convolve with a Gaussian to effect a blur.
blur = gaussian_filter(outlines, sigma=11)
# Make binary images into neon green.
neon_green_rgb = [0.224, 1.0, 0.0784]
outlines = outlines[:, :, None] * neon_green_rgb
blur = blur[:, :, None] * neon_green_rgb
# Combine the images and constrain to [0, 1].
blur_strength = 3
glow = np.clip(outlines + blur_strength*blur, 0, 1)
And look at it:
import matplotlib.pyplot as plt
plt.imshow(glow)
You'll want to adjust the sigma of the Gaussian (its width), the colours, blur strength, and so on. Hope it helps.
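If you want to write the result to disk rather than just display it, the [0, 1] float array can be converted back to 8-bit and saved with PIL (a small usage note; the filename is arbitrary):

out = Image.fromarray((glow * 255).astype('uint8'))
out.save('neon_glow.png')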

Here is one way to do that in Python/OpenCV.
Read the input
Convert to grayscale
Threshold to binary
Get edges of desired thickness using morphology gradient
Invert the edges so black on white background
Do distance transform
Stretch to full dynamic range
Invert
Normalize to range 0 to 1 by dividing by the maximum value
Attenuate using a power law to control distance roll-off (ramping)
Create a color image of the size of the input and the desired color
Multiply the attenuated image by the color image
Save results
Input:
import cv2
import numpy as np
import skimage.exposure
# read input
img = cv2.imread('rectangles.png')
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# do morphology gradient to get edges and invert so black edges on white background
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
edges = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
edges = 255 - edges
# get distance transform
dist = edges.copy()
distance = cv2.distanceTransform(dist, distanceType=cv2.DIST_L2, maskSize=3)
print(np.amin(distance), np.amax(distance))
# stretch to full dynamic range
stretch = skimage.exposure.rescale_intensity(distance, in_range='image', out_range=(0,255))
# invert
stretch = 255 - stretch
max_stretch = np.amax(stretch)
# normalize to range 0 to 1 by dividing by max_stretch
stretch = (stretch/max_stretch)
# attenuate with power law
pow = 4
attenuate = np.power(stretch, pow)
attenuate = cv2.merge([attenuate,attenuate,attenuate])
# create a green image the size of the input
color_img = np.full_like(img, (0,255,0), dtype=np.float32)
# multiply the color image with the attenuated distance image
glow = (color_img * attenuate).clip(0,255).astype(np.uint8)
# save results
cv2.imwrite('rectangles_edges.png', edges)
cv2.imwrite('rectangles_stretch.png', (255*stretch).clip(0,255).astype(np.uint8))
cv2.imwrite('rectangles_attenuate.png', (255*attenuate).clip(0,255).astype(np.uint8))
cv2.imwrite('rectangles_glow.png', glow)
# view results
cv2.imshow("EDGES", edges)
cv2.imshow("STRETCH", stretch)
cv2.imshow("ATTENUATE", attenuate)
cv2.imshow("RESULT", glow)
cv2.waitKey(0)
Edges (inverted):
Stretched Distance Transform:
Attenuated Distance Transform:
Glow Result:

Related

How to improve a variable lens blur algorithm in Python OpenCV?

I want to emulate the blur of a cheap camera lens (like a Holga).
The blur is very weak close to the photo center and gets noticeably stronger toward the corners.
I wrote the code and it works in general.
Input image:
Result image:
But I feel that it could be done better and faster.
I've found a similar question, but it still has no answer.
How can I improve the algorithm's speed and avoid iterating over every pixel?
UPDATE:
It's not the same as a standard Gaussian or 2D filter blur with a constant kernel size.
import cv2
import numpy as np
import requests
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")

def blur(img=None, blur_radius=None, test=False):
    # test image loading
    if img is None:
        test = True
        print('test mode ON')
        print('loading image...')
        url = r'http://www.lenna.org/lena_std.tif'
        resp = requests.get(url, stream=True).raw
        img = np.asarray(bytearray(resp.read()), dtype="uint8")
        img = cv2.imdecode(img, cv2.IMREAD_COLOR)
        cv2.imwrite('img_input.png', img)
        print('image loaded')

    # channels splitting
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(img_lab)
    if test:
        cv2.imwrite('l_channel.png', l)
        print('l channel saved')

    # make blur map
    height, width = l.shape[:2]
    center = np.array([height/2, width/2])
    diag = ((height / 2) ** 2 + (width / 2) ** 2) ** 0.5
    blur_map = np.linalg.norm(
        np.indices(img.shape[:2]) - center[:, None, None] + 0.5,
        axis=0
    )
    if blur_radius is None:
        blur_radius = int(max(height, width) * 0.03)
    blur_map = blur_map / diag
    blur_map = blur_map * blur_radius
    if test:
        blur_map_norm = cv2.normalize(blur_map, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_32F)
        cv2.imwrite('blur_map.png', blur_map_norm)
        print('blur map saved')

    # very inefficient blur algorithm!!!
    l_blur = np.copy(l)
    for x in tqdm(range(width)):
        for y in range(height):
            kernel_size = int(blur_map[y, x])
            if kernel_size == 0:
                l_blur[y, x] = l[y, x]
                continue
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
            cut = l[
                max(0, y - kernel_size):min(height, y + kernel_size),
                max(0, x - kernel_size):min(width, x + kernel_size)
            ]
            if cut.shape == kernel.shape:
                cut = (cut * kernel).mean()
            else:
                cut = cut.mean()
            l_blur[y, x] = cut

    if test:
        cv2.imwrite('l_blur.png', l_blur)
        print('l_blur saved')
    if test:
        print('done')
    return l_blur

blur()
The only way to implement a filter where the kernel is different for every pixel is to create the kernel for each pixel and apply it in a loop, like OP's code does. The Fourier transform does not apply to this case. Python is a very slow language; the same algorithm implemented in a compiled language would be much faster. Unless there is some predefined structure in how the kernel is created at each pixel, there is no way to reduce the complexity of the algorithm.
For example, the uniform filter with a square kernel (commonly called the "box" filter) can be computed based on the integral image, using only 4 additions per pixel. This implementation should be able to choose a different kernel size at each pixel without any additional cost.
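To make that concrete, here is a minimal sketch of a variable-size box filter built on an integral image (my own illustration, not code from the question): each output pixel averages a square window whose half-width comes from a per-pixel size map, and every window sum costs only four lookups regardless of the window size. The Python loop itself is of course still slow; the point is that the per-pixel cost no longer grows with the kernel size.

import numpy as np

def variable_box_filter(img, size_map):
    """Average a (2r+1)x(2r+1) window around each pixel, with r taken from size_map."""
    h, w = img.shape
    # Integral image with an extra zero row/column so any window sum is 4 lookups.
    integral = np.zeros((h + 1, w + 1), dtype=np.float64)
    integral[1:, 1:] = np.cumsum(np.cumsum(img, axis=0), axis=1)
    out = np.empty((h, w), dtype=np.float64)
    for y in range(h):
        for x in range(w):
            r = int(size_map[y, x])
            y0, y1 = max(0, y - r), min(h, y + r + 1)
            x0, x1 = max(0, x - r), min(w, x + r + 1)
            # Window sum from the integral image: one addition, two subtractions.
            s = (integral[y1, x1] - integral[y0, x1]
                 - integral[y1, x0] + integral[y0, x0])
            out[y, x] = s / ((y1 - y0) * (x1 - x0))
    return out

In OP's setting, size_map would play the role of the blur_map above, rounded to integers.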
DIPlib has an implementation of an adaptive Gaussian filter [disclaimer: I'm an author of DIPlib, but I did not implement this functionality]. Here is the documentation.
This filter applies a Gaussian filter, but the Gaussian kernel is scaled and rotated differently at every pixel.
Lens blur is not a Gaussian, but it's not easy to see the difference by eye in most cases; the difference matters only if there is a very small dot with high contrast.
OP's case would be implemented as follows:
import diplib as dip
img = dip.ImageRead('examples/trui.ics')
blur_map = dip.CreateRadiusSquareCoordinate(img.Sizes())
blur_map /= dip.Maximum(blur_map)
img_blur = dip.AdaptiveGauss(img, [0, blur_map], sigmas=[5])
(the blur_map here is defined differently: I chose a quadratic function of the distance to the center because I think it looks really nice; use dip.CreateRadiusCoordinate() to reproduce OP's map).
I've chosen a maximum blur of 5 (this is the sigma, in pixels, of the Gaussian, not the footprint of the kernel), and blur_map here scales this sigma with a factor between 0 in the middle and 1 at the corners of the image.
Another interesting effect would be as follows, with increasing blur tangential to each circle centered in the middle of the image, with very little blur radially:
angle_map = dip.CreatePhiCoordinate(img.Sizes())
img_blur = dip.AdaptiveGauss(img, [angle_map, blur_map], sigmas=[8,1])
Here is one way to apply (uniform, non-varying) lens defocus blur in Python/OpenCV by transforming both the image and filter to the Fourier (frequency) domain.
Read the input
Take dft of input to transform to Fourier domain
Draw a white filled circle on a black background the size of the input as a mask (filter kernel). This is the defocus kernel in the spatial domain, i.e. a circular rect function.
Blur the circle slightly to anti-alias the edge
Roll the mask so that the center is at the origin (top left corner) and normalize so that the sum of values = 1
Take dft of mask to transform to Fourier domain, where its amplitude profile is a jinc function.
Multiply the two dft images to apply the blur
Take the idft of the product to transform back to spatial domain
Get the magnitude of the real and imaginary components of the product, clip and convert to uint8 as the result
Save the result
Input:
import numpy as np
import cv2
# read input and convert to grayscale
img = cv2.imread('lena_512_gray.png', cv2.IMREAD_GRAYSCALE)
# do dft saving as complex output
dft_img = np.fft.fft2(img, axes=(0,1))
# create circle mask
radius = 32
mask = np.zeros_like(img)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv2.circle(mask, (cx,cy), radius, 255, -1)
# blur the mask slightly to antialias
mask = cv2.GaussianBlur(mask, (3,3), 0)
# roll the mask so that center is at origin and normalize to sum=1
mask_roll = np.roll(mask, (256,256), axis=(0,1))  # 256,256 = half the 512x512 image size
mask_norm = mask_roll / mask_roll.sum()
# take dft of mask
dft_mask_norm = np.fft.fft2(mask_norm, axes=(0,1))
# apply dft_mask to dft_img
dft_shift_product = np.multiply(dft_img, dft_mask_norm)
# do idft saving as complex output
img_filtered = np.fft.ifft2(dft_shift_product, axes=(0,1))
# combine complex real and imaginary components to form (the magnitude for) the original image again
img_filtered = np.abs(img_filtered).clip(0,255).astype(np.uint8)
cv2.imshow("ORIGINAL", img)
cv2.imshow("MASK", mask)
cv2.imshow("FILTERED DFT/IFT ROUND TRIP", img_filtered)
cv2.waitKey(0)
cv2.destroyAllWindows()
# write result to disk
cv2.imwrite("lena_512_gray_mask.png", mask)
cv2.imwrite("lena_dft_numpy_lowpass_filtered_rad32.jpg", img_filtered)
Mask - Filter Kernel In Spatial Domain:
Result for Circle Radius=4:
Result for Circle Radius=8:
Result for Circle Radius=16:
Result for Circle Radius=32:
ADDITION
Using OpenCV for the dft, etc., rather than Numpy, the above becomes:
import numpy as np
import cv2
# read input and convert to grayscale
img = cv2.imread('lena_512_gray.png', cv2.IMREAD_GRAYSCALE)
# do dft saving as complex output
dft_img = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
# create circle mask
radius = 32
mask = np.zeros_like(img)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv2.circle(mask, (cx,cy), radius, 255, -1)
# blur the mask slightly to antialias
mask = cv2.GaussianBlur(mask, (3,3), 0)
# roll the mask so that center is at origin and normalize to sum=1
mask_roll = np.roll(mask, (256,256), axis=(0,1))
mask_norm = mask_roll / mask_roll.sum()
# take dft of mask
dft_mask_norm = cv2.dft(np.float32(mask_norm), flags = cv2.DFT_COMPLEX_OUTPUT)
# apply dft_mask to dft_img
dft_product = cv2.mulSpectrums(dft_img, dft_mask_norm, 0)
# do idft saving as complex output, then clip and convert to uint8
img_filtered = cv2.idft(dft_product, flags=cv2.DFT_SCALE+cv2.DFT_REAL_OUTPUT)
img_filtered = img_filtered.clip(0,255).astype(np.uint8)
cv2.imshow("ORIGINAL", img)
cv2.imshow("MASK", mask)
cv2.imshow("FILTERED DFT/IFT ROUND TRIP", img_filtered)
cv2.waitKey(0)
cv2.destroyAllWindows()
# write result to disk
cv2.imwrite("lena_512_gray_mask.png", mask)
cv2.imwrite("lena_dft_opencv_defocus_rad32.jpg", img_filtered)

What is wrong with my focus stacking algorithm?

You can see in the final focus stacked image that the whole image is in focus. However, pieces of the image are missing and I have no clue why. The basic steps of my algorithm are:
Access images. Convert the images to grayscale, blur the gray images a bit, then find the Laplacian of these images. I store all the Laplacian-filtered images in a list.
Cycle through the pixels of a blank image using for loops. Every iteration creates a list containing the pixel intensities of the gray, blurred, Laplacian-filtered images at that pixel position. Find the max pixel intensity. Then look at the BGR value of the ORIGINAL image where the max pixel intensity came from. Set the BGR value of the blank pixel equal to that of the max-intensity pixel.
Here is my code:
import glob2
import cv2 as cv
import numpy as np

images = glob2.glob("Pics\\step*")  # Accesses images in the Pics folder
laps = []  # A list to contain Laplacians of images in Pics
i = 0
for image in images:
    img = cv.imread(image)  # Reads image in Pics
    images[i] = img  # glob2 only returns image NAMES (strings), so replace each name with the actual image
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # Converts image to grayscale
    gauss = cv.GaussianBlur(img, (3,3), 0)  # Blurs grayed image a bit
    lap = cv.Laplacian(gauss, cv.CV_64F)  # Computes the Laplacian of the blurred, gray image
    lap = np.uint8(np.absolute(lap))  # Takes the absolute value and converts to uint8
    laps.append(lap)  # Adds Laplacian to laps
    i += 1

sample = laps[0]  # Arbitrarily accesses the first image in laps so that we can get its dimensions
fs = np.zeros((sample.shape[0], sample.shape[1], 3), dtype='uint8')  # Creates a blank image with the dimensions of sample

for x in range(sample.shape[0]):  # The for loops go through every x and y value
    for y in range(sample.shape[1]):
        intensities = [laps[0][x,y], laps[1][x,y], laps[2][x,y], laps[3][x,y], laps[4][x,y], laps[5][x,y]]  # List of intensities of the Laplacian images
        color = images[intensities.index(max(intensities))][x,y]  # BGR value of the x,y pixel in the ORIGINAL image with the highest Laplacian intensity
        fs[x, y] = color  # Sets the pixel of the blank fs image to the colour of the sharpest source pixel

cv.imshow('FS', fs)
Here is what the code produces:
Broken Focus Stacked Image
I was inspired by your code and made this simple script, which seems to work fine. (I do not need to align images.) Using a mask to select the in-focus pixels may be faster, but I haven't tried to compare both versions. I would appreciate any advice on how to improve it.
from pathlib import Path
from imageio import imread, imwrite
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2hsv, rgb2gray
from skimage import img_as_float, img_as_ubyte
from scipy.ndimage.filters import gaussian_filter
from skimage.filters.rank import gradient
from skimage.morphology import disk
im_dir = Path("test")
sigma = 3
print("_____ load images _____")
fps = [f for f in im_dir.glob("*.jpg")]
print([f.name for f in fps])
images_rgb = [imread(f) for f in fps]
images_rgb_cube = np.array(images_rgb)
print("images_rgb_cube", images_rgb_cube.shape, images_rgb_cube.dtype)
print("_____ images to grey _____")
#images_grey = [rgb2hsv(im)[:,:,2] for im in ims] # slow
images_grey = [rgb2gray(im) for im in images_rgb] # faster
print("_____ get gradients _____")
selection_element = disk(sigma) # matrix of n pixels with a disk shape
grads = [gradient(im, selection_element) for im in images_grey]
grads = np.array(grads)
print("grads", grads.shape, grads.dtype)
print("_____ get mask _____")
mask_grey = grads.max(axis=0, keepdims=1) == grads # https://stackoverflow.com/questions/47678252/mask-from-max-values-in-numpy-array-specific-axis
mask_rgb = np.repeat(mask_grey[:, :, :, np.newaxis], 3, axis=3)
print("mask_rgb", mask_rgb.shape, mask_rgb.dtype)
print("_____ apply mask _____")
image_sharp = images_rgb_cube * mask_rgb
image_sharp = image_sharp.max(axis=0)
print("image_sharp", image_sharp.shape, image_sharp.dtype)
print("_____ save image _____")
imwrite(im_dir / "stacked.jpeg", image_sharp)
plt.imshow(image_sharp)
plt.show()
print("_____ save masks _____")
print("mask_grey", mask_grey.shape, mask_grey.dtype)
for i in range(mask_grey.shape[0]):
    mask = mask_grey[i]
    fp = im_dir / "{}_mask.jpeg".format(fps[i].stem)
    imwrite(fp, img_as_ubyte(mask))
    print("saved", fp, mask.shape, mask.dtype)

How to eliminate the edges surrounding the differences in the SSIM image output of compare_ssim from skimage.measure

I am trying to write a code that calculates the area occupied by the tracks on a microscope image, as this:
As the tracks aren't uniform (I mean, they don't have a single grey level: they are darker at the edges and lighter in the centre), I can't do this just by comparing their grey level with the grey level of the background, because in some parts of the tracks it is the same.
Therefore, what I tried is to compare the image with an image of the background:
I do this in order to extract the differences between these two images (which correspond to the tracks themselves). In order to do this, I have used the compare_ssim function from skimage. The code I've used is the following:
from skimage.measure import compare_ssim
import imutils
import cv2
import numpy as np
# load the two input images
imageA = cv2.imread("./b.jpg")
imageB = cv2.imread("./a.jpg")
# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
# show the diff image
cv2.imshow("Diff", diff)
The diff image I obtain is the following:
This is quite good, because now the grey level in the tracks is well-differentiated from the grey level of the background, and I can calculate the area occupied by the tracks (there are some points in the track that are lighter, but they are few and it's okay for my purpose).
Nevertheless, the problem is that in the diff image not only do the tracks appear in black, but so does a thick border surrounding them:
This edge makes my area estimation incorrect.
So I would like to know how I can eliminate that edge, or at least make it thinner.
If this is not possible, it would be really helpful if you can show me another Python function that can achieve my purpose of calculating the area occupied by the tracks.
There is always a better way to do the same thing, but here I used a simple approach, which you can improve or tune later according to your needs:
Algorithm:
Firstly, proper thresholding will keep only the edges
Secondly, morphology (dilation or erosion, depending on your thresholding approach) will thin your edges.
Finally, to get rid of everything except your tracks, you can use a flood-fill algorithm, then count the white pixels to get your area (in pixels).
Results:
In the Final result:
The count of white pixels is: 52219
Code:
#========================
# Import Libraries
#========================
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage.morphology import flood_fill
#========================
# Read images
#========================
img = cv2.imread('1.png',0)
bck = cv2.imread('2.png',0)
#========================
# Gaussian Blur
#========================
gauss = cv2.GaussianBlur(img, (5,5), 1)
bck_gauss = cv2.GaussianBlur(bck, (5,5), 1)
#========================
# Thresholding
#========================
_,thresh1 = cv2.threshold(gauss,127,255,cv2.THRESH_BINARY)
_,bck_th = cv2.threshold(bck_gauss,127,255,cv2.THRESH_BINARY_INV)
# Get rid of black borders
thresh1 = thresh1 + bck_th
#========================
# Morphology
#========================
kernel = np.zeros((7,7),np.uint8)
kernel[2,:] = 1
kernel[:,2] = 1
dilation = cv2.dilate(thresh1,kernel,iterations = 1)
#========================
# Flood Fill
#========================
res = flood_fill(dilation,(1,1), 0)
print("The count of the white of pixels is: ", int(np.sum(res)/np.max(res)))
#========================
# Visualize Results
#========================
plt.figure(num='Blobs')
plt.subplot(221)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.axis('off')
plt.subplot(222)
plt.imshow(thresh1, cmap='gray')
plt.title('Thresholded')
plt.axis('off')
plt.subplot(223)
plt.imshow(dilation, cmap='gray')
plt.title('Morphology')
plt.axis('off')
plt.subplot(224)
plt.imshow(res, cmap='gray')
plt.title('Final Result')
plt.axis('off')
plt.show()

How to get histogram of intensity of individual masked cells in an image?

OK, so newbie here who has been working on a set of homework problems, with the original post here: How do I make a mask from one image and then transfer it to another?. The original idea was to take the DAPI image (grey image) and apply it as a mask to the NPM1 (green) image. After implementing the suggested code from HansHirse (thanks!) along with some other code I had been making for the homework problem, I finally got a working histogram of all compatible cells in the image. The "compatibility" bit is that any cells touching the border weren't supposed to be counted. However, I still need to find a way to get histograms of each individual cell as well. I've attached the original images from the post too:
To do this, I tried blob_doh and one other method to get segmented regions of each cell, but I have no idea how to apply these coordinates to an image for the histogram.
PS. The code is a bit messy. I segmented the code such that the blob_doh is near the bottom and the other method is also its own separate piece at the very bottom. Sorry!
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import cv2
import mahotas as mh
import scipy
from scipy import ndimage
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
# Read image into numpy array
image = cv2.imread("NOTREATDAPI.jpg",0)
dna = np.array(image) # must be gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
epsilon = np.array(cleared) #final masked DAPI product
print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Load and reset original images
image = cv2.imread("NOTREATDAPI.jpg",0) #The DAPI Image
image1 = cv2.imread("NOTREATNPM1.jpg",0) #The NPM1 Image
print("Original DAPI Image");plt.imshow(image);plt.show() #The DAPI Image
print("Original NPM1 Image");plt.imshow(image1);plt.show() #The NPM1 Image
# Create an array of bool of same shape as image
maskAboveThreshold = epsilon > 0 #Use mask array from above - include only values above non-masked zeros
print("Final Masked Image of NPM1"); plt.imshow(image1 *
maskAboveThreshold, cmap='gray')
plt.show()
True_NPM1= image1 * maskAboveThreshold # Final masked version of NPM1 set back to grayscale
# Create a binary mask from the masked NPM1 image by thresholding at 1
_, mask = cv2.threshold(True_NPM1, 1, 255, cv2.THRESH_BINARY)
# Do some morphological opening to get rid of small artifacts
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
                        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))
# Calculate the histogram using the NPM1 image and the obtained binary mask
hist = cv2.calcHist([image1], [0], mask, [256], [0, 256])
# Show bar plot of calculated histogram
plt.bar(np.arange(256), np.squeeze(hist))
plt.show()
# Show mask image
plt.imshow(mask)
plt.show()
#blob_doh way of segmenting the cells ------
import cv2 as cv
from PIL import Image, ImageDraw
image10 = np.array(Image.open("OXALIDAPI.jpg"))
plt.imshow(image10)
#Convert to gaussian image with thresholds
image10 = cv2.imread("OXALIDAPI.jpg",0)
dna = np.array(image10) # gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
image = np.array(cleared) #final masked DAPI product
#print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Convert image to grayscale
image_gray = rgb2gray(image)
plt.imshow(image_gray,cmap="gray")
def plot_blobs(img, blobs):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(img, interpolation='nearest')
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r*1.25, color="red", linewidth=1, fill=False)
        ax.add_patch(c)

# blob_doh
blobs_doh = blob_doh(image_gray, min_sigma=10, max_sigma=256, threshold=.025)
plot_blobs(image, blobs_doh)
#get blob coordinates
def filter_blobs(blobs, r_cutoff=5):
    new_blobs = []
    for b in blobs:
        if b[2] > r_cutoff:
            new_blobs.append(b)
    return new_blobs

new_blobs = filter_blobs(blobs_doh)
#plot_blobs(image,new_blobs)
print(new_blobs)
#Other method of segmenting cells. maybe useful?
yeta = cv2.imread("NOTREATDAPI.jpg",0)
image = np.array(yeta)
# apply threshold
dnaf = ndimage.gaussian_filter(image, 8)
T = mh.thresholding.otsu(dnaf) # set threshold
plt.imshow(dnaf > T)
epsilon=np.array(dnaf > T)
plt.show()
# remove artifacts connected to image border
cleared = clear_border(epsilon)
# label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # take regions with large enough areas
    if region.area >= 50:
        # draw rectangle around individual cells
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=0.5)
        ax.add_patch(rect)
#ax.set_axis_off()
#plt.tight_layout()
plt.show()
howzer=np.array(image_label_overlay)
What you are looking for is cv2.connectedComponents. Basically, once you have the binary mask that separates the cells, you label each connected component of the mask as one cell:
# I chose OTSU instead of a fixed binary threshold, but they are not much different in this case
_, mask = cv2.threshold(dapi, 25, 255, cv2.THRESH_OTSU)
# compute the connected components: num_labels is the label count, markers is the label image
num_labels, markers = cv2.connectedComponents(mask)
# load 2nd image in grayscale
# as your 2nd image is only green/black
npm1 = cv2.imread('npm1.jpg', cv2.IMREAD_GRAYSCALE)
# for your image (and usually), label 0 is the background
for label in range(1, num_labels):
    # compute the histogram over the entire 256 levels of intensity
    hist, bins = np.histogram(npm1[markers == label], bins=range(256))
    # do whatever you like to hist
    # note that bins has 256 edges, so hist has only 255 values
    plt.bar(bins[1:], hist)
    plt.title('cell number: {:}'.format(label))
So for example the histogram of the first and second cells:
And the cell markers are:

How can I find the center of the pattern and the distribution of a color around it on python with opencv/skimage?

I have an image that I want to process. I'm using OpenCV and skimage. My goal is to find the distribution of the red dots around the barycenter of all the dots. I proceed as follows: first I select the color, and then I binarize the image that I obtain. Eventually, I would just count the red pixels that lie in rings of a certain width around that barycenter, in order to get an average distribution with respect to the radius, assuming cylindrical symmetry.
My issue is that I have no idea how to find the position of the barycenter.
I would also like to know if there is a short way to count the red pixels in the rings.
Here is my code:
import cv2
import matplotlib.pyplot as plt
from skimage import io, filters, measure, color, external
I'm uploading the image:
sph = cv2.imread('image_sper.jpg')
sph = cv2.cvtColor(sph, cv2.COLOR_BGR2RGB)
plt.imshow(sph)
plt.show()
I want to select the red color. Following https://realpython.com/python-opencv-color-spaces/, I'm converting it to HSV and using a mask.
hsv_sph = cv2.cvtColor(sph, cv2.COLOR_RGB2HSV)
light_red = (1, 100, 100)
dark_red = (18, 255, 255)
mask = cv2.inRange(hsv_sph, light_red, dark_red)
result = cv2.bitwise_and(sph, sph, mask=mask)
And here is the result:
plt.imshow(result)
plt.show()
Now I'm binarizing the image, since it'll be easier to process it afterwards.
red_image = result[:,:,1]
red_th = filters.threshold_otsu(red_image)
red_mask = red_image > red_th;
red_mask.dtype ;
io.imshow(red_mask);
And here we are:
What I would like now is some help finding the barycenter of the white pixels.
Thx
Edit: The binarization gives the image boolean False/True values for the pixels. I don't know how to transform them into 0/1 pixels. If False were 0 and True were 1, code to find the barycenter would be:
np.shape(red_mask)
# (321, 316)
bari = 0
barj = 0
N = 0
for i in range(321):
    for j in range(316):
        bari = bari + red_mask[i,j]*i
        barj = barj + red_mask[i,j]*j
        N = N + red_mask[i,j]
bari = bari/N
barj = barj/N
Another question that should have been asked here: http://answers.opencv.org/questions/
But, let's go!
The process that I have implemented uses mostly structural analysis (https://docs.opencv.org/3.3.1/d3/dc0/group__imgproc__shape.html#ga17ed9f5d79ae97bd4c7cf18403e1689a)
First I got your image:
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import io, filters, measure, color, external
sph = cv2.imread('points.png')
ret,thresh = cv2.threshold(sph,200,255,cv2.THRESH_BINARY)
Then applied a morphological opening for noise reduction and converted the result to grayscale:
kernel = np.ones((2,2),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
opening = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY);
opening = cv2.convertScaleAbs(opening)
Then used "cv::findContours (InputOutputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset=Point())" to find all blobs.
After that, just calculate the center of each region and do a weighted average based on the contour area. This way, I got the points' centroid (X: 143.4202820443726, Y: 154.56471750651224).
im2, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

areas = []
centersX = []
centersY = []
for cnt in contours:
    areas.append(cv2.contourArea(cnt))
    M = cv2.moments(cnt)
    centersX.append(int(M["m10"] / M["m00"]))
    centersY.append(int(M["m01"] / M["m00"]))

full_areas = np.sum(areas)
acc_X = 0
acc_Y = 0
for i in range(len(areas)):
    acc_X += centersX[i] * (areas[i] / full_areas)
    acc_Y += centersY[i] * (areas[i] / full_areas)
print(acc_X, acc_Y)

cv2.circle(sph, (int(acc_X), int(acc_Y)), 5, (255, 0, 0), -1)
plt.imshow(sph)
plt.show()
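As a footnote on the asker's edit: a NumPy boolean array already behaves as 0/1 in arithmetic, so the barycenter, and even the per-ring red-pixel counts asked about in the question, can be computed without explicit loops. A rough sketch, assuming the red_mask array from the question and an arbitrary ring width:

import numpy as np
from scipy import ndimage

# Centre of mass of the True pixels; booleans count as 0/1 here.
bari, barj = ndimage.center_of_mass(red_mask)

# Radial distribution: bin every pixel by its distance to the barycenter.
ring_width = 5  # pixels, arbitrary
ii, jj = np.indices(red_mask.shape)
radius = np.sqrt((ii - bari) ** 2 + (jj - barj) ** 2)
ring_index = (radius // ring_width).astype(int)
red_per_ring = np.bincount(ring_index.ravel(), weights=red_mask.ravel().astype(float))
pixels_per_ring = np.bincount(ring_index.ravel())
red_fraction_per_ring = red_per_ring / pixels_per_ring  # average red occupancy in each ring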
