I have some microscopic images in which the precipitates occur both as isolated particles and, in some images, arranged in horizontal or vertical lines. How should I remove these lines?
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
import cv2
import math
from skimage import (
    color, feature, filters, measure, morphology, segmentation, util
)
# Sample1 - T61
image = cv2.imread(r"C:\Users\Stelle1.tif",cv2.IMREAD_GRAYSCALE)
assert image is not None, 'image not found'
fig, ax = plt.subplots()
ax.imshow(image, cmap='gray')
ax.axis('off')
plt.show()
fig, ax = plt.subplots(figsize=(5, 5))
qcs = ax.contour(image, origin='image')
ax.axis('off')
plt.show()
thresholds = filters.threshold_multiotsu(image, classes=3)
regions = np.digitize(image, bins=thresholds)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(image)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].imshow(regions)
ax[1].set_title('Multi-Otsu thresholding')
ax[1].axis('off')
plt.show()
cells = image > thresholds[0]
dividing = image > thresholds[1]
labeled_cells = measure.label(cells)
labeled_dividing = measure.label(dividing)
naive_mi = labeled_dividing.max() / labeled_cells.max()
print(naive_mi)
higher_threshold = 100
dividing = image > higher_threshold
smoother_dividing = filters.rank.mean(util.img_as_ubyte(dividing),
                                      morphology.disk(4))
binary_smoother_dividing = smoother_dividing > 20
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(binary_smoother_dividing)
ax.set_title('Dividing precipitate')
ax.axis('off')
plt.show()
Here is what I got with higher_threshold = 100. If I increase it further, I will lose the ellipse-shaped precipitates whose area and other properties I need to measure. Can you suggest a solution so that the algorithm does not detect the line-shaped precipitates?
Have you thought about using something like a Hough transform to detect the straight lines?
https://scikit-image.org/docs/dev/auto_examples/edges/plot_line_hough_transform.html
I basically lifted this straight from the above tutorial and got some pretty decent out-of-the-box results.
import matplotlib.pyplot as plt
from skimage import io
from skimage.transform import probabilistic_hough_line
from skimage.feature import canny
img = io.imread('GsSj9.png', as_gray=True) # read in the image
edges = canny(img) # use canny filter to detect edges
lines = probabilistic_hough_line(edges, threshold=20, line_length=20, line_gap=3)
# make plot of image and probabilistic_hough_line
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(img)
ax[0].set_title('image')
ax[1].imshow(img * 0)
for line in lines:
    p0, p1 = line
    ax[1].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[1].set_xlim((0, img.shape[1]))
ax[1].set_ylim((img.shape[0], 0))
ax[1].set_title('Probabilistic Hough')
You would still need to figure out a good way to make a binary image from the transform lines but it could be useful in your endeavor.
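For instance, here is a minimal sketch of one way to turn the detected segments into a binary line mask and subtract it from a threshold mask (this assumes the lines and img from the snippet above and the dividing mask from the question were computed on the same image; the dilation radius of 3 is a guess you would need to tune):
import numpy as np
from skimage.draw import line as draw_line
from skimage.morphology import binary_dilation, disk

# Rasterize the detected Hough segments into a boolean line mask.
line_mask = np.zeros(img.shape, dtype=bool)
for (x0, y0), (x1, y1) in lines:
    rr, cc = draw_line(y0, x0, y1, x1)  # skimage.draw uses (row, col) order
    line_mask[rr, cc] = True

# Thicken the one-pixel-wide segments so they cover the full width
# of the line-shaped precipitates (radius 3 is an assumption; tune it).
line_mask = binary_dilation(line_mask, disk(3))

# Suppress the line-shaped precipitates in the threshold mask.
precipitates_without_lines = dividing & ~line_mask
Anything that survives in precipitates_without_lines can then go through measure.label and regionprops as before.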
I want to make the colormap that is used in the attached image.
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from numpy.random import random_sample

img = plt.imread('/path/Screenshot 2022-04-12 at 2.14.16 PM.png')
colors_from_img = img[:, 0, :]
my_cmap = LinearSegmentedColormap.from_list('my_cmap', colors_from_img, N=651)
y = random_sample((100, 100))
plt.imshow(y, cmap=my_cmap); plt.colorbar()
Looking forward to your inputs
You just have to transpose the strategy linked in my comment from vertical to horizontal. To avoid random guessing, you first analyze the image dimensions, then estimate at what height the horizontal color strip sits (ca. 66/3) and what the step between the color patches is (ca. 616/11). Finally, you have to normalize the image for the range -0.3 ... 0.5 and tell matplotlib that values above and below this range should also be represented (extend="both"). This leads us to:
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, BoundaryNorm
import numpy as np
img = plt.imread('test.png')
#analyze image dimensions
#print(img.shape)
#>>> (66, 616, 4)
colors_from_img = img[22, 60::56, :]
#generate color map
my_cmap = LinearSegmentedColormap.from_list("my_cmap", colors_from_img, N=len(colors_from_img))
#normalize with boundaries
my_norm = BoundaryNorm(np.linspace(-0.3, 0.5, 9), my_cmap.N, extend="both")
y = 2*np.random.random_sample((20, 20))-1
plt.imshow(y, cmap=my_cmap, norm=my_norm)
plt.colorbar()
plt.show()
Sample output:
If you want only the color bar to be an image, I would suggest the colorbar tutorial in the matplotlib documentation for an example. I have set the color names to be similar to the colors in your image, but you can change them to whatever colors you like.
import matplotlib.pyplot as plt
import matplotlib as mpl
fig, ax = plt.subplots(figsize=(6, 0.5))
fig.subplots_adjust(bottom=0.5)
c = ['darkblue', 'lightblue', 'aquamarine', 'green', 'lime', 'yellow','orange','red']
cmap = (mpl.colors.ListedColormap(c)
        .with_extremes(over='purple', under='white'))
bounds = [-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
fig.colorbar(
    mpl.cm.ScalarMappable(cmap=cmap, norm=norm),
    cax=ax,
    boundaries=[-10] + bounds + [10],
    extend='both',
    extendfrac='auto',
    ticks=bounds,
    spacing='uniform',
    orientation='horizontal',
    #label='Custom extension lengths, some other units',
)
fig.savefig('my_colorbar.png')
plt.show()
I tried this piece of code
from skimage import io
temp = io.imread(mask_input_path)
plt.imshow(temp)
This displays it as a normal image, hence the output is black.
Just like a normal image. If your entire mask is black, that means the desired object is not present in your image.
But to select only the masked area you need two extra lines of code:
import matplotlib.pyplot as plt
input_img = plt.imread('img.jpg')
mask_img = plt.imread('mask.jpg')
# select only masked area below
masked = input_img.copy()
masked[mask_img == 0 ] = 0
fig, axes = plt.subplots(1, 3, figsize=(16, 12))
ax = axes.flatten()
ax[0].imshow(input_img, cmap="gray")
ax[0].set_axis_off()
ax[0].set_title("Original Imput Image", fontsize=12)
ax[1].imshow(mask_img, cmap="gray")
ax[1].set_axis_off()
ax[1].set_title("Mask", fontsize=12)
ax[2].imshow(masked, cmap="gray")
ax[2].set_axis_off()
ax[2].set_title("Masked", fontsize=12)
plt.show()
Actually, using
masked[mask_img < 30] = 0
gives slightly better results, because the mask values are not exactly zero in my case.
I am trying to plot multiple images in a figure using matplotlib.
Basically, I read the images using the PIL library, convert them to numpy arrays, and do some operation on them (setting the elements in a row to zero). Everything works fine up to this point. But when I try to save the results using matplotlib, I get inconsistent results.
Please have a look at my code.
Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as PI
Loading the file
fileName = 'n01978287_43.jpg'
img = PI.open(fileName)
size = 224
img = img.resize((size, size))
img = np.asarray(img, dtype=np.uint8).astype(np.float32)
img = img/255
Result 1
temp_img = np.copy(img)
temp_img[51, :, :] = 0*temp_img[51, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 6, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 6, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_516.png')
plt.close(fig)
Result 2
temp_img = np.copy(img)
temp_img[52, :, :] = 0*temp_img[52, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 6, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 6, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_526.png')
plt.close(fig)
Result 3
temp_img = np.copy(img)
temp_img[51, :, :] = 0*temp_img[51, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 2, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 2, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_512.png')
plt.close(fig)
Result 4
temp_img = np.copy(img)
temp_img[56, :, :] = 0*temp_img[56, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 2, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 2, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_562.png')
plt.close(fig)
Now, if you look at the results, you will notice the inconsistency.
Firstly, for the first two images (the figures with 6 axes), the black line shows up in only one of them. (There is a pattern to this if you zero out all the rows, one at a time, and then save the results.)
In the last two images, the black line gets thicker. (I didn't find any pattern in this case.)
System Setup - Python3, Matplotlib3, PIL, Numpy
Update:
After looking for ways to save a figure with the desired resolution (224×224 in this case), I wrote the following code (using multiple resources from the web).
Importing libraries and loading the image file
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
fileName = 'n01978287_43.jpg'
img = Image.open(fileName)
size = 224
img = img.resize((size, size))
img = np.asarray(img, dtype=np.uint8).astype(np.float32)
img = img/255
Function to plot the grid of images
def plt_save(grid, idx, name):
    nRows = len(grid)
    nCols = len(grid[0])
    print('Clearing figure')
    plt.rcParams.update({'font.size': 8})
    wFig = (nCols+2)  # Figure width (two more than nCols because I want to add ylabels on the very left and very right of the figure)
    hFig = (nRows+1)  # Figure height (one more than nRows because I want to add xlabels to the top of the figure)
    fig = plt.figure(figsize=(wFig, hFig))
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
    fig.patch.set_facecolor('grey')
    for r in range(nRows):
        for c in range(nCols):
            ax = plt.subplot2grid(shape=[hFig, wFig], loc=[r+1, c+1])
            im = ax.imshow(grid[r][c], interpolation='none')
            ax.spines['bottom'].set_visible(False)
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['left'].set_visible(False)
            ax.set_xticks([])
            ax.set_yticks([])
            #fig.colorbar(im, ax=ax)
            #ax.set_aspect('auto')
            if not r:
                ax.set_title('Image',
                             rotation=22.5,
                             horizontalalignment='left',
                             verticalalignment='bottom')
            if not c:
                ax.set_ylabel('leftLabel',
                              rotation=0,
                              horizontalalignment='right',
                              verticalalignment='center')
            if c == wFig-3:
                ax2 = ax.twinx()
                #ax2.axis('off')
                ax2.set_xticks([])
                ax2.set_yticks([])
                ax2.spines['top'].set_visible(False)
                ax2.spines['right'].set_visible(False)
                ax2.spines['bottom'].set_visible(False)
                ax2.spines['left'].set_visible(False)
                ax2.set_ylabel('rightLabel',
                               rotation=0,
                               verticalalignment='center',
                               horizontalalignment='left')
    print('Saving file')
    plt.savefig((str(idx) + '_' + name + '_' + fileName.split('.')[0] + '.png'),
                orientation='landscape',
                #bbox_inches='tight',
                facecolor=fig.get_facecolor(),
                dpi=224,  # DPI is 224 because the axis size is 1x1 inch and I want 224x224 pixels in each axis
                transparent=True,
                frameon=False)
    plt.close(fig)
Loop to zero out the rows of the image (one at a time)
for i in range(0, 224):
    temp_img = np.copy(img)
    temp_img[i, :, :] = 0*temp_img[i, :, :]
    # 1x4 grid of images (can vary based on the requirement)
    grid = [img, temp_img, img, temp_img]
    grid = [grid, grid]  # 2x4 grid of images
    plt_save(grid, i, 'PLT_')
Here is what one of the 224 images looks like.
The thing is that it works perfectly as long as I stick with this kind of plot. But the moment I try to make some changes (like adding a colorbar, having some space between the axes, etc.), the image resolution changes. If I use bbox_inches='tight' while saving the figure, it adjusts everything but changes the original resolution while keeping the figure size constant.
Is there any other way, similar to bbox_inches='tight', to keep the axis resolution fixed while adjusting the figure size accordingly? Or, if there is no such thing in matplotlib, could you suggest another way to incorporate a colorbar (small spaces between the axes, a ylabel for each axis, etc.) while keeping the image resolution fixed?
The image you start with has 224 pixels in height.
In the first two cases you distribute those over 72 pixels in the resulting image. This means any row of the image has a 72/224 = 32% chance of showing up in the final plot. In row number 52 you are lucky and hit this one-in-three chance.
In the second two cases the resulting image is 226 pixels in height (i.e. just slightly larger than the original). Here you have a 2/224 = 0.9% chance that a row will occupy two pixels. In the case of row no. 56 you hit that unlucky chance.
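If the goal is a pixel-exact dump of each image, one option (a minimal sketch, not taken from the post above) is to size the figure so that one image pixel maps to exactly one output pixel, which sidesteps the resampling lottery described above entirely:
import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(224, 224, 3)  # stand-in for the 224x224 image
dpi = 100
h, w = img.shape[:2]

# Figure of exactly w x h pixels, with the axes filling it completely,
# so every image row lands on exactly one pixel row in the output.
fig = plt.figure(figsize=(w / dpi, h / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1])  # no margins, no room for resampling
ax.imshow(img, interpolation='none')
ax.set_axis_off()
fig.savefig('pixel_exact.png', dpi=dpi)  # saved output is exactly 224x224 pixels
plt.close(fig)
This leaves no room for colorbars or labels, but it guarantees that a zeroed row always shows up, exactly one pixel thick.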
I have a .png image with an alpha channel and a random pattern generated with numpy.
I want to superpose both images using matplotlib. The bottom image must be the random pattern, and over this I want to see the second image (attached at the end of the post).
The code for both images is the following:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Random image pattern
fig, ax = plt.subplots(figsize=(20, 4))
x = np.arange(0,2000,1)
y = np.arange(0,284,1)
X,Y = np.meshgrid(x,y)
Z = 0.6+0.1*np.random.rand(284,2000)
Z[0,0] = 0
Z[1,1] = 1
# Plot the density map using nearest-neighbor interpolation
plt.pcolormesh(X,Y,Z,cmap = cm.gray)
The result is the following image:
To import the image, I use the following code:
# Sample data
fig, ax = plt.subplots(figsize=(20, 4))
# Load and display the overlay image
img = plt.imread("good_image_2.png")
plt.imshow(img)
print(img.shape)
The image is the following:
Thus, the final result that I want is:
You can make an image-like array for Z and then just use imshow to display it before the image of the buttons, etc. Note that this only works because your png has an alpha channel.
Code:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Plot the density map using nearest-neighbor interpolation
img = plt.imread("image.png")
(xSize, ySize, cSize) = img.shape
x = np.arange(0,xSize,1)
y = np.arange(0,ySize,1)
X,Y = np.meshgrid(x,y)
Z = 0.6+0.1*np.random.rand(xSize,ySize)
Z[0,0] = 0
Z[1,1] = 1
# We need Z to have red, blue and green channels
# For a greyscale image these are all the same
Z=np.repeat(Z,3).reshape(xSize,ySize,3)
fig = plt.figure(figsize=(20,8))
ax = fig.add_subplot(111)
ax.imshow(Z, interpolation='none')
ax.imshow(img, interpolation='none')
fig.savefig('output.png')
Output:
You can also turn off axes if you prefer.
ax.axis('off')
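Note that the overlay above only works because the png already has an alpha channel. If yours did not, here is a minimal sketch of fabricating one (assuming an RGB png, and assuming, purely for illustration, that near-white pixels should become transparent):
import numpy as np
import matplotlib.pyplot as plt

rgb = plt.imread("image.png")[..., :3]           # keep only the RGB channels
rgba = np.dstack([rgb, np.ones(rgb.shape[:2])])  # add a fully opaque alpha channel
rgba[(rgb > 0.95).all(axis=-1), 3] = 0.0         # make near-white pixels transparent
Passing rgba to the second ax.imshow call would then let the Z pattern show through.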
I am trying to use SLIC to obtain superpixels and get semantic segmentation of an image.
from skimage.segmentation import slic
import cv2

img = cv2.imread(img_name)
segments = slic(img, n_segments=numSegments, sigma=3, convert2lab=True, max_iter=25)
How do I get the box2d for each of the segments? And if there is a hierarchical tree of the segments, how do I fetch it?
I did not read the original paper, but according to the documentation it does not return a hierarchy.
I assume that by box2d you mean bounding boxes, so I used the skimage regionprops example to get a bounding box for each superpixel returned by SLIC.
Result:
Code:
from skimage.segmentation import slic
from skimage.data import astronaut
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage.measure import label
from skimage.measure import regionprops
from skimage.color import label2rgb
img = astronaut()
segments = slic(img, n_segments=50, compactness = 100)
image_label_overlay = label2rgb(segments, image=img)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(segments):
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)
plt.show()
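If you do need something hierarchy-like on top of the flat SLIC labels, one option (a sketch based on scikit-image's region adjacency graph tools, not part of SLIC itself; in older scikit-image versions these live under skimage.future.graph) is to merge superpixels hierarchically by color similarity:
import numpy as np
from skimage import data, graph, segmentation

def merge_mean_color(g, src, dst):
    # Fold the color statistics of src into dst when two regions merge.
    g.nodes[dst]['total color'] += g.nodes[src]['total color']
    g.nodes[dst]['pixel count'] += g.nodes[src]['pixel count']
    g.nodes[dst]['mean color'] = (g.nodes[dst]['total color'] /
                                  g.nodes[dst]['pixel count'])

def weight_mean_color(g, src, dst, n):
    # Edge weight = color distance between the merged region and a neighbor.
    diff = g.nodes[dst]['mean color'] - g.nodes[n]['mean color']
    return {'weight': np.linalg.norm(diff)}

img = data.astronaut()
labels = segmentation.slic(img, n_segments=400, compactness=30)
rag = graph.rag_mean_color(img, labels)
merged = graph.merge_hierarchical(labels, rag, thresh=35, rag_copy=False,
                                  in_place_merge=True,
                                  merge_func=merge_mean_color,
                                  weight_func=weight_mean_color)
Each merging threshold gives you coarser regions, which you can feed to regionprops exactly like the raw superpixels above.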