skimage.measure.regionprops labels' corresponding measurements? - python

Using the scikit-image library, I was analysing the defects' area and mean diameter. Here is the code along with the resulting segmented regions.
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import measure, io, img_as_ubyte
from skimage.color import label2rgb, rgb2gray
img = cv2.imread("F:\py_image_pro\pore.jpg", 0)
scale = 0.086 #1 pixel in microns
from skimage.filters import threshold_otsu
threshold = threshold_otsu(img)
thresholded_img = img < threshold
#plt.imshow(thresholded_img, cmap='gray')
#plt.show()
from skimage.segmentation import clear_border
edge_touching_removed = clear_border(thresholded_img)
label_image = measure.label(edge_touching_removed, connectivity=img.ndim)
#plt.imshow(label_image)
#plt.show()
image_label_overlay = label2rgb(label_image, image=img)
plt.imshow(image_label_overlay)
plt.show()
props = measure.regionprops_table(label_image, img, properties=['label', 'area', 'equivalent_diameter', 'mean_intensity', 'solidity'])
import pandas as pd
df = pd.DataFrame(props)
df = df[df['area'] > 20]
df['area_in_microns'] = df['area'] * (scale**2)
df['equivalent_diameter_microns'] = df['equivalent_diameter'] * (scale)
print(df.head())
I used regionprops to measure the segmented regions (segmented image above).
I would like to know if there is any way to display the labels in the output image so that each segmented label's corresponding measurements can be identified.

Are you asking whether you can display a colormapped version of the measurements on top of the image? If so, the answer is yes! You can use skimage.util.map_array for this.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import (
    color, data, filters, measure,
    morphology, segmentation, util
)
# grab the image
coins = data.coins()
# segment the image; from:
# https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_expand_labels.html
edges = filters.farid(coins)
markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2
watershed = segmentation.watershed(edges, markers)
segmented_raw = measure.label(watershed == 2)
# remove tiny background objects due to noise
segmented = morphology.remove_small_objects(segmented_raw, 64)
# measure regionprops
table = pd.DataFrame(measure.regionprops_table(
    segmented, coins, properties=('label', 'area')
))
# map the labels to measured properties
colored_by_area = util.map_array(
    segmented,
    np.asarray(table['label']),
    np.asarray(table['area']).astype(float),
)
# set 0 to nan, so it appears as transparent in pyplot.imshow
colored_by_area[colored_by_area==0] = np.nan
# display the results
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
colored_by_label = color.label2rgb(segmented, image=coins, bg_label=0)
axes[0].imshow(colored_by_label)
axes[0].set_axis_off()
axes[0].set_title('segmentation')
axes[1].imshow(coins, cmap='gray')
axim = axes[1].imshow(colored_by_area, cmap='viridis')
axes[1].set_axis_off()
axes[1].set_title('segment area')
plt.colorbar(axim, ax=axes[1], fraction=0.05, label='area (px)')
plt.show()
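Applied to the variables from your own snippet (label_image, img and the filtered df), a minimal sketch might look like the following; note that annotating the label numbers at the region centroids is an assumption about what you want to display, not something taken from your code.
# minimal sketch, assuming label_image, img and df from the question above
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure, util

diam_map = util.map_array(
    label_image,
    np.asarray(df['label']),
    np.asarray(df['equivalent_diameter_microns']).astype(float),
)
diam_map[diam_map == 0] = np.nan  # background and filtered-out regions become transparent
fig, ax = plt.subplots()
ax.imshow(img, cmap='gray')
axim = ax.imshow(diam_map, cmap='viridis')
kept = set(df['label'])
# write each kept region's label number at its centroid
for region in measure.regionprops(label_image):
    if region.label in kept:
        y, x = region.centroid
        ax.text(x, y, str(region.label), color='red', ha='center', va='center')
plt.colorbar(axim, ax=ax, fraction=0.05, label='equivalent diameter (µm)')
plt.show()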

Related

Replacing values in a massive numpy array

Good evening all, I'm really struggling with my code. I've made a 1D spectrum from a fits file. I've extracted the numerical values for each point along the file, but there are vertical lines of overexposed pixel values. I want to replace all values above 3000 with 0. This is what I've done so far:
import astropy as ap
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from astropy.io import fits
from pathlib import Path
from astropy.nddata import CCDData
from ccdproc import ImageFileCollection
import ccdproc as ccdp
from os import listdir, walk
import astropy.units as u
# this function converts the class astropy.io.fits.hdulist.HDUList to a numpy array as ccd data
fitsfile = fits.open("img/HLXSpectrum.fits")
def spec(fitsfile):
    specList = fits.open("img/HLXSpectrum.fits", include_path=True)
    imgList = []
    for img in specList:
        ccd = CCDData(fitsfile[0].data, unit="adu")
        HLX = ccdp.trim_image(ccd, fits_section="[:2050, 480:840]")
        imgList.append(ccd)
    fitsfile.close()
    specImg = CCDData(ccd, unit="adu")
    return specImg
specImg = spec(fitsfile)
skyarray1 = specImg[180:220, 50:2045]
spectrum1 = np.array(skyarray1)
skyarray2 = specImg[220:260, 50:2045]
spectrum2 = np.array(skyarray2)
skyarray3 = specImg[140:180, 50:2045]
spectrum3 = np.array(skyarray3)
spectrumA = spectrum2 - spectrum3
spectrum = spectrumA - spectrum1
flux = []
pixel = []
fix = np.where(spectrum > 3000, spectrum, 0)
for i in range(len(fix[1])): # cropped img in x dimension
    flux.append(np.sum(skyarray1[:, i]))
    pixel.append(i)
plt.figure(figsize=(20, 16), dpi=800)
plt.plot(pixel, flux, color="red")
fig1 = plt.gcf()
plt.show()
# fig1.savefig("flux.png", dpi=800)
but no matter what I do, the image stays the same, even though the values in the arrays change. Why?
The problem comes down to what you're plotting here:
fix = np.where(spectrum > 3000, spectrum, 0)
for i in range(len(fix[1])): # cropped img in x dimension
    flux.append(np.sum(skyarray1[:, i]))
    pixel.append(i)
plt.figure(figsize=(20, 16), dpi=800)
plt.plot(pixel, flux, color="red")
fig1 = plt.gcf()
plt.show()
You're plotting flux, which is taking values from skyarray1, which has not been modified. I think you want to replace it with fix like this:
for i in range(len(fix[1])): # cropped img in x dimension
    flux.append(np.sum(fix[:, i]))
    pixel.append(i)
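As a side note, np.where(condition, x, y) takes values from x where the condition is true, so fix as written keeps the values above 3000 and zeroes everything else. If the intention really is to replace values above 3000 with 0, the last two arguments should be swapped; a minimal sketch:
fix = np.where(spectrum > 3000, 0, spectrum)  # values above 3000 become 0, everything else is kept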

How do I crop all TIFF images on a white background in a folder with python3?

I'm scanning a book and now I'm on the final step. I need to crop each square image to the content in the center, removing the white background, without preserving the aspect ratio. Some libraries do not support the TIFF format.
How can I do that ?
I combined some examples from How do I crop an image on a white background with python? and other questions. It works:
from PIL import Image
from skimage import io, img_as_float
import sys
import numpy as np
import glob
import matplotlib.pyplot as plt
filePaths = glob.glob("*.tif")
for filePath in filePaths:
    image = img_as_float(io.imread(filePath))
    white = np.array([1, 1, 1, 1])
    mask = np.abs(image - white).sum(axis=2) < 0.05
    coords = np.array(np.nonzero(~mask))
    top_left = np.min(coords, axis=1)
    bottom_right = np.max(coords, axis=1)
    out = image[top_left[0]:bottom_right[0],
                top_left[1]:bottom_right[1]]
    plt.imshow(out)
    # plt.show()
    fig = plt.gcf()
    for ax in fig.axes:
        ax.axis('off')
        ax.margins(0,0)
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
    fig.savefig(filePath + ".png", dpi=1200, bbox_inches="tight", pad_inches=0)
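As a side note (not part of the original answer), if the matplotlib savefig round-trip loses resolution or adds padding, the cropped pixel data could be written out directly instead; a minimal sketch, assuming out is the cropped float image from the loop above and using a hypothetical output name:
from skimage import io, img_as_ubyte
io.imsave(filePath + "_cropped.png", img_as_ubyte(out))  # write the cropped array directly, no figure involved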

Using imshow methods in a for loop to print multiple images

I have a simple 2D numpy array which is a pixel map of a grayscale image. I am trying to display some parts of the image. My code is:
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread('/content/drive/My Drive/Colab Notebooks/sample2.jpg') # the source file is correctly mounted
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
And
i = 0
while i < (len(roi) - 1): # roi is a list of strictly increasing positive integers
    print(roi[i], roi[i+1])
    plt.imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    i += 1
For example, if roi = [10, 40, 50, 100], it should display multiple parts of the image. But when I run the cell above, it only shows one image, which is the last part. Is it possible not to overwrite the earlier images and display them all?
You should try calling plt.show() after each plt.imshow(...):
i = 0
while i < (len(roi) - 1): # roi is a list of strictly increasing positive integers
    print(roi[i], roi[i+1])
    plt.imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    plt.show() # <----- this will show all plots
    i += 1
Or, if you want a nicer, more organized figure, you could use subplots, although you need to state how many subplots you want. Here is an example with random input:
import matplotlib.pyplot as plt
import numpy as np
ims = np.random.randn(3, 224, 224)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    ax[i].imshow(ims[i])
This last example will plot the images arranged horizontally:
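Adapting the subplot idea to the roi slices from the question is straightforward; a minimal sketch, assuming img_gray and roi as defined above:
n = len(roi) - 1
fig, axes = plt.subplots(1, n, squeeze=False)  # squeeze=False keeps indexing uniform even for a single slice
for i in range(n):
    axes[0, i].imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    axes[0, i].set_axis_off()
plt.show()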

How can I make the pixels of each intensity value in an individual list in python?

I need to group the pixels with the same intensity value into a list, so that I end up with a list of lists: one list per intensity value, each holding the pixels with that intensity.
import nibabel as nib
import numpy as np
from PIL import Image
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import asarray
from scipy import ndimage, misc, stats
import cv2
### Load the image
img = nib.load('input_dir/FLAIR1.nii.gz')
img_data = img.get_fdata()
imgs = img_data[:, :, 23]
rows, col = imgs.shape
my_nlist = [[] for k in range(y.shape[0])]
print(im_arr[2,2])
for i in range(0, rows):
    for j in range(0, col):
        pixel = imgs[i, j]
        for k in range(y.shape[0]):
            if pixel == [k]:
                my_nlist[k].append(pixel)
Don't make it harder on yourself than you have to :) this is exactly what a histogram is meant for!
import matplotlib.pyplot as plt
import cv2 as cv # cv2 is only needed here to load an example image
img = cv.imread('dir/to/img', 0) # notice the grayscale flag
hist, bins, _ = plt.hist(img.ravel(), bins=256) # hist contains the pixel count for each intensity bin
plt.show() # in case you want to visualize it
This is what it will look like:
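If you actually need the pixels themselves grouped into one list per intensity value, rather than just the counts, a minimal numpy sketch (assuming imgs is the 2D slice from the question) could look like this:
import numpy as np
flat = imgs.ravel()
values, inverse = np.unique(flat, return_inverse=True)
my_nlist = [flat[inverse == k].tolist() for k in range(len(values))]
# my_nlist[k] holds every pixel whose intensity equals values[k]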

How can I change colors in contours (obtained from non-Python) with Python?

I am trying to convert the color map of a contour plot generated by a non-Python application. I tried using Matthias Bussonnier's code available here, but it is unable to give me a full conversion. I also tried truncating the color map, but that again does not give me a complete conversion.
MWE
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
import matplotlib.image as mpimg
from scipy.spatial import cKDTree
import matplotlib
import matplotlib.cm as mplcm
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    new_cmap = colors.LinearSegmentedColormap.from_list(
        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)))
    return new_cmap
cmap = plt.get_cmap('jet')
cmap = truncate_colormap(cmap, 0.1, 0.9)
img = mpimg.imread('./test.png')[:,:,:3]
##interact(sub=(0, 500), d=(0,1,0.05))
def convert(sub=256, d=0.1, cin=cmap, cout='viridis'):
    viridis = plt.get_cmap(cout)
    jet = plt.get_cmap(cin)
    jet256 = colors.makeMappingArray(sub, jet)[:, :3]
    K = cKDTree(jet256)
    oshape = img.shape
    img_data = img.reshape((-1, 3))
    res = K.query(img_data, distance_upper_bound=d)
    indices = res[1]
    l = len(jet256)
    indices = indices.reshape(oshape[:2])
    remapped = indices
    indices.max()
    mask = (indices == l)
    remapped = remapped / (l-1)
    mask = np.stack([mask]*3, axis=-1)
    blend = np.where(mask, img, viridis(remapped)[:,:,:3])
    fig, ax = plt.subplots()
    fig.set_figheight(10)
    fig.set_figwidth(10)
    ax.imshow(blend)
    fig.savefig('viridize.pdf')
convert()
Input image
Output image
How do I get a complete conversion of the color map (jet in this case) to viridis with Python?
As commented, the solution from How I can specify how rainbow color scheme should be converted to grayscale will work, but with some small modifications: you need to apply your target colormap to the values obtained from that solution, and hence change the resulting array to be 3D.
The conditions for this to work are:
You know the colormap that the original image has been produced with (origin_cmap).
All colors in that image are either greyscale (axes, text, etc.) or part of that origin_cmap, i.e. there should not be any other line plot or similar in the figure.
The original colormap is unambiguous, i.e. it does not contain the same color twice.
The full range of the original colormap has been used to create the input image, and the full range of the target colormap will be aimed for. (This condition can be weakened if needed, by specifying a different norm and/or range.)
The following will hence "viridify" a given image.
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
image = plt.imread("https://i.stack.imgur.com/NyLq2.png")
def changecolormap(image, origin_cmap, target_cmap):
    r = np.linspace(0, 1, 256)
    norm = matplotlib.colors.Normalize(0, 1)
    mapvals = origin_cmap(norm(r))[:, :3]

    def get_value_from_cm(color):
        color = matplotlib.colors.to_rgb(color)
        # if color is already gray scale, don't change it
        if np.std(color) < 0.1:
            return color
        # otherwise return value from colormap
        distance = np.sum((mapvals - color)**2, axis=1)
        return target_cmap(r[np.argmin(distance)])[:3]

    newim = np.zeros_like(image)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            c = image[i, j, :3]
            newim[i, j, :3] = get_value_from_cm(c)
    return newim
fig, (ax,ax2) = plt.subplots(ncols=2)
ax.imshow(image)
ax2.imshow(changecolormap(image, plt.cm.jet, plt.cm.viridis))
ax.axis("off")
ax2.axis("off")
plt.show()
