Using imshow methods in a for loop to print multiple images - python

I have a simple 2D numpy array which is a pixel map of a grayscale image. I am trying to print some parts of the image. My code is:
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread('/content/drive/My Drive/Colab Notebooks/sample2.jpg') # the source file is correctly mounted
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
And
i = 0
while i < (len(roi) - 1):  # roi is a list of strictly increasing positive integers
    print(roi[i], roi[i+1])
    plt.imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    i += 1
For example, if roi = [10, 40, 50, 100], it should print two parts of the image. But as I run the cell above, it only prints one image, which is the last part of the image. Is it possible not to overwrite the other images and to print them all?

You should try calling plt.show() after each plt.imshow(...):
i = 0
while i < (len(roi) - 1):  # roi is a list of strictly increasing positive integers
    print(roi[i], roi[i+1])
    plt.imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    plt.show()  # <----- this will show all plots
    i += 1
Or, if you want a nicer, more organized figure, you could use subplots, although you have to state how many subplots you want up front. Here is an example with random input:
import matplotlib.pyplot as plt
import numpy as np

ims = np.random.randn(3, 224, 224)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    ax[i].imshow(ims[i])
plt.show()
This last example will plot the images arranged horizontally:
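Applied to the question's setup, a hedged sketch of the same idea, reusing img_gray and roi from the question:
n = len(roi) - 1  # number of slices; assumes n >= 2 so `ax` is an array of axes
fig, ax = plt.subplots(1, n)
for i in range(n):
    ax[i].imshow(img_gray[roi[i]:roi[i+1]], cmap='gray')
    ax[i].axis('off')
plt.show()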

Related

skimage.measure.regionprops labels' corresponding measurements?

Using the scikit-image library, I was analysing the defects' area and mean diameter. Here is the code and the respective segmented regions.
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import measure, io, img_as_ubyte
from skimage.color import label2rgb, rgb2gray
img = cv2.imread(r"F:\py_image_pro\pore.jpg", 0)  # 0 reads the image in grayscale
scale = 0.086  # 1 pixel = 0.086 microns
from skimage.filters import threshold_otsu
threshold = threshold_otsu(img)
thresholded_img = img < threshold
#plt.imshow(thresholded_img, cmap='gray')
#plt.show()
from skimage.segmentation import clear_border
edge_touching_removed = clear_border(thresholded_img)
label_image = measure.label(edge_touching_removed, connectivity=img.ndim)
#plt.imshow(label_image)
#plt.show()
image_label_overlay = label2rgb(label_image, image=img)
plt.imshow(image_label_overlay)
plt.show()
props = measure.regionprops_table(label_image, img, properties=['label', 'area', 'equivalent_diameter', 'mean_intensity', 'solidity'])
import pandas as pd
df = pd.DataFrame(props)
df = df[df['area'] > 20]
df['area_in_microns'] = df['area'] * (scale**2)
df['equivalent_diameter_microns'] = df['equivalent_diameter'] * (scale)
print(df.head())
I used regionprops to measure the segmented regions. Segmented image
I would like to know if there is any way to display the labels in the output image, so that each segmented label's corresponding measurements can be known.
Are you asking whether you can display a colormapped version of the measurements on top of the image? If so, the answer is yes! You can use skimage.util.map_array for this.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import (
    color, data, filters, measure,
    morphology, segmentation, util
)
# grab the image
coins = data.coins()
# segment the image; from:
# https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_expand_labels.html
edges = filters.farid(coins)
markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2
watershed = segmentation.watershed(edges, markers)
segmented_raw = measure.label(watershed == 2)
# remove tiny background objects due to noise
segmented = morphology.remove_small_objects(segmented_raw, 64)
# measure regionprops
table = pd.DataFrame(measure.regionprops_table(
    segmented, coins, properties=('label', 'area')
))
# map the labels to measured properties
colored_by_area = util.map_array(
    segmented,
    np.asarray(table['label']),
    np.asarray(table['area']).astype(float),
)
# set 0 to nan, so it appears as transparent in pyplot.imshow
colored_by_area[colored_by_area==0] = np.nan
# display the results
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
colored_by_label = color.label2rgb(segmented, image=coins, bg_label=0)
axes[0].imshow(colored_by_label)
axes[0].set_axis_off()
axes[0].set_title('segmentation')
axes[1].imshow(coins, cmap='gray')
axim = axes[1].imshow(colored_by_area, cmap='viridis')
axes[1].set_axis_off()
axes[1].set_title('segment area')
plt.colorbar(axim, ax=axes[1], fraction=0.05, label='area (px)')
plt.show()
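If you also want the label numbers themselves printed on the image, a hedged sketch along the same lines: it reuses segmented and coins from above and places each label at its region centroid ('centroid-0' and 'centroid-1' are the row and column columns produced by regionprops_table).
# a hedged sketch: annotate each region with its label number at its centroid
centroids = pd.DataFrame(measure.regionprops_table(
    segmented, coins, properties=('label', 'centroid')
))
fig, ax = plt.subplots()
ax.imshow(color.label2rgb(segmented, image=coins, bg_label=0))
for _, row in centroids.iterrows():
    ax.text(row['centroid-1'], row['centroid-0'], int(row['label']),
            color='white', ha='center', va='center')
ax.set_axis_off()
plt.show()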

Keep the original shape of the array as the image

I have some data. I visualize it and then save it as an image.
import cv2
import numpy as np
import matplotlib.pyplot as plt
data = np.array([
    [1, 2, 0, 1],
    [0, 1, 2, 1],
    [0, 0, 2, 1]])
fig, ax = plt.subplots()
ax.imshow(data)
ax.axis('off')
fig.savefig("test.png", bbox_inches='tight', pad_inches=0)
Next, I load the image and read the shape:
img = cv2.imread('test.png')
print(img.shape)
Output:
(217, 289, 3)
But I want to keep the original resolution, and my expected output is:
(3, 4, 3)
Any solution?
Update:
With dpi=1:
data = np.array([
    [1, 2, 0, 1],
    [0, 1, 2, 1],
    [0, 0, 2, 1],
    [1, 0, 2, 1],
    [4, 1, 0, 2],
])
fig, ax = plt.subplots()
ax.imshow(data)
ax.axis('off')
fig.savefig("test.png", bbox_inches='tight', pad_inches=0, dpi=1)
img = cv2.imread('test.png')
print(data.shape, img.shape)
Output:
(5, 4) (3, 2, 3)
Since you're using two different libraries for creating the image and reading it back, it is difficult to retain the array size, as no such information is stored with the image.
The dpi is also specific to your monitor screen and hence is not recommended. Refer to the answer here for more on this.
Also, you're trying to write the image as a 2D array, but when cv2.imread() reads it, it also considers the color channel and adds a third dimension. To avoid this, you need to read the image as a grayscale image.
I would suggest that you use cv2.imwrite() to generate the image (it works similarly to plt.savefig()) and then read the image back with cv2.imread() in grayscale mode.
import cv2
import numpy as np

data = np.array([
    [1, 2, 0, 1],
    [0, 1, 2, 1],
    [0, 0, 2, 1]])
cv2.imwrite("test.png", data)
img = cv2.imread("test.png", 0)  # using 0 to read in grayscale mode
print(data.shape, img.shape)
Output:
(3, 4) (3, 4)
Creating the image with imshow is totally unnecessary; you can simply compute the matrix of RGBA values that you are interested in:
import numpy as np
import matplotlib as mp

data = np.array([[1, 2, 0, 1], [0, 1, 2, 1], [0, 0, 2, 1]])
n = mp.colors.Normalize(data.min(), data.max())
c = mp.cm.viridis(n(data))[:, :, :-1]  # [..., :-1] disregards the alpha values
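A hedged alternative along the same lines: matplotlib's plt.imsave writes exactly one pixel per array element (no figure, no margins), so the saved file keeps the original resolution.
# a hedged alternative: imsave maps each array element to exactly one pixel
import cv2
import numpy as np
import matplotlib.pyplot as plt

data = np.array([
    [1, 2, 0, 1],
    [0, 1, 2, 1],
    [0, 0, 2, 1]])
plt.imsave("test.png", data)  # applies the default colormap to the 2D array
img = cv2.imread("test.png")
print(data.shape, img.shape)  # (3, 4) (3, 4, 3)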

What is the difference between PIL's and OpenCV's resize

I came across the following issue: the resize functions of these two libraries behave differently. Here is a small test:
import numpy as np
import PIL
import cv2
from matplotlib import pyplot as plt
img = np.random.randn(10, 10, 3)
SIZE = (5, 5)
img -= img.min()
img /= img.max()
img = (img*255).astype(np.uint8)
# Display the initial image
plt.figure(figsize=(16,9))
plt.imshow(img)
plt.show()
plt.close()
# resize the image in two different ways
img_cv2 = cv2.resize(img, dsize=SIZE, interpolation=cv2.INTER_LINEAR)
img_pil = PIL.Image.fromarray(img).resize(SIZE, resample=PIL.Image.BILINEAR)
# get the difference image and normalize it
diff = np.abs(img_cv2.astype(np.float32) - img_pil)
diff /= diff.max() or 1
# display results
fig, axs = plt.subplots(1, 3, figsize=(16, 9))
axs[0].imshow(img_cv2)
axs[1].imshow(img_pil)
axs[2].imshow(diff)
plt.show()
plt.close()
My question now is: why is this happening? Is the difference in the implementations (I didn't check the code of PIL or OpenCV yet), or am I using the functions in the wrong way?
Here are some example outputs: Input image and Resized images.
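One hedged diagnostic (reusing img, SIZE, img_cv2 and img_pil from the test above): compare both results against cv2.INTER_AREA, which averages over the whole source footprint when downscaling. If PIL's output sits much closer to INTER_AREA than cv2's INTER_LINEAR output does, antialiasing in Pillow's resize is the likely cause of the difference.
# a hedged diagnostic, not a definitive answer: measure how far each
# library's 10x10 -> 5x5 result is from area-averaged downscaling
img_area = cv2.resize(img, dsize=SIZE, interpolation=cv2.INTER_AREA)
img_pil_arr = np.asarray(img_pil)
print("cv2 INTER_LINEAR vs INTER_AREA:",
      np.abs(img_cv2.astype(np.float32) - img_area).max())
print("PIL BILINEAR vs INTER_AREA:",
      np.abs(img_pil_arr.astype(np.float32) - img_area).max())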

Python SVD fix the number of eigenvalues to rebuild the image?

I am trying to rebuild an image that I previously decomposed with SVD. The image is this:
I successfully decomposed the image with this code:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
img = Image.open('steve.jpg')
img = np.mean(img, 2)
U,s,V = np.linalg.svd(img)
s is an array of the singular values of the image. The more singular values I take, the more similar the reconstructed image is to the original one.
For example, if I take 20 singular values:
n = 20
S = np.zeros(np.shape(img))
for i in range(0, n):
    S[i, i] = s[i]
recon_img = U @ S @ V
plt.imshow(recon_img)
plt.axis('off')
plt.show()
I would like to fix the minimum number of singular values needed to get a good result: an image pretty similar to the original one. Moreover, I would like to see how much the result changes when I take a higher number of singular values. I tried with an animation, without success:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

img = Image.open('steve.jpg')
img = np.mean(img, 2)
U, s, V = np.linalg.svd(img)
fig = plt.figure()
def update(i):
    S = np.zeros(np.shape(img))
    n = 20
    for i in range(0, n):
        S[i, i] = s[i]
    recon_img = U @ S @ V
    plt.imshow(recon_img)
    plt.axis('off')
ani = FuncAnimation(fig=fig, func=update, frames=20, interval=10)
plt.show()
If you plot the singular values s, you can see a very steeply decreasing curve; it is better if you use a log scale for the y axis:
plt.semilogy(s, 'k-')
As you can see, the first 50 singular values are the most important ones: almost all of them are above 1000. Values from roughly the 50th to the 250th are an order of magnitude lower, and they decrease slowly: the slope of the curve is contained (remember the logarithmic y scale). That being said, I would take the first 50 elements to rebuild your image.
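If you want to pick that cutoff programmatically rather than by eye, a hedged sketch (the 99% energy threshold is an arbitrary choice; U, s, V are the arrays from the question's np.linalg.svd call):
# a hedged sketch: keep the smallest k whose singular values carry 99% of
# the total "energy" (sum of squared singular values), then reconstruct
# with a truncated product instead of filling a full diagonal matrix
energy = np.cumsum(s**2) / np.sum(s**2)
k = int(np.searchsorted(energy, 0.99)) + 1
recon_img = U[:, :k] @ np.diag(s[:k]) @ V[:k, :]
print(f"kept {k} of {len(s)} singular values")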
Regarding the animation:
While the animation updates frame by frame, the counter i is increased by 1. In your code, you mistakenly reuse i to slice s and fill S; you should rename the inner counter.
Moreover, as the animation goes on, you need to take an increasing number of singular values; this is set by n, which you keep constant from frame to frame. You need to update n on each frame, so you can use it as the counter.
Furthermore, you need to erase the previously plotted image, so you need to add a plt.gca().cla() at the beginning of the update function.
Check the code below for reference:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

img = Image.open('steve.jpg')
img = np.mean(img, 2)
U, s, V = np.linalg.svd(img)
fig, ax = plt.subplots(1, 2, figsize=(4, 4))
ax[0].imshow(img)
ax[0].axis('off')
ax[0].set_title('Original')
def init():
    ax[1].cla()
    ax[1].imshow(np.zeros(np.shape(img)))
    ax[1].axis('off')
    ax[1].set_title('Reconstructed\nn = 00')
def update(n):
    ax[1].cla()
    S = np.zeros(np.shape(img))
    for i in range(0, n):
        S[i, i] = s[i]
    recon_img = U @ S @ V
    ax[1].imshow(recon_img)
    ax[1].axis('off')
    ax[1].set_title(f'Reconstructed\nn = {n:02}')
ani = FuncAnimation(fig=fig, func=update, frames=50, init_func=init, interval=10)
ani.save('ani.gif', writer='imagemagick')
plt.show()
which gives this animation:
As you can see, the first 50 elements are enough to rebuild your image pretty well. The remaining elements add some noise and change the background a little.

How can I change colors in contours (obtained from non-Python) with Python?

I am trying to convert the color map of a contour plot generated by a non-Python application. I tried using Matthias Bussonnier's code available here, but it is unable to give me a full conversion. I tried truncating the color map, but again this does not give me a complete conversion.
MWE
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
import matplotlib.image as mpimg
from scipy.spatial import cKDTree
import matplotlib
import matplotlib.cm as mplcm
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    new_cmap = colors.LinearSegmentedColormap.from_list(
        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)))
    return new_cmap
cmap = plt.get_cmap('jet')
cmap = truncate_colormap(cmap, 0.1, 0.9)
img = mpimg.imread('./test.png')[:,:,:3]
#@interact(sub=(0, 500), d=(0, 1, 0.05))
def convert(sub=256, d=0.1, cin=cmap, cout='viridis'):
    viridis = plt.get_cmap(cout)
    jet = plt.get_cmap(cin)
    jet256 = colors.makeMappingArray(sub, jet)[:, :3]
    K = cKDTree(jet256)
    oshape = img.shape
    img_data = img.reshape((-1, 3))
    res = K.query(img_data, distance_upper_bound=d)
    indices = res[1]
    l = len(jet256)
    indices = indices.reshape(oshape[:2])
    remapped = indices
    indices.max()
    mask = (indices == l)
    remapped = remapped / (l - 1)
    mask = np.stack([mask]*3, axis=-1)
    blend = np.where(mask, img, viridis(remapped)[:, :, :3])
    fig, ax = plt.subplots()
    fig.set_figheight(10)
    fig.set_figwidth(10)
    ax.imshow(blend)
    fig.savefig('viridize.pdf')
convert()
Input image
Output image
How do I get a complete conversion of the color map (jet in this case) to viridis with Python?
As commented, the solution from "How I can specify how rainbow color scheme should be converted to grayscale" will work, but with some small modifications: you need to apply your target colormap to the values obtained from that solution, and hence expand the resulting array to be 3D.
The conditions for this to work are:
You know the colormap that the original image has been produced with (origin_cmap)
All colors in that image are either grayscale (axes, text etc.) or part of that origin_cmap; i.e., there should not be any other line plot or similar in the figure.
The original colormap is unambiguous, i.e. it does not contain the same color twice.
The full range of the original colormap has been used to create the input image, and the full range of the target colormap will be aimed for. (This condition can be weakened if needed, by specifying a different norm and/or range.)
The following will hence "viridify" a given image.
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt

image = plt.imread("https://i.stack.imgur.com/NyLq2.png")

def changecolormap(image, origin_cmap, target_cmap):
    r = np.linspace(0, 1, 256)
    norm = matplotlib.colors.Normalize(0, 1)
    mapvals = origin_cmap(norm(r))[:, :3]

    def get_value_from_cm(color):
        color = matplotlib.colors.to_rgb(color)
        # if color is already gray scale, don't change it
        if np.std(color) < 0.1:
            return color
        # otherwise return value from colormap
        distance = np.sum((mapvals - color)**2, axis=1)
        return target_cmap(r[np.argmin(distance)])[:3]

    newim = np.zeros_like(image)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            c = image[i, j, :3]
            newim[i, j, :3] = get_value_from_cm(c)
    return newim

fig, (ax, ax2) = plt.subplots(ncols=2)
ax.imshow(image)
ax2.imshow(changecolormap(image, plt.cm.jet, plt.cm.viridis))
ax.axis("off")
ax2.axis("off")
plt.show()
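As a side note, the per-pixel double loop can be slow on large images. A hedged, vectorized variant of the same idea using scipy's cKDTree (an extra dependency, mirroring the cKDTree approach from the answer further up):
# a hedged, vectorized variant: query the nearest origin-colormap entry for
# all pixels at once instead of looping pixel by pixel
from scipy.spatial import cKDTree

def changecolormap_fast(image, origin_cmap, target_cmap):
    r = np.linspace(0, 1, 256)
    mapvals = origin_cmap(r)[:, :3]
    pixels = image[..., :3].reshape(-1, 3)
    _, idx = cKDTree(mapvals).query(pixels)  # nearest colormap entry per pixel
    new = target_cmap(r[idx])[:, :3]
    gray = pixels.std(axis=1) < 0.1          # keep grayscale (axes, text) as-is
    new[gray] = pixels[gray]
    return new.reshape(image[..., :3].shape)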
