I'm trying to convert a figure created with Matplotlib imshow into RGBA values but I got the following error:
ValueError: not enough values to unpack (expected 4, got 0)
This is my code:
speed0 = speed[0, :, :].values
figsize = (7, 7)
cbarkw = dict(shrink=0.6, extend='both')
fig, ax = plt.subplots(figsize=figsize)
i = plt.imshow(speed0, origin='lower')
cbar = plt.colorbar(i, **cbarkw)
plt.axis('off')
def matplotlib_to_opencv(i):
    image = i._rgbacache
    r, g, b, a = cv2.split(image)
    return np.flipud(cv2.merge([b, g, r, a]))
image = matplotlib_to_opencv(i)
Here speed0 is a wind dataset of shape (192, 111). I think 'image' is an empty cache and hence cv2.split can't read it, but I don't know how to make it work properly. Ideas?
Thank you in advance.
What I think you should do to get what you want is to change the call to make_image:
import numpy as np
import matplotlib.pyplot as plt
import cv2
speed = np.random.random((4, 192, 111))
speed0 = speed[0, :, :]
figsize = (7, 7)
cbarkw = dict(shrink=0.6, extend='both')
fig, ax = plt.subplots(figsize=figsize)
im = plt.imshow(speed0, origin='lower')
cbar = plt.colorbar(im, **cbarkw)
plt.axis('off')
def matplotlib_to_opencv(im):
    image = im.make_image('TkAgg')
    # make_image returns a tuple:
    # -------
    # image : (M, N, 4) uint8 array
    #     The RGBA image, resampled unless *unsampled* is True.
    # x, y : float
    #     The upper left corner where the image should be drawn, in pixel
    #     space.
    # trans : Affine2D
    #     The affine transformation from image to pixel space.
    # So you just want the first element of the tuple:
    r, g, b, a = cv2.split(image[0])
    return np.flipud(cv2.merge([b, g, r, a]))
image = matplotlib_to_opencv(im)
plt.show()
Since I did not have your dataset I'm not 100% sure this is what you wanted. But I believe it should work.
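An alternative that avoids the Artist internals altogether is to render the figure and read the canvas buffer; since it grabs the whole figure, this also captures the colorbar. A minimal sketch, assuming an Agg-based backend (the default when saving):
import numpy as np
import matplotlib.pyplot as plt
import cv2

fig, ax = plt.subplots(figsize=(7, 7))
ax.imshow(np.random.random((192, 111)), origin='lower')
ax.axis('off')

fig.canvas.draw()                               # force a render so the buffer exists
rgba = np.asarray(fig.canvas.buffer_rgba())     # (H, W, 4) uint8 array
bgra = cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGRA)  # reorder channels for OpenCV
Because the buffer already has a top-left origin, no np.flipud is needed here.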
Related
Let's say I have a simple 2D numpy array that I display with imshow():
import numpy as np
import random
import matplotlib.pyplot as plt
a = np.random.randint(2, size=(10,10))
im = plt.imshow(a, cmap='spring', interpolation='none', vmin=0, vmax=1, aspect='equal')
plt.show()
And I have another 2D numpy array, like so:
bnd = np.zeros((10,10))
bnd[2,3] = bnd[3,2:5] = bnd[4,3] = 1
bnd[6,6] = bnd[7,5:8] = bnd[8,6] = 1
plt.imshow(bnd)
plt.show()
How can I generate an outline of all the continuous values of "1" in bnd and then overplot it on a, so I get something like the following (I manually added the black lines in the example below)?
You can compute the borders of the mask by finding the starting and ending indices of consecutive ones and converting those to border segments in the image's coordinates.
Setting up the image and the mask
import numpy as np
import matplotlib.pyplot as plt
a = np.random.randint(2, size=(10,10))
plt.imshow(a, cmap='spring', interpolation='none', vmin=0, vmax=1, aspect='equal')
bnd = np.zeros((10,10))
kernel = [[0,1,0],
[1,1,1],
[0,1,0]]
bnd[2:5, 2:5] = bnd[6:9, 5:8] = kernel
Finding the indices and converting them to image coordinates
# indices to vertical segments
v = np.array(np.nonzero(np.diff(bnd, axis=1))).T
vs = np.repeat(v, 3, axis=0) - np.tile([[1, 0],[0, 0],[np.nan, np.nan]], (len(v),1))
# indices to horizontal segments
h = np.array(np.nonzero(np.diff(bnd, axis=0))).T
hs = np.repeat(h, 3, axis=0) - np.tile([[0, 1],[0, 0],[np.nan, np.nan]], (len(h),1))
# convert to image coordinates
bounds = np.vstack([vs,hs])
x = np.interp(bounds[:,1], plt.xlim(), (0, bnd.shape[1]))
y = np.interp(bounds[:,0], sorted(plt.ylim()), (0, bnd.shape[0]))
plt.plot(x, y, color=(.1, .1, .1, .6), linewidth=5)
plt.show()
Output
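If a slightly smoothed outline is acceptable, plt.contour can trace the mask boundary in a single call; a sketch of that alternative (it cuts corners diagonally instead of following the pixel edges exactly):
plt.contour(bnd, levels=[0.5], colors='k', linewidths=2)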
I am trying to get an elevation for each pixel in the image using image processing in Python. My first try is to convert the image to grayscale and convert the 2D image to 3D using the following code:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imread
from scipy import ndimage  # needed for gaussian_filter below

imageFile = r'D:\Books\Pav Man\PICS\pic (17) - Copy.png'  # raw string keeps the backslashes literal
mat = imread(imageFile)
mat = mat[:, :, 0]  # get the first channel
#mat = mat - np.full_like(mat, mat.mean())  # use this to get negative values
rows, cols = mat.shape
xv, yv = np.meshgrid(range(cols), range(rows)[::-1])
blurred = ndimage.gaussian_filter(mat, sigma=(5, 5), order=0)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(221)
ax.imshow(mat, cmap='gray')
ax = fig.add_subplot(222, projection='3d')
ax.elev= 75
ax.plot_surface(xv, yv, mat)
ax = fig.add_subplot(223)
ax.imshow(blurred, cmap='gray')
ax = fig.add_subplot(224, projection='3d')
ax.elev= 75
ax.plot_surface(xv, yv, blurred)
plt.show()
The mat array contains x, y, z values for each pixel: x is the width coordinate, y is the height coordinate, and z is the grayscale value, which ranges from 0 to 1 but does not reflect the real elevation.
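If the real minimum and maximum elevations were known from some other source (the image alone cannot provide them; the values below are hypothetical), the grayscale could at least be rescaled linearly:
# hypothetical known elevation range in metres
z_min, z_max = 100.0, 250.0
elevation = z_min + mat * (z_max - z_min)  # mat holds grayscale values in [0, 1]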
My second try is to use depth data from two images, as described in the following link:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_depthmap/py_depthmap.html
But there is no clear way to estimate or predict the elevation of points in the image.
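For reference, the core of that tutorial reduces to the sketch below (the filenames are the tutorial's sample stereo pair); note that it yields disparity, which only becomes metric depth or elevation when the camera baseline and focal length are known:
import cv2
from matplotlib import pyplot as plt

imgL = cv2.imread('tsukuba_l.png', 0)  # left view, grayscale
imgR = cv2.imread('tsukuba_r.png', 0)  # right view, grayscale

stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL, imgR)
plt.imshow(disparity, 'gray')
plt.show()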
The following picture describes what I mean:
My question is how to get the elevation of each point in the image to create a topographic profile?
I am trying to plot multiple images in a figure using matplotlib.
Basically, I read the images using the PIL library, convert them to numpy arrays, and do some operations on them (setting the elements in a row to zero). Everything works fine up to this point. But when I try to save the results using matplotlib, I get inconsistent results.
Please have a look at my code.
Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as PI
Loading the file
fileName = 'n01978287_43.jpg'
img = PI.open(fileName)
size = 224
img = img.resize((size, size))
img = np.asarray(img, dtype=np.uint8).astype(np.float32)
img = img/255
Result 1
temp_img = np.copy(img)
temp_img[51, :, :] = 0*temp_img[51, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 6, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 6, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_516.png')
plt.close(fig)
Result 2
temp_img = np.copy(img)
temp_img[52, :, :] = 0*temp_img[52, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 6, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 6, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_526.png')
plt.close(fig)
Result 3
temp_img = np.copy(img)
temp_img[51, :, :] = 0*temp_img[51, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 2, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 2, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_512.png')
plt.close(fig)
Result 4
temp_img = np.copy(img)
temp_img[56, :, :] = 0*temp_img[56, :, :]
fig = plt.figure()
ax1 = plt.subplot(1, 2, 1)
ax1.imshow(img, interpolation='none')
ax2 = plt.subplot(1, 2, 2)
ax2.imshow(temp_img, interpolation='none')
plt.savefig('test_562.png')
plt.close(fig)
Now, if you look at the results, you will notice the inconsistency.
Firstly, in the first two images (figures with 6 axes), the black line shows up in only one of the images. (There is a pattern to this if you zero out all the rows, one at a time, and then save the results.)
In the last two images, the black line gets thicker. (I didn't find any pattern in this case.)
System Setup - Python3, Matplotlib3, PIL, Numpy
Update:
After looking for ways to save a figure with the desired resolution (224x224 in this case), I wrote the following code (using multiple resources from the web).
Importing libraries and loading the image file
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
fileName = 'n01978287_43.jpg'
img = Image.open(fileName)
size = 224
img = img.resize((size, size))
img = np.asarray(img, dtype=np.uint8).astype(np.float32)
img = img/255
Function to plot the grid of images
def plt_save(grid, idx, name):
    nRows = len(grid)
    nCols = len(grid[0])
    print('Clearing figure')
    plt.rcParams.update({'font.size': 8})
    wFig = (nCols+2)  # figure width: two more than nCols because I want ylabels on the very left and very right of the figure
    hFig = (nRows+1)  # figure height: one more than nRows because I want xlabels at the top of the figure
    fig = plt.figure(figsize=(wFig, hFig))
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
    fig.patch.set_facecolor('grey')
    for r in range(nRows):
        for c in range(nCols):
            ax = plt.subplot2grid(shape=[hFig, wFig], loc=[r+1, c+1])
            im = ax.imshow(grid[r][c], interpolation='none')
            ax.spines['bottom'].set_visible(False)
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['left'].set_visible(False)
            ax.set_xticks([])
            ax.set_yticks([])
            #fig.colorbar(im, ax=ax)
            #ax.set_aspect('auto')
            if not r:
                ax.set_title('Image',
                             rotation=22.5,
                             horizontalalignment='left',
                             verticalalignment='bottom')
            if not c:
                ax.set_ylabel('leftLabel',
                              rotation=0,
                              horizontalalignment='right',
                              verticalalignment='center')
            if c == wFig-3:
                ax2 = ax.twinx()
                #ax2.axis('off')
                ax2.set_xticks([])
                ax2.set_yticks([])
                ax2.spines['top'].set_visible(False)
                ax2.spines['right'].set_visible(False)
                ax2.spines['bottom'].set_visible(False)
                ax2.spines['left'].set_visible(False)
                ax2.set_ylabel('rightLabel',
                               rotation=0,
                               verticalalignment='center',
                               horizontalalignment='left')
    print('Saving file')
    plt.savefig((str(idx) + '_' + name + '_' + fileName.split('.')[0] + '.png'),
                orientation='landscape',
                #bbox_inches='tight',
                facecolor=fig.get_facecolor(),
                dpi=224,  # DPI is 224 because each axis is 1x1 inch and I want 224x224 pixels per axis
                transparent=True,
                frameon=False)
    plt.close(fig)
Loop to zero out the rows of the image (one at a time)
for i in range(0, 224):
    temp_img = np.copy(img)
    temp_img[i, :, :] = 0*temp_img[i, :, :]
    # 1x4 grid of images (can vary based on the requirement)
    grid = [img, temp_img, img, temp_img]
    grid = [grid, grid]  # 2x4 grid of images
    plt_save(grid, i, 'PLT_')
Here is how one of the 224 images looks.
The thing is that it works perfectly as long as I stick with this kind of plot. But the moment I try to make some changes (like adding a colorbar, or adding spaces between the axes), the image resolution changes. If I use bbox_inches='tight' while saving the figure, it adjusts everything but changes the original resolution while keeping the figure size constant.
Is there any other way, similar to bbox_inches='tight', to keep the axis resolution fixed while adjusting the figure size accordingly? Or, if there is no such thing in matplotlib, could you suggest any other way to incorporate a colorbar (small spaces between axes, a ylabel for each axis, etc.) while keeping the image resolution fixed?
The image you start with has 224 pixels in height.
In the first two cases you distribute those 224 rows over 72 pixels in the resulting image. This means any given row of the image has a 72/224 ≈ 32% chance of showing up in the final plot. With row number 52 you are lucky and hit that roughly one-in-three chance.
In the second two cases the resulting image is 226 pixels in height (i.e. just slightly larger than the original). Here there is a 2/224 ≈ 0.9% chance that a given row will occupy two pixels. In the case of row no. 56 you hit that unlucky chance.
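To avoid the resampling entirely, you can size the figure so that every image row maps to a whole number of device pixels. A minimal sketch of the idea, assuming two 224x224 panels side by side (fixed-position axes created with fig.add_axes keep their pixel size no matter what else you add to the figure, unlike bbox_inches='tight'):
import numpy as np
import matplotlib.pyplot as plt

size, dpi = 224, 224                      # 224 px per inch -> each axes is 1x1 inch
img = np.random.random((size, size, 3))   # stand-in for the real image
temp_img = img.copy()
temp_img[51, :, :] = 0

fig = plt.figure(figsize=(2 * size / dpi, size / dpi), dpi=dpi)
ax1 = fig.add_axes([0.0, 0.0, 0.5, 1.0])  # left half: exactly 224x224 px
ax2 = fig.add_axes([0.5, 0.0, 0.5, 1.0])  # right half: exactly 224x224 px
for ax, image in ((ax1, img), (ax2, temp_img)):
    ax.imshow(image, interpolation='none')
    ax.axis('off')
fig.savefig('test_exact.png', dpi=dpi)    # keep the same dpi so nothing is rescaled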
I have a 2D array of data sampled along two non-orthogonal vectors a, b:
a = |a|·(cos(alpha), sin(alpha))
b = |b|·(cos(beta), sin(beta))
(i.e. not along the orthogonal Cartesian directions x, y).
I would like to plot this data undistorted (i.e. as a parallelogram instead of a rectangle).
Is there any function to do that in matplotlib?
I need it for plotting data like this (c, f, i).
What about using an affine transform as in this example,
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
def get_image():
    # scipy.misc.imread has been removed from SciPy; plt.imread serves the same purpose here
    Z = plt.imread('31271907.jpg')
    return Z

# Get image
fig, ax = plt.subplots(1, 1)
Z = get_image()

# image skew (_image_skew_coordinate is a private attribute and may not exist
# in newer Matplotlib versions)
im = ax.imshow(Z, interpolation='none', origin='lower',
               extent=[-2, 4, -3, 2], clip_on=True)
im._image_skew_coordinate = (3, -2)
plt.show()
Which uses the image
and turns it into,
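If you would rather stay on the public API, the same effect can be achieved by composing an Affine2D shear with the data transform; a sketch with toy data (the 30-degree skew angle is arbitrary):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms

Z = np.random.random((20, 30))  # stand-in for the non-orthogonally sampled data

fig, ax = plt.subplots()
im = ax.imshow(Z, interpolation='none', origin='lower', clip_on=True)
tr = mtransforms.Affine2D().skew_deg(30, 0)  # shear along x by 30 degrees
im.set_transform(tr + ax.transData)          # apply it on top of the data transform
ax.set_xlim(-5, 45)                          # widen the view so the sheared image stays visible
ax.set_ylim(-5, 25)
plt.show()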
I used histogram equalization and adaptive equalization to remove uneven illumination from grayscale images. The histogram equalization (I used the scikit-image Python library) looked good, but during the image conversion for mahotas something goes wrong and I get a totally black picture. How can I fix it?
Source image:
Histogram equalization and adaptation:
Result after mahotas conversion.
Conversion code from scikit-image to mahotas:
binimg = np.array(img_adapteq, dtype=np.bool)
Source code:
import scipy
import numpy as np
import pymorph as pm
import mahotas as mh
from skimage import morphology
from skimage import io
from matplotlib import pyplot as plt
from skimage import data, img_as_float
from skimage import exposure
def plot_img_and_hist(img, axes, bins=256):
    """Plot an image along with its histogram and cumulative histogram."""
    img = img_as_float(img)
    ax_img, ax_hist = axes
    ax_cdf = ax_hist.twinx()
    # Display image
    ax_img.imshow(img, cmap=plt.cm.gray)
    ax_img.set_axis_off()
    # Display histogram
    ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
    ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
    ax_hist.set_xlabel('Pixel intensity')
    ax_hist.set_xlim(0, 1)
    ax_hist.set_yticks([])
    # Display cumulative distribution
    img_cdf, bins = exposure.cumulative_distribution(img, bins)
    ax_cdf.plot(bins, img_cdf, 'r')
    ax_cdf.set_yticks([])
    return ax_img, ax_hist, ax_cdf
mhgray = mh.imread(path, 0)
binimg = mhgray[:, :, 0]
print(type(binimg[0][0]))
thresh = mh.otsu(binimg)
gray = (binimg < thresh)
shape = list(gray.shape)
w = 0
if shape[0] > shape[1]:
    shape = shape[0]
else:
    shape = shape[1]
if shape < 100:
    w = int((shape/100)*1.5)
elif shape > 100 and shape < 420:
    w = int((shape/100)*2.5)
else:
    w = int((shape/100)*4)
disk7 = pm.sedisk(w)
img = binimg
# Contrast stretching
p2 = np.percentile(img, 2)
p98 = np.percentile(img, 98)
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
f, axes = plt.subplots(2, 4, figsize=(8, 4))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
plt.subplots_adjust(wspace=0.4)
plt.show()
plt.gray()
plt.subplot(121)
plt.title("after histo")
plt.imshow(img_adapteq)
plt.show()
binimg = np.array(img_adapteq, dtype=np.bool)#uint16
plt.gray()
plt.subplot(121)
plt.title("after otsu")
plt.imshow(binimg)
plt.show()
imgbnbin = mh.morph.dilate(binimg, disk7)
#2
plt.gray()
plt.subplot(121)
plt.title("after dilate before close")
plt.imshow(imgbnbin)
plt.show()
imgbnbin = mh.morph.close(imgbnbin, disk7)
#2
plt.gray()
plt.subplot(121)
plt.title("before skeletonize")
plt.imshow(imgbnbin)
plt.show()
imgbnbin = mh.morph.close(imgbnbin, disk7)
out = morphology.skeletonize(imgbnbin>0)
The scikit-image algorithm probably returns a floating-point image with values between 0 and 1. If you cast that to bool, you'll get all ones, since every nonzero value becomes True. You probably want:
binimg = img_adapteq > 0.5
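A tiny demonstration of why the cast goes wrong:
import numpy as np

a = np.array([0.2, 0.6, 0.9])
a.astype(bool)   # array([ True,  True,  True]) - every nonzero float is True
a > 0.5          # array([False,  True,  True]) - an actual threshold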
In general, also take note of the rescale_intensity function, which will take an image with values between 0 and 1 and return an image with values between 0 and 255:
from skimage import exposure
image = exposure.rescale_intensity(image, out_range=(0, 255))
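Note that rescale_intensity preserves the input dtype, so a float image rescaled to (0, 255) still contains floats; if you actually need uint8 (an extra step, not part of the snippet above), cast afterwards:
image = exposure.rescale_intensity(image, out_range=(0, 255)).astype(np.uint8)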