Can matplotlib contours match pixel edges? - python

How to outline pixel boundaries in matplotlib? For instance, for a semi-random dataset like the one below,
# the details of this code block are not important; it just generates the sample data
import numpy as np
k = []
for s in [2103, 1936, 2247, 2987]:
np.random.seed(s)
k.append(np.random.randint(0, 2, size=(2,6)))
arr = np.hstack([np.vstack(k)[:, :-1], np.vstack(k).T[::-1].T ])
image = np.zeros(shape=(arr.shape[0]+2, arr.shape[1]+2))
image[1:-1, 1:-1] = arr
it is quite clear that a contour matching the pixel edges of image would be preferred to the default behavior of the contour function, where the contour lines are effectively drawn across the diagonals of edge pixels.
import matplotlib.pyplot as plt
plt.contour(image[::-1], [0.5], colors='r')
How can I make the contours align with the pixels? I'm looking for a solution within the numpy and matplotlib libraries.

If the image has a resolution of 1 pixel per unit, how would you define the "edge" of a pixel? The notion of an "edge" only makes sense in a frame of increased resolution compared to the pixel itself, and contour cannot draw any edges if it is working at the same resolution as the image itself.
On the other hand, it is of course possible to increase the resolution such that the notion of an "edge" carries a meaning. So let's say we increase the resolution by a factor of 100; then we can easily draw the edges using a contour plot.
import matplotlib.pyplot as plt
import numpy as np
k = []
for s in [2103, 1936, 2247, 2987]:
np.random.seed(s)
k.append(np.random.randint(0, 2, size=(2,6)))
arr = np.hstack([np.vstack(k)[:, :-1], np.vstack(k).T[::-1].T ])
image = np.zeros(shape=(arr.shape[0]+2, arr.shape[1]+2))
image[1:-1, 1:-1] = arr
f = lambda x,y: image[int(y),int(x) ]
g = np.vectorize(f)
x = np.linspace(0,image.shape[1], image.shape[1]*100)
y = np.linspace(0,image.shape[0], image.shape[0]*100)
X, Y= np.meshgrid(x[:-1],y[:-1])
Z = g(X[:-1],Y[:-1])
plt.imshow(image[::-1], origin="lower", interpolation="none", cmap="Blues")
plt.contour(Z[::-1], [0.5], colors='r', linewidths=[3],
extent=[0-0.5, x[:-1].max()-0.5,0-0.5, y[:-1].max()-0.5])
plt.show()
For comparison, we can also draw the image itself in the same plot using imshow.

contour_rect_slow draws single line segments at the boundaries between pixels with values 0 and 1. contour_rect is a more compact version, connecting adjacent segments into single longer lines.
Code:
import numpy as np
k = []
for s in [2103, 1936, 2247, 2987]:
np.random.seed(s)
k.append(np.random.randint(0, 2, size=(2,6)))
arr = np.hstack([np.vstack(k)[:, :-1], np.vstack(k).T[::-1].T ])
image = np.zeros(shape=(arr.shape[0]+2, arr.shape[1]+2))
image[1:-1, 1:-1] = arr[::1]
# image[1, 1] = 1
import matplotlib.pyplot as plt
plt.imshow(image, interpolation="none", cmap="Blues")
def contour_rect_slow(im):
"""Clear version"""
pad = np.pad(im, [(1, 1), (1, 1)]) # zero padding
im0 = np.abs(np.diff(pad, n=1, axis=0))[:, 1:]
im1 = np.abs(np.diff(pad, n=1, axis=1))[1:, :]
lines = []
for ii, jj in np.ndindex(im0.shape):
if im0[ii, jj] == 1:
lines += [([ii-.5, ii-.5], [jj-.5, jj+.5])]
if im1[ii, jj] == 1:
lines += [([ii-.5, ii+.5], [jj-.5, jj-.5])]
return lines
def contour_rect(im):
"""Fast version"""
lines = []
pad = np.pad(im, [(1, 1), (1, 1)]) # zero padding
im0 = np.abs(np.diff(pad, n=1, axis=0))[:, 1:]
im1 = np.abs(np.diff(pad, n=1, axis=1))[1:, :]
im0 = np.diff(im0, n=1, axis=1)
starts = np.argwhere(im0 == 1)
ends = np.argwhere(im0 == -1)
lines += [([s[0]-.5, s[0]-.5], [s[1]+.5, e[1]+.5]) for s, e
in zip(starts, ends)]
im1 = np.diff(im1, n=1, axis=0).T
starts = np.argwhere(im1 == 1)
ends = np.argwhere(im1 == -1)
lines += [([s[1]+.5, e[1]+.5], [s[0]-.5, s[0]-.5]) for s, e
in zip(starts, ends)]
return lines
lines = contour_rect(image)
for line in lines:
plt.plot(line[1], line[0], color='r', alpha=1)
Warning: This is significantly slower than mpl.contour for large images.
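If the per-segment plt.plot calls become the bottleneck for the drawing step itself, the segments returned by contour_rect can be batched into a single matplotlib.collections.LineCollection. This is only a sketch of that idea (it speeds up the drawing, not contour_rect itself), reusing the lines list from above:
from matplotlib.collections import LineCollection
# each entry of `lines` is ([y0, y1], [x0, x1]); LineCollection wants (x, y) pairs per segment
segments = [list(zip(line[1], line[0])) for line in lines]
ax = plt.gca()
ax.add_collection(LineCollection(segments, colors='r', linewidths=2))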

Related

How can I change the colormap of an existing plot given an image file?

How can a figure using a rainbow colormap, such as figure 1, be converted so that the same data are displayed using a different color map, such as a perceptually uniform sequential map?
Assume that the underlying data from which the original image was generated are not accessible and the image itself must be recolored using only information within the image.
Background information: rainbow color maps tend to produce visual artifacts. See the cyan line near z = -1.15 m? It looks like there's a sharp edge there. But look at the colorbar itself! Even the color bar has an edge there. There's another fake edge in the yellow band that goes vertically near R = 1.45 m. The horizontal yellow stripe may be a real edge in the underlying data, although it's difficult to distinguish that case from a rainbow artifact.
More information:
http://ieeexplore.ieee.org/abstract/document/4118486/
http://matplotlib.org/users/colormaps.html
Here is my best solution so far:
import numpy as np
import scipy
import os
import matplotlib
import copy
import matplotlib.pyplot as plt
from matplotlib.pyplot import imread, imsave
def_colorbar_loc = [[909, 22], [953 - 20, 959]]
def_working_loc = [[95, 189], [857, 708]]
def recolor_image(
filename='image.png',
colorbar_loc=def_colorbar_loc,
working_loc=def_working_loc,
colorbar_orientation='auto',
colorbar_direction=-1,
new_cmap='viridis',
normalize_before_compare=False,
max_rgb='auto',
threshold=0.4,
saturation_threshold=0.25,
compare_hue=True,
show_plot=True,
debug=False,
):
"""
This script reads in an image file (like .png), reads the image's color bar (you have to tell it where), interprets
the color map used in the image to convert colors to values, then recolors those values with a new color map and
regenerates the figure. Useful for fixing figures that were made with rainbow color maps.
Parameters
-----------
:param filename: Full path and filename of the image file.
:param colorbar_loc: Location of color bar, which will be used to analyze the image and convert colors into values.
Pixels w/ 0,0 at top left corner: [[left, top], [right, bottom]]
:param working_loc: Location of the area to recolor. You don't have to recolor the whole image.
Pixels w/ 0,0 at top left corner: [[left, top], [right, bottom]], set to [[0, 0], [-1, -1]] to do everything.
:param colorbar_orientation: Set to 'x', 'y', or 'auto' to specify whether color map is horizontal, vertical,
or should be determined based on the dimensions of the colorbar_loc
:param colorbar_direction: Controls direction of ascending value
+1: colorbar goes from top to bottom or left to right.
-1: colorbar goes from bottom to top or right to left.
:param new_cmap: String describing the new color map to use in the recolored image.
:param normalize_before_compare: Divide r, g, and b each by (r+g+b) before comparing.
:param max_rgb: Do the values of r, g, and b range from 0 to 1 or from 0 to 255? Set to 1, 255, or 'auto'.
:param threshold: Sum of absolute differences in r, g, b values must be less than threshold to be valid
(0 = perfect, 3 = impossibly bad). Higher numbers = less chance of missing pixels but more chance of recoloring
plot axes, etc.
:param saturation_threshold: Minimum color saturation below which no replacement will take place
:param compare_hue: Use differences in HSV instead of RGB to determine with which index each pixel should be
associated.
:param show_plot: T/F: Open a plot to explain what is going on. Also helpful for checking your aim on the colorbar
coordinates and debugging.
:param debug: T/F: Print debugging information.
"""
def printd(string_in):
"""
Prints debugging statements
:param string_in: String to print only if debug is on.
:return: None
"""
if debug:
print(string_in)
return
print('Recoloring image: {:} ...'.format(filename))
# Determine tag name and load original file into the tree
fn1 = filename.split(os.sep)[-1] # Filename without path
fn2 = fn1.split(os.extsep)[0] # Filename without extension (so new filename can be built later)
ext = fn1.split(os.extsep)[-1] # File extension
path = os.sep.join(filename.split(os.sep)[0:-1]) # Path; used later to save results.
a = imread(filename).astype(float)
printd(f'Read image; shape = {np.shape(a)}')
if max_rgb == 'auto':
# Determine if values of R, G, and B range from 0 to 1 or from 0 to 255
if a.max() > 1:
max_rgb = 255.0
else:
max_rgb = 1.0
# Normalize a so RGB values go from 0 to 1 and are floats.
a /= max_rgb
# Extract the colorbar
x = np.array([colorbar_loc[0][0], colorbar_loc[1][0]])
y = np.array([colorbar_loc[0][1], colorbar_loc[1][1]])
cb = a[y[0]:y[1], x[0]:x[1]]
# Take just the working area, not the whole image
xw = np.array([working_loc[0][0], working_loc[1][0]])
yw = np.array([working_loc[0][1], working_loc[1][1]])
a1 = a[yw[0]:yw[1], xw[0]:xw[1]]
# Pick color bar orientation
if colorbar_orientation == 'auto':
if np.diff(x) > np.diff(y):
colorbar_orientation = 'x'
else:
colorbar_orientation = 'y'
printd('Auto selected colorbar_orientation')
printd('Colorbar orientation is {:}'.format(colorbar_orientation))
# Analyze the colorbar
if colorbar_orientation == 'y':
cb = np.nanmean(cb, axis=1)
else:
cb = np.nanmean(cb, axis=0)
if colorbar_direction < 0:
cb = cb[::-1]
# Compress colorbar to only count unique colors
# If the array gets too big, it will fill memory and crash python: https://github.com/numpy/numpy/issues/14136
dcb = np.append(1, np.sum(abs(np.diff(cb[:, 0:3], axis=0)), axis=1))
cb = cb[dcb > 0]
# Find and mask special colors that should not be recolored
n1a = np.sum(a1[:, :, 0:3], axis=2)
replacement_mask = np.ones(np.shape(n1a), bool)
for col in [0, 3]: # Black and white will come out as 0 and 3.
mask_update = n1a != col
if mask_update.max() == 0:
print('Warning: masking to protect special colors prevented all changes to the image!')
else:
printd('Good: Special color mask {:} allowed at least some changes'.format(col))
replacement_mask *= mask_update
if replacement_mask.max() == 0:
print('Warning: replacement mask will prevent all changes to the image! '
'(Reached this point during special color protection)')
printd('Sum(replacement_mask) = {:} (after considering special color {:})'
.format(np.sum(np.atleast_1d(replacement_mask)), col))
# Also apply limits to total r+g+b
replacement_mask *= n1a > 0.75
replacement_mask *= n1a < 2.5
if replacement_mask.max() == 0:
print('Warning: replacement mask will prevent all changes to the image! '
'(Reached this point during total r+g+b limits)')
printd('Sum(replacement_mask) = {:} (after considering r+g+b upper threshold)'
.format(np.sum(np.atleast_1d(replacement_mask))))
if saturation_threshold > 0:
hsv1 = matplotlib.colors.rgb_to_hsv(a1[:, :, 0:3])
sat = hsv1[:, :, 1]
printd('Saturation ranges from {:} <= sat <= {:}'.format(sat.min(), sat.max()))
sat_mask = sat > saturation_threshold
if sat_mask.max() == 0:
print('Warning: saturation mask will prevent all changes to the image!')
else:
printd('Good: Saturation mask will allow at least some changes')
replacement_mask *= sat_mask
if replacement_mask.max() == 0:
print('Warning: replacement mask will prevent all changes to the image! '
'(Reached this point during saturation threshold)')
printd(f'shape(a1) = {np.shape(a1)}')
printd(f'shape(cb) = {np.shape(cb)}')
# Find where on the colorbar each pixel sits
if compare_hue:
# Difference in hue
hsv1 = matplotlib.colors.rgb_to_hsv(a1[:, :, 0:3])
hsv_cb = matplotlib.colors.rgb_to_hsv(cb[:, 0:3])
d2 = abs(hsv1[:, :, :, np.newaxis] - hsv_cb.T[np.newaxis, np.newaxis, :, :])
# d2 = d2[:, :, 0, :] # Take hue only
d2 = np.sum(d2, axis=2)
printd(' shape(d2) = {:} (hue version)'.format(np.shape(d2)))
else:
# Difference in RGB
if normalize_before_compare:
# Difference of normalized RGB arrays
n1 = n1a[:, :, np.newaxis]
n2 = np.sum(cb[:, 0:3], axis=1)[:, np.newaxis]
w1 = n1 == 0
w2 = n2 == 0
n1[w1] = 1
n2[w2] = 1
d = (a1/n1)[:, :, 0:3, np.newaxis] - (cb/n2).T[np.newaxis, np.newaxis, 0:3, :]
else:
# Difference of non-normalized RGB arrays
d = (a1[:, :, 0:3, np.newaxis] - cb.T[np.newaxis, np.newaxis, 0:3, :])
printd(f'Shape(d) = {np.shape(d)}')
d2 = np.sum(np.abs(d[:, :, 0:3, :]), axis=2) # 0:3 excludes the alpha channel from this calculation
printd('Processed colorbar')
index = d2.argmin(axis=2)
md2 = d2.min(axis=2)
index_valid = md2 < threshold
if index_valid.max() == 0:
print('Warning: minimum difference is greater than threshold: all changes rejected!')
else:
printd('Good: Minimum difference filter is lower than threshold for at least one pixel.')
printd('Sum(index_valid) = {:} (before *= replacement_mask)'.format(np.sum(np.atleast_1d(index_valid))))
printd('Sum(replacement_mask) = {:} (final, before combining w/ index_valid)'
.format(np.sum(np.atleast_1d(replacement_mask))))
index_valid *= replacement_mask
if index_valid.max() == 0:
print('Warning: index_valid mask prevents all changes to the image after combination w/ replacement_mask.')
else:
printd('Good: Mask will allow at least one pixel to change.')
printd('Sum(index_valid) = {:}'.format(np.sum(np.atleast_1d(index_valid))))
value = index/(len(cb)-1.0)
printd('Index ranges from {:} to {:}'.format(index.min(), index.max()))
# Make a new image with replaced colors
b = matplotlib.cm.ScalarMappable(cmap=new_cmap).to_rgba(value) # Remap everything
printd('shape(b) = {:}, min(b) = {:}, max(b) = {:}'.format(np.shape(b), b.min(), b.max()))
c = copy.copy(a1) # Copy original
c[index_valid] = b[index_valid] # Transfer only pixels where color was close to colormap
# Transfer working area to full image
c2 = copy.copy(a) # Copy original full image
c2[yw[0]:yw[1], xw[0]:xw[1], :] = c # Replace working area
c2[:, :, 3] = a[:, :, 3] # Preserve original alpha channel
# Save the image in the same path as the original but with _recolored added to the filename.
new_filename = '{:}{:}{:}_recolored{:}{:}'.format(path, os.sep, fn2, os.extsep, ext)
imsave(new_filename, c2)
print('Done recoloring. Result saved to {:} .'.format(new_filename))
if show_plot:
# Setup figure for showing things to the user
f, axs = plt.subplots(2, 3)
axo = axs[0, 0] # Axes for original figure
axoc = axs[0, 1] # Axes for original color bar
axf = axs[0, 2] # Axes for final figure
axm = axs[1, 1] # Axes for mask
axre = axs[1, 2] # Axes for recolored section only (it might not be the whole figure)
axraw = axs[1, 0] # Axes for raw recoloring result before masking
for ax in axs.flatten():
ax.set_xlabel('x pixel')
ax.set_ylabel('y pixel')
axo.set_title('Original image w/ colorbar ID overlay')
axoc.set_title('Color progression from original colorbar')
axm.set_title('Mask')
axre.set_title('Recolored section')
axraw.set_title('Raw recolor result (no masking)')
axf.set_title('Final image')
axoc.set_xlabel('Index')
axoc.set_ylabel('Value')
# Show the user where they placed the color bar and working location
axo.imshow(a)
xx = x[np.array([0, 0, 1, 1, 0])]
yy = y[np.array([0, 1, 1, 0, 0])]
axo.plot(xx, yy, '+-', label='colorbar')
xxw = xw[np.array([0, 0, 1, 1, 0])]
yyw = yw[np.array([0, 1, 1, 0, 0])]
axo.plot(xxw, yyw, '+-', label='target')
tots = np.sum(cb[:, 0:3], axis=1)
if normalize_before_compare:
# Normalized version
axoc.plot(cb[:, 0] / tots, 'r', label='r/(r+g+b)', lw=2)
axoc.plot(cb[:, 1] / tots, 'g', label='g/(r+g+b)', lw=2)
axoc.plot(cb[:, 2] / tots, 'b', label='b/(r+g+b)', lw=2)
axoc.set_ylabel('Normalized value')
else:
axoc.plot(cb[:, 0], 'r', label='r', lw=2)
axoc.plot(cb[:, 1], 'g', label='g', lw=2)
axoc.plot(cb[:, 2], 'b', label='b', lw=2)
axoc.plot(cb[:, 3], color='gray', linestyle='--', label='$\\alpha$')
axoc.plot(tots, 'k', label='r+g+b')
# Display the new colors with no mask, the mask, and the recolored section
axraw.imshow(b)
axm.imshow(index_valid)
axre.imshow(c)
# Display the final result
axf.imshow(c2)
# Finishing touches on plots
axo.legend(loc=0).set_draggable(True)
axoc.legend(loc=0).set_draggable(True)
plt.show()
return
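A minimal usage sketch. The filename and the pixel coordinates below are made up; you need to measure the colorbar and working-area corners on your own image (e.g. in an image editor), as described in the docstring:
# hypothetical example values; read the pixel coordinates off your own figure
recolor_image(
    filename='my_rainbow_figure.png',
    colorbar_loc=[[909, 22], [933, 959]],   # [[left, top], [right, bottom]] of the colorbar
    working_loc=[[95, 189], [857, 708]],    # [[left, top], [right, bottom]] of the area to recolor
    new_cmap='viridis',
    show_plot=True,
)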

How to smoothen 2D color map in matplotlib

My question is whether there is any way to smooth a 2D color map using matplotlib. My code:
def map():
# setup parameters
j = 0
N = 719
N2 = 35
x = np.linspace(190, 800, N)
y = np.linspace(10, 360, N2) # (1,2,3), 1 - start Temp, 2- end temp + 10K, 3 - how many steps to reach it
z = []
A = np.zeros([35,719]) # [1 2], 1 - number of spectras, 2 - delta wavelength
# run
for i in range(10,360,10):
Z = []
file_no = (str(0) + str(i))[-3:]
data = np.genfromtxt('C:\\Users\\micha_000\\Desktop\\Measure\\' + '160317_LaPONd_g500_%s_radio.txt'%file_no,skip_header = 12)
for line in data:
Z.append(line[1]-6000)
A[j,:] = Z
j = j+1
X, Y = np.meshgrid(x,y)
fig, ax = plt.subplots()
cs = ax.contourf(X, Y, A, cmap=cm.viridis)
norm = colors.Normalize(vmin = 0, vmax = 1)
plt.xlabel('wavelength [nm]')
plt.ylabel('temperature [K]')
plt.title('LaPONd_g500')
cbar = fig.colorbar(cs, norm = norm)
plt.savefig('C:\\Users\\micha_000\\Desktop\\Measure\\LaPONd_g500_radio_map.png')
plt.show()
plt.close()
And here is an example of what I receive:
Is there any way to make it look better by smoothing the pixel transitions?
The problem is not the palette (matplotlib's colormaps are all smooth), but the fact that you are using contourf(), which generates a finite set of contours, each with a single color, and is therefore not smooth. The default is something like 10 contours.
One quick solution: increase the number of contour levels by specifying levels (you can also give an array of which levels to include):
cs = ax.contourf(X, Y, A, cmap=cm.viridis, levels=100)
Better yet, since it seems your data is already on a grid (i.e. X, Y, Z values for each pixel), you should use pcolormesh(X, Y, A) instead of contourf to plot it. That will plot with fully continuous values, rather than steps.
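A minimal sketch of the pcolormesh variant on made-up placeholder data (the sine expression stands in for the measured spectra; shading='gouraud' additionally interpolates between grid points):
import numpy as np
import matplotlib.pyplot as plt

# placeholder data on the same wavelength/temperature grid as in the question
x = np.linspace(190, 800, 719)
y = np.linspace(10, 360, 35)
X, Y = np.meshgrid(x, y)
A = np.sin(X / 100.) * Y  # stand-in for the measured intensities

fig, ax = plt.subplots()
pcm = ax.pcolormesh(X, Y, A, cmap='viridis', shading='gouraud')
fig.colorbar(pcm)
ax.set_xlabel('wavelength [nm]')
ax.set_ylabel('temperature [K]')
plt.show()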
Open the PNG as an array and blur it with a mean value filter. Search for "convolution filter" to learn more. Here I've used a 5x5 (25-pixel) square averaging filter, but you could use a Gaussian kernel to make it look smoother.
import numpy as np
from scipy import ndimage, signal, misc
img = ndimage.imread('C:/.../Zrj50.png')
#I used msPaint to get coords... there's probably a better way
x0, y0, x1, y1 = 87,215,764,1270 #chart area (pixel coords)
#you could use a gaussian filter to get a rounder blur pattern
kernel = np.ones((5,5),)/25 #mean value convolution
#convolve roi with averaging filter
#red
img[x0:x1, y0:y1, 0] = signal.convolve2d(img[x0:x1, y0:y1, 0], kernel, mode='same', boundary='symm')
#green
img[x0:x1, y0:y1, 1] = signal.convolve2d(img[x0:x1, y0:y1, 1], kernel, mode='same', boundary='symm')
#blue
img[x0:x1, y0:y1, 2] = signal.convolve2d(img[x0:x1, y0:y1, 2], kernel, mode='same', boundary='symm')
#do it again for the legend area
#...
misc.imsave('C:/.../Zrj50_blurred.png', img)
Using a gaussian instead is pretty easy:
#red
img[x0:x1, y0:y1, 0] = ndimage.gaussian_filter(img[x0:x1, y0:y1, 0], 4, mode='nearest')
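To blur all three color channels without repeating that line, a small sketch that loops over the channels of the chart area defined above:
# apply the same Gaussian blur to the red, green and blue channels
for ch in range(3):
    img[x0:x1, y0:y1, ch] = ndimage.gaussian_filter(
        img[x0:x1, y0:y1, ch], 4, mode='nearest')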

In matplotlib, how can I plot a multi-colored line, like a rainbow

I would like to plot parallel lines with different colors. E.g. rather than a single red line of thickness 6, I would like to have two parallel lines of thickness 3, with one red and one blue.
Any thoughts would be appreciated.
Thanks.
Even with the smart offsetting (see below), there is still an issue in views that have sharp angles between consecutive points.
Zoomed view of smart offsetting:
Overlaying lines of varying thickness:
Plotting parallel lines is not an easy task. Using a simple uniform offset will of course not show the desired result. This is shown in the left picture below.
Such a simple offset can be produced in matplotlib as shown in the transformation tutorial.
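For reference, a minimal sketch of that naive uniform offset using matplotlib's transforms machinery (it shifts the second line by a fixed number of points in display space, which is exactly the behavior that does not give true parallel lines):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms

x = np.linspace(0, 9)
y = np.sin(x) * x / 3.

fig, ax = plt.subplots()
ax.plot(x, y, lw=3, color='r')
# uniform offset of 3 points upwards in display coordinates
trans = mtransforms.offset_copy(ax.transData, fig=fig, x=0, y=3, units='points')
ax.plot(x, y, lw=3, color='b', transform=trans)
plt.show()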
Method1
A better solution may be to use the idea sketched on the right side. To calculate the offset of the nth point, we can use the normal vector to the line between the (n-1)th and the (n+1)th point and move the same distance along this normal vector to obtain the offset point.
The advantage of this method is that the offset line has the same number of points as the original line. The disadvantage is that it is not completely accurate, as can be seen in the picture.
This method is implemented in the function offset in the code below.
In order to make this useful for a matplotlib plot, we need to consider that the linewidth should be independent of the data units. Linewidth is usually given in units of points, and the offset would best be given in the same unit, such that e.g. the requirement from the question ("two parallel lines of width 3") can be met.
The idea is therefore to transform the coordinates from data to display coordinates, using ax.transData.transform. Also the offset in points o can be transformed to the same units: Using the dpi and the standard of ppi=72, the offset in display coordinates is o*dpi/ppi. After the offset in display coordinates has been applied, the inverse transform (ax.transData.inverted().transform) allows a backtransformation.
Now there is another dimension of the problem: How to assure that the offset remains the same independent of the zoom and size of the figure?
This last point can be addressed by recalculating the offset each time a zooming or resizing event has taken place.
Here is how a rainbow curve produced by this method would look.
And here is the code to produce the image.
import numpy as np
import matplotlib.pyplot as plt
dpi = 100
def offset(x,y, o):
""" Offset coordinates given by array x,y by o """
X = np.c_[x,y].T
m = np.array([[0,-1],[1,0]])
R = np.zeros_like(X)
S = X[:,2:]-X[:,:-2]
R[:,1:-1] = np.dot(m, S)
R[:,0] = np.dot(m, X[:,1]-X[:,0])
R[:,-1] = np.dot(m, X[:,-1]-X[:,-2])
On = R/np.sqrt(R[0,:]**2+R[1,:]**2)*o
Out = On+X
return Out[0,:], Out[1,:]
def offset_curve(ax, x,y, o):
""" Offset array x,y in data coordinates
by o in points """
trans = ax.transData.transform
inv = ax.transData.inverted().transform
X = np.c_[x,y]
Xt = trans(X)
xto, yto = offset(Xt[:,0],Xt[:,1],o*dpi/72. )
Xto = np.c_[xto, yto]
Xo = inv(Xto)
return Xo[:,0], Xo[:,1]
# some single points
y = np.array([1,2,2,3,3,0])
x = np.arange(len(y))
#or try a sinus
x = np.linspace(0,9)
y=np.sin(x)*x/3.
fig, ax=plt.subplots(figsize=(4,2.5), dpi=dpi)
cols = ["#fff40b", "#00e103", "#ff9921", "#3a00ef", "#ff2121", "#af00e7"]
lw = 2.
lines = []
for i in range(len(cols)):
l, = plt.plot(x,y, lw=lw, color=cols[i])
lines.append(l)
def plot_rainbow(event=None):
xr = [None] * 6; yr = [None] * 6  # lists, so that items can be assigned below
xr[0],yr[0] = offset_curve(ax, x,y, lw/2.)
xr[1],yr[1] = offset_curve(ax, x,y, -lw/2.)
xr[2],yr[2] = offset_curve(ax, xr[0],yr[0], lw)
xr[3],yr[3] = offset_curve(ax, xr[1],yr[1], -lw)
xr[4],yr[4] = offset_curve(ax, xr[2],yr[2], lw)
xr[5],yr[5] = offset_curve(ax, xr[3],yr[3], -lw)
for i in range(6):
lines[i].set_data(xr[i], yr[i])
plot_rainbow()
fig.canvas.mpl_connect("resize_event", plot_rainbow)
fig.canvas.mpl_connect("button_release_event", plot_rainbow)
plt.savefig(__file__+".png", dpi=dpi)
plt.show()
Method2
To avoid overlapping lines, one has to use a more complicated solution.
One could first offset every point normal to the two line segments it is part of (green points in the picture below). Then calculate the line through those offset points and find their intersection.
A particular case arises when the slopes of two subsequent line segments are equal. This has to be taken care of (eps in the code below).
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
dpi = 100
def intersect(p1, p2, q1, q2, eps=1.e-10):
""" given two lines, first through points pn, second through qn,
find the intersection """
x1 = p1[0]; y1 = p1[1]; x2 = p2[0]; y2 = p2[1]
x3 = q1[0]; y3 = q1[1]; x4 = q2[0]; y4 = q2[1]
nomX = ((x1*y2-y1*x2)*(x3-x4)- (x1-x2)*(x3*y4-y3*x4))
denom = float( (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4) )
nomY = (x1*y2-y1*x2)*(y3-y4) - (y1-y2)*(x3*y4-y3*x4)
if np.abs(denom) < eps:
#print "intersection undefined", p1
return np.array( p1 )
else:
return np.array( [ nomX/denom , nomY/denom ])
def offset(x,y, o, eps=1.e-10):
""" Offset coordinates given by array x,y by o """
X = np.c_[x,y].T
m = np.array([[0,-1],[1,0]])
S = X[:,1:]-X[:,:-1]
R = np.dot(m, S)
norm = np.sqrt(R[0,:]**2+R[1,:]**2) / o
On = R/norm
Outa = On+X[:,1:]
Outb = On+X[:,:-1]
G = np.zeros_like(X)
for i in range(0, len(X[0,:])-2):
p = intersect(Outa[:,i], Outb[:,i], Outa[:,i+1], Outb[:,i+1], eps=eps)
G[:,i+1] = p
G[:,0] = Outb[:,0]
G[:,-1] = Outa[:,-1]
return G[0,:], G[1,:]
def offset_curve(ax, x,y, o, eps=1.e-10):
""" Offset array x,y in data coordinates
by o in points """
trans = ax.transData.transform
inv = ax.transData.inverted().transform
X = np.c_[x,y]
Xt = trans(X)
xto, yto = offset(Xt[:,0],Xt[:,1],o*dpi/72., eps=eps )
Xto = np.c_[xto, yto]
Xo = inv(Xto)
return Xo[:,0], Xo[:,1]
# some single points
y = np.array([1,1,2,0,3,2,1.,4,3]) *1.e9
x = np.arange(len(y))
x[3]=x[4]
#or try a sinus
#x = np.linspace(0,9)
#y=np.sin(x)*x/3.
fig, ax=plt.subplots(figsize=(4,2.5), dpi=dpi)
cols = ["r", "b"]
lw = 11.
lines = []
for i in range(len(cols)):
l, = plt.plot(x,y, lw=lw, color=cols[i], solid_joinstyle="miter")
lines.append(l)
def plot_rainbow(event=None):
xr = [None] * 2; yr = [None] * 2
xr[0],yr[0] = offset_curve(ax, x,y, lw/2.)
xr[1],yr[1] = offset_curve(ax, x,y, -lw/2.)
for i in range(2):
lines[i].set_data(xr[i], yr[i])
plot_rainbow()
fig.canvas.mpl_connect("resize_event", plot_rainbow)
fig.canvas.mpl_connect("button_release_event", plot_rainbow)
plt.show()
Note that this method should work well as long as the offset between the lines is smaller than the distance between subsequent points on the line. Otherwise method 1 may be better suited.
The best that I can think of is to take your data, generate a series of small offsets, and use fill_between to make bands of whatever color you like.
I wrote a function to do this. I don't know what shape you're trying to plot, so this may or may not work for you. I tested it on a parabola and got decent results. You can also play around with the list of colors.
import numpy as np
import matplotlib.pyplot as plt

def rainbow_plot(x, y, spacing=0.1):
fig, ax = plt.subplots()
colors = ['red', 'yellow', 'green', 'cyan','blue']
top = max(y)
lines = []
for i in range(len(colors)+1):
newline_data = y - top*spacing*i
lines.append(newline_data)
for i, c in enumerate(colors):
ax.fill_between(x, lines[i], lines[i+1], facecolor=c)
return fig, ax
x = np.linspace(0, 1, 51)
y = 1 - (x - 0.5)**2
rainbow_plot(x, y)
plt.show()

border/edge operations on numpy arrays

Suppose I have a 3D numpy array of nonzero values and "background" = 0. As an example I will take a sphere of random values:
array = np.random.randint(1, 5, size = (100,100,100))
z,y,x = np.ogrid[-50:50, -50:50, -50:50]
mask = x**2 + y**2 + z**2<= 20**2
array[np.invert(mask)] = 0
First, I would like to find the "border voxels" (all nonzero values that have a zero within their 3x3x3 neighbourhood). Second, I would like to replace all border voxels with the mean of their nonzero neighbours. So far I tried to use scipy's generic filter in the following way:
Function to apply at each element:
def borderCheck(values):
#check if the footprint center is on a nonzero value
if values[13] != 0:
#replace border voxels with the mean of nonzero neighbours
if 0 in values:
return np.sum(values)/np.count_nonzero(values)
else:
return values[13]
else:
return 0
Generic filter:
from scipy import ndimage
result = ndimage.generic_filter(array, borderCheck, footprint = np.ones((3,3,3)))
Is this a proper way to handle this problem? I feel that I am trying to reinvent the wheel here and that there must be a shorter, nicer way to achieve the result. Are there any other suitable (numpy, scipy ) functions that I can use?
EDIT
I messed one thing up: I would like to replace all border voxels with the mean of their nonzero AND non-border neighbours. For this, I tried to clean up the neighbours from ali_m's code (2D case):
#for each neighbour voxel, check whether it also appears in the border/edges
non_border_neighbours = []
for each in neighbours:
non_border_neighbours.append([i for i in each if nonzero_idx[i] not in edge_idx])
Now I can't figure out why non_border_neighbours comes back empty.
Furthermore, correct me if I am wrong, but doesn't tree.query_ball_point with radius 1 address only the 6 nearest neighbours (Euclidean distance 1)? Should I set sqrt(3) (3D case) as the radius to get the 26-neighbourhood?
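(On the radius question: query_ball_point uses the Euclidean metric by default, so r=1 would indeed only reach the 6 face neighbours; the answer below passes p=np.inf, i.e. the Chebyshev metric, for which r=1 does cover the full 26-neighbourhood. A quick sanity check:)
import numpy as np
from scipy.spatial import cKDTree

# the 27 integer points of a 3x3x3 cube centred on the origin
pts = np.array(list(np.ndindex(3, 3, 3))) - 1
tree = cKDTree(pts)
print(len(tree.query_ball_point([0, 0, 0], r=1)))             # 7: centre + 6 face neighbours (Euclidean)
print(len(tree.query_ball_point([0, 0, 0], r=1, p=np.inf)))   # 27: centre + full 26-neighbourhood (Chebyshev)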
I think it's best to start out with the 2D case first, since it can be visualized much more easily:
import numpy as np
from matplotlib import pyplot as plt
A = np.random.randint(1, 5, size=(100, 100)).astype(np.double)
y, x = np.ogrid[-50:50, -50:50]
mask = x**2 + y**2 <= 30**2
A[~mask] = 0
To find the edge pixels you could perform binary erosion on your mask, then XOR the result with your mask
# rank 2 structure with full connectivity
struct = ndimage.generate_binary_structure(2, 2)
erode = ndimage.binary_erosion(mask, struct)
edges = mask ^ erode
One approach to find the nearest non-zero neighbours of each edge pixel would be to use a scipy.spatial.cKDTree:
from scipy.spatial import cKDTree
# the indices of the non-zero locations and their corresponding values
nonzero_idx = np.vstack(np.where(mask)).T
nonzero_vals = A[mask]
# build a k-D tree
tree = cKDTree(nonzero_idx)
# use it to find the indices of all non-zero values that are at most 1 pixel
# away from each edge pixel
edge_idx = np.vstack(np.where(edges)).T
neighbours = tree.query_ball_point(edge_idx, r=1, p=np.inf)
# take the average value for each set of neighbours
new_vals = np.hstack([np.mean(nonzero_vals[n]) for n in neighbours])
# use these to replace the values of the edge pixels
A_new = A.astype(np.double, copy=True)
A_new[edges] = new_vals
Some visualisation:
fig, ax = plt.subplots(1, 3, figsize=(10, 4), sharex=True, sharey=True)
norm = plt.Normalize(0, A.max())
ax[0].imshow(A, norm=norm)
ax[0].set_title('Original', fontsize='x-large')
ax[1].imshow(edges)
ax[1].set_title('Edges', fontsize='x-large')
ax[2].imshow(A_new, norm=norm)
ax[2].set_title('Averaged', fontsize='x-large')
for aa in ax:
aa.set_axis_off()
ax[0].set_xlim(20, 50)
ax[0].set_ylim(50, 80)
fig.tight_layout()
plt.show()
This approach will also generalize to the 3D case:
B = np.random.randint(1, 5, size=(100, 100, 100)).astype(np.double)
z, y, x = np.ogrid[-50:50, -50:50, -50:50]
mask = x**2 + y**2 + z**2 <= 20**2
B[~mask] = 0
struct = ndimage.generate_binary_structure(3, 3)
erode = ndimage.binary_erosion(mask, struct)
edges = mask ^ erode
nonzero_idx = np.vstack(np.where(mask)).T
nonzero_vals = B[mask]
tree = cKDTree(nonzero_idx)
edge_idx = np.vstack(np.where(edges)).T
neighbours = tree.query_ball_point(edge_idx, r=1, p=np.inf)
new_vals = np.hstack([np.mean(nonzero_vals[n]) for n in neighbours])
B_new = B.astype(np.double, copy=True)
B_new[edges] = new_vals
Test against your version:
def borderCheck(values):
#check if the footprint center is on a nonzero value
if values[13] != 0:
#replace border voxels with the mean of nonzero neighbours
if 0 in values:
return np.sum(values)/np.count_nonzero(values)
else:
return values[13]
else:
return 0
result = ndimage.generic_filter(B, borderCheck, footprint=np.ones((3, 3, 3)))
print(np.allclose(B_new, result))
# True
I'm sure this isn't the most efficient way to do it, but it will still be significantly faster than using generic_filter.
Update
The performance could be further improved by reducing the number of points that are considered as candidate neighbours of the edge pixels/voxels:
# ...
# the edge pixels/voxels plus their immediate non-zero neighbours
erode2 = ndimage.binary_erosion(erode, struct)
candidate_neighbours = mask ^ erode2
nonzero_idx = np.vstack(np.where(candidate_neighbours)).T
nonzero_vals = B[candidate_neighbours]
# ...

Gabor filter bank - modification of display template

To better understand Gabor filters and kernels in image processing, I am trying to place my own images into the Gabor texture comparison template from the scikit-image site.
The modification I made was adding the first line below and pointing the second line to my new variable; the original code had a structurally parallel line in the code quoted below.
img=plt.imread('greenBalloon.png') #This is code I added to read in the local png file
brick = img_as_float(img)[shrink] #Using pre-existing line with my swapped variable
I continue to get errors when trying to swap out the included sample data file with another png present in the working directory. I get the following error:
"RuntimeError: filter weights array has incorrect shape."
How should I re-write this code to pull in a locally saved image or, preferably, set of images in place of the sample(s)?
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as nd
from skimage import data
from skimage.util import img_as_float
from skimage.filter import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = nd.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
img=plt.imread('greenBalloon.png') #This is code I added to read in the local png file
brick = img_as_float(img)[shrink] #Using pre-existing line with my swapped variable
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(nd.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(nd.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(nd.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(nd.convolve(image, np.real(kernel), mode='wrap')**2 +
nd.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
Any other insights on processing images with Gabor patches would be quite helpful.
Just use a black-and-white (grayscale) version of the image to run the algorithm.
To do that, pass the argument as_grey=True to imread:
image = skimage.io.imread(image_path,as_grey=True)
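If you prefer to keep plt.imread, an equivalent sketch that converts an RGB/RGBA PNG to grayscale before slicing (assuming 'greenBalloon.png' is in the working directory and shrink is the slicing tuple from the question's code; the original error comes from convolving a 2D Gabor kernel with a 3D colour array):
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.util import img_as_float

img = plt.imread('greenBalloon.png')   # RGB or RGBA array
if img.ndim == 3:
    img = rgb2gray(img[..., :3])       # drop alpha (if any) and convert to a 2D grayscale array
brick = img_as_float(img)[shrink]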
