I am trying to convert coordinates from one system to another using the affine transformation method in Python. The code seems to run, but the output looks strange. Here is my code:
import json
import math
import numpy as np
labels = open('labels3.txt')
read_ready = labels.read()
JSONfile = json.loads(read_ready)
#input_sys should be the ground-plane (georeferenced) image coordinates
#note on this set of coordinates: assuming the points aren't rotated; rotation may be introduced at the very end
input_sys = np.array([[0,0,0],[0,23133,0],[35093,23133,0],[35093,0,0]])
#output system should be the slant-range coords; these aren't too special, should be pretty straightforward
output_sys = np.array([[0,0,0],[0,16384,0],[16384,16384,0],[16384,0,0]])
#now find which point should be transformed
p = np.array([7954,9338,0])
#finding the transformation
l = len(input_sys)
entry = lambda r,d: np.linalg.det(np.delete(np.vstack([r,input_sys.T,np.ones(l)]),d,axis = 0))
M = np.array([[(-1)**i * entry(R,i) for R in output_sys.T] for i in range(l+1)])
A,t = np.hsplit(M[1:].T/(-M[0])[:,None], [l-1])
t = np.transpose(t)[0]
#output transform
print("Affine transformation matrix:\n", A)
print("Affine transformation translation vector:\n", t)
#print("Testing:")
#for p,P in zip(np.array(input_sys), np.array(output_sys)):
#image_p = np.dot(A,p)+t
#result ="[OK]" if np.allclose(image_p,P) else"[ERROR]"
#print(p, " mapped to: ", image_p, " ; expected: ", P, result)
#print('Calculation:')
P = np.dot(A,p)+t
print(p,'mapped to:', P)
The output for the matrix M is a zero matrix, which is not correct. Can someone help me find a way to get a nonzero matrix out for M?
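For reference, here is what I'd expect from a plain least-squares fit of the same mapping (a sketch only; since all of my z coordinates are zero I drop the third axis, and I suspect that degenerate dimension is also why the determinants vanish):

import numpy as np
# hypothetical 2D version of the fit: solve output ≈ [input, 1] @ coef
inp = np.array([[0, 0], [0, 23133], [35093, 23133], [35093, 0]], dtype=float)
out = np.array([[0, 0], [0, 16384], [16384, 16384], [16384, 0]], dtype=float)
X = np.hstack([inp, np.ones((4, 1))])       # augment with ones so the translation is fitted too
coef, *_ = np.linalg.lstsq(X, out, rcond=None)
A = coef[:2].T                              # 2x2 linear part
t = coef[2]                                 # translation vector
p = np.array([7954, 9338], dtype=float)
print(A @ p + t)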
I'm trying to get cosine similarity for 2 sets of data (with unequal lengths).
The test set contains 4 random, similar images from Google.
The training set contains 1 image similar to the test set, also from Google.
Below is the code I'm using to do this by converting each image to a vector and calculating cosine similarity:
import os
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from img_to_vec import Img2Vec
import numpy as np

test_path = '/Users/Desktop/img_vec/test'
train_path = '/Users/Desktop/img_vec/train'

print("Getting vectors for test images...\n")
img2vec = Img2Vec()

# For each test image, we store the filename and vector as key, value in a dictionary
pics = {}
for file in os.listdir(test_path):
    filename = os.fsdecode(file)
    img = Image.open(os.path.join(test_path, filename))
    vec = img2vec.get_vec(img)
    pics[filename] = vec
# print(pics)

pic_name = {}
for file1 in os.listdir(train_path):
    filename1 = os.fsdecode(file1)
    img1 = Image.open(os.path.join(train_path, filename1))
    vec1 = img2vec.get_vec(img1)
    pic_name[filename1] = vec1
# print(pic_name)

vec1 = np.array([pics])
vec2 = np.array([pic_name])

sims = {}
for key in list(pics.keys()):
    print(key)
    sims[key] = cosine_similarity(vec1[vec2].reshape((1, -1)), vec1[key].reshape((1, -1)))[0][0]

d_view = [(v, k) for k, v in sims.items()]
d_view.sort(reverse=True)
for v, k in d_view:
    print(v, k)
However, I'm unable to resolve the following error:
sims[key] = cosine_similarity(vec1[vec2].reshape((1, -1)), vec1[key].reshape((1, -1)))[0][0]
IndexError: arrays used as indices must be of integer (or boolean) type
I tried to compute cosine similarity in Python both manually (using numpy) and by using a specialised library. It doesn't work. I believe it's an issue with dtype.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# vectors
a = np.array([1,2,3])
b = np.array([1,1,4])
# manually compute cosine similarity
dot = np.dot(a, b)
norma = np.linalg.norm(a)
normb = np.linalg.norm(b)
cos = dot / (norma * normb)
# use library, operates on sets of vectors
aa = a.reshape(1,3)
ba = b.reshape(1,3)
cos_lib = cosine_similarity(aa, ba)
Any help / guidance / alternative is much appreciated.
vec1 = np.array([pics])
vec2 = np.array([pic_name])
I don't see the need to do this.
Also, in the line where the error occurs, the problem is at:
vec1[vec2].reshape((1, -1))
because you're indexing vec1 using vec2. I suppose you meant to put key instead of vec2.
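A hedged sketch of what I mean, reading the vectors straight from the two dictionaries (using the same variable names as your code):

sims = {}
for test_name, test_vec in pics.items():
    for train_name, train_vec in pic_name.items():
        # cosine_similarity expects 2D arrays, hence the reshapes
        sims[(test_name, train_name)] = cosine_similarity(
            test_vec.reshape(1, -1), train_vec.reshape(1, -1))[0][0]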
I'm trying to interpolate between two images in Python.
The images are of shape (188, 188).
I wish to interpolate the image 'in-between' these two images. Say Image_1 is at location z=0 and Image_2 is at location z=2. I want the interpolated image at location z=1.
I believe this answer (MATLAB) contains a similar problem and solution.
Creating intermediate slices in a 3D MRI volume with MATLAB
I've tried to convert this code to Python as follows:
import numpy as np
from scipy.interpolate import interpn
from scipy.interpolate import griddata

# Construct 3D volume from images
# arr.shape = (2, 182, 182)
arr = np.r_['0,3', image_1, image_2]
slices, rows, cols = arr.shape

# Construct meshgrids
[X, Y, Z] = np.meshgrid(np.arange(cols), np.arange(rows), np.arange(slices))
[X2, Y2, Z2] = np.meshgrid(np.arange(cols), np.arange(rows), np.arange(slices*2))

# Run n-dim interpolation
Vi = interpn([X, Y, Z], arr, np.array([X2, Y2, Z2]).T)
However, this produces an error:
ValueError: The points in dimension 0 must be strictly ascending
I suspect I am not constructing my meshgrid(s) properly but am kind of lost on whether or not this approach is correct.
Any ideas?
---------- Edit -----------
Found some MATLAB code that appears to solve this problem:
Interpolating Between Two Planes in 3d space
I attempted to convert this to Python:
from scipy.ndimage.morphology import distance_transform_edt
from scipy.interpolate import interpn

def ndgrid(*args, **kwargs):
    """
    Same as calling ``meshgrid`` with *indexing* = ``'ij'`` (see
    ``meshgrid`` for documentation).
    """
    kwargs['indexing'] = 'ij'
    return np.meshgrid(*args, **kwargs)

def bwperim(bw, n=4):
    """
    perim = bwperim(bw, n=4)
    Find the perimeter of objects in binary images.
    A pixel is part of an object perimeter if its value is one and there
    is at least one zero-valued pixel in its neighborhood.
    By default the neighborhood of a pixel is 4 nearest pixels, but
    if `n` is set to 8 the 8 nearest pixels will be considered.
    Parameters
    ----------
    bw : A black-and-white image
    n : Connectivity. Must be 4 or 8 (default: 8)
    Returns
    -------
    perim : A boolean image
    From Mahotas: http://nullege.com/codes/search/mahotas.bwperim
    """
    if n not in (4, 8):
        raise ValueError('mahotas.bwperim: n must be 4 or 8')
    rows, cols = bw.shape

    # Translate image by one pixel in all directions
    north = np.zeros((rows, cols))
    south = np.zeros((rows, cols))
    west = np.zeros((rows, cols))
    east = np.zeros((rows, cols))

    north[:-1, :] = bw[1:, :]
    south[1:, :] = bw[:-1, :]
    west[:, :-1] = bw[:, 1:]
    east[:, 1:] = bw[:, :-1]
    idx = (north == bw) & \
          (south == bw) & \
          (west == bw) & \
          (east == bw)
    if n == 8:
        north_east = np.zeros((rows, cols))
        north_west = np.zeros((rows, cols))
        south_east = np.zeros((rows, cols))
        south_west = np.zeros((rows, cols))
        north_east[:-1, 1:] = bw[1:, :-1]
        north_west[:-1, :-1] = bw[1:, 1:]
        south_east[1:, 1:] = bw[:-1, :-1]
        south_west[1:, :-1] = bw[:-1, 1:]
        idx &= (north_east == bw) & \
               (south_east == bw) & \
               (south_west == bw) & \
               (north_west == bw)
    return ~idx * bw

def signed_bwdist(im):
    '''
    Find perim and return masked image (signed/reversed)
    '''
    im = -bwdist(bwperim(im))*np.logical_not(im) + bwdist(bwperim(im))*im
    return im

def bwdist(im):
    '''
    Find distance map of image
    '''
    dist_im = distance_transform_edt(1-im)
    return dist_im

def interp_shape(top, bottom, num):
    if num < 0 and round(num) == num:
        print("Error: number of slices to be interpolated must be integer > 0")

    top = signed_bwdist(top)
    bottom = signed_bwdist(bottom)

    r, c = top.shape
    t = num + 2

    print("Rows - Cols - Slices")
    print(r, c, t)
    print("")

    # rejoin top, bottom into a single array of shape (2, r, c)
    # MATLAB: cat(3,bottom,top)
    top_and_bottom = np.r_['0,3', top, bottom]
    #top_and_bottom = np.rollaxis(top_and_bottom, 0, 3)

    # create ndgrids
    x, y, z = np.mgrid[0:r, 0:c, 0:t-1]   # existing data
    x1, y1, z1 = np.mgrid[0:r, 0:c, 0:t]  # including new slice

    print("Shape x y z:", x.shape, y.shape, z.shape)
    print("Shape x1 y1 z1:", x1.shape, y1.shape, z1.shape)
    print(top_and_bottom.shape, len(x), len(y), len(z))

    # Do interpolation
    out = interpn((x, y, z), top_and_bottom, (x1, y1, z1))

    # MATLAB: out = out(:,:,2:end-1)>=0;
    array_lim = out[-1]-1
    out[out[:, :, 2:out] >= 0] = 1

    return out
I call this as follows:
new_image = interp_shape(image_1, image_2, 1)
I'm pretty sure this is 80% of the way there, but I still get this error when running:
ValueError: The points in dimension 0 must be strictly ascending
Again, I am probably not constructing my meshes correctly. I believe np.mgrid should produce the same result as MATLAB's ndgrid, though.
Is there a better way to construct the ndgrid equivalents?
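As a sanity check on that assumption, a minimal comparison of np.mgrid against np.meshgrid with indexing='ij' (which is what MATLAB's ndgrid does):

import numpy as np
a, b = np.mgrid[0:3, 0:4]
A, B = np.meshgrid(np.arange(3), np.arange(4), indexing='ij')
print(np.array_equal(a, A), np.array_equal(b, B))  # True True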
I figured this out. Or at least a method that produces desirable results.
Based on: Interpolating Between Two Planes in 3d space
def signed_bwdist(im):
    '''
    Find perim and return masked image (signed/reversed)
    '''
    im = -bwdist(bwperim(im))*np.logical_not(im) + bwdist(bwperim(im))*im
    return im

def bwdist(im):
    '''
    Find distance map of image
    '''
    dist_im = distance_transform_edt(1-im)
    return dist_im

def interp_shape(top, bottom, precision):
    '''
    Interpolate between two contours

    Input: top
               [X,Y] - Image of top contour (mask)
           bottom
               [X,Y] - Image of bottom contour (mask)
           precision
               float - % between the images to interpolate
               Ex: precision=0.5 - Interpolate the middle image between top and bottom image
    Output: out
               [X,Y] - Interpolated image at precision (%) between top and bottom
    '''
    if precision > 2:
        print("Error: Precision must be between 0 and 1 (float)")

    top = signed_bwdist(top)
    bottom = signed_bwdist(bottom)

    # row,cols definition
    r, c = top.shape

    # Reverse % indexing
    precision = 1+precision

    # rejoin top, bottom into a single array of shape (2, r, c)
    top_and_bottom = np.stack((top, bottom))

    # create ndgrids
    points = (np.r_[0, 2], np.arange(r), np.arange(c))
    xi = np.rollaxis(np.mgrid[:r, :c], 0, 3).reshape((r**2, 2))
    xi = np.c_[np.full((r**2), precision), xi]

    # Interpolate for new plane
    out = interpn(points, top_and_bottom, xi)
    out = out.reshape((r, c))

    # Threshold distmap to values above 0
    out = out > 0

    return out

# Run interpolation
out = interp_shape(image_1, image_2, 0.5)
Example output:
I came across a similar problem where I needed to interpolate the shift between frames where the change was not merely a translation but also a change to the shape itself. I solved this problem by:
Using center_of_mass from scipy.ndimage.measurements to calculate the center of the object we want to move in each frame
Defining a continuous parameter t, where t=0 is the first and t=1 the last frame
Interpolating the motion between the two nearest frames (with regard to a specific t value) by shifting the images backward/forward via shift from scipy.ndimage.interpolation and overlaying them.
Here is the code:
import numpy as np
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.interpolation import shift

def inter(images, t):
    #input:
    # images: list of arrays/frames ordered according to motion
    # t: parameter ranging from 0 to 1 corresponding to first and last frame
    #returns: interpolated image

    #direction of movement, assumed to be approx. linear
    a = np.array(center_of_mass(images[0]))
    b = np.array(center_of_mass(images[-1]))

    #find index of two nearest frames
    arr = np.array([center_of_mass(images[i]) for i in range(len(images))])
    v = a + t*(b-a)  #convert t into vector
    idx1 = (np.linalg.norm((arr - v), axis=1)).argmin()
    arr[idx1] = np.array([0, 0])  #this is sloppy, should be changed if relevant values are near [0,0]
    idx2 = (np.linalg.norm((arr - v), axis=1)).argmin()

    if idx1 > idx2:
        b = np.array(center_of_mass(images[idx1]))  #center of mass of nearest contour
        a = np.array(center_of_mass(images[idx2]))  #center of mass of second nearest contour
        tstar = np.linalg.norm(v-a)/np.linalg.norm(b-a)  #parameter from 0 to 1 for interpolation between the two nearest frames
        im1_shift = shift(images[idx2], (b-a)*tstar)     #shift frame 1
        im2_shift = shift(images[idx1], -(b-a)*(1-tstar))  #shift frame 2
        return im1_shift + im2_shift  #return average
    if idx1 < idx2:
        b = np.array(center_of_mass(images[idx2]))
        a = np.array(center_of_mass(images[idx1]))
        tstar = np.linalg.norm(v-a)/np.linalg.norm(b-a)
        im1_shift = shift(images[idx2], -(b-a)*(1-tstar))
        im2_shift = shift(images[idx1], (b-a)*tstar)
        return im1_shift + im2_shift
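A hypothetical usage, assuming image_1 and image_2 are same-shaped arrays (names borrowed from the question):

frames = [image_1, image_2]   # frames ordered according to motion
halfway = inter(frames, 0.5)  # interpolated image at t = 0.5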
Result example
I don't know the solution to your problem, but I don't think it's possible to do this with interpn.
I corrected the code that you tried, and used the following input images:
But the result is:
Here's the corrected code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import interpolate
n = 8
img1 = np.zeros((n, n))
img2 = np.zeros((n, n))
img1[2:4, 2:4] = 1
img2[4:6, 4:6] = 1
plt.figure()
plt.imshow(img1, cmap=cm.Greys)
plt.figure()
plt.imshow(img2, cmap=cm.Greys)
points = (np.r_[0, 2], np.arange(n), np.arange(n))
values = np.stack((img1, img2))
xi = np.rollaxis(np.mgrid[:n, :n], 0, 3).reshape((n**2, 2))
xi = np.c_[np.ones(n**2), xi]
values_x = interpolate.interpn(points, values, xi, method='linear')
values_x = values_x.reshape((n, n))
print(values_x)
plt.figure()
plt.imshow(values_x, cmap=cm.Greys)
plt.clim((0, 1))
plt.show()
I think the main difference between your code and mine is in the specification of xi. interpn tends to be somewhat confusing to use; if you're curious about the mechanics of how I've specified xi, see this older answer of mine explaining what I've done.
This result is not entirely surprising, because interpn just linearly interpolated between the two images: so the parts which had 1 in one image and 0 in the other simply became 0.5.
Over here, since one image is the translation of the other, it's clear that we want an image that's translated "in-between". But how would interpn interpolate two general images? If you had one small circle and one big circle, is it in any way clear that there should be a circle of intermediate size "between" them? What about interpolating between a dog and a cat? Or a dog and a building?
I think you are essentially trying to "draw lines" connecting the edges of the two images and then trying to figure out the image in between. This is similar to sampling a moving video at a half-frame. You might want to check out something like optical flow, which connects adjacent frames using vectors. I'm not aware of which Python packages/implementations are available, though.
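For what it's worth, here is a rough sketch of the optical-flow idea using OpenCV's Farneback estimator (cv2, i.e. opencv-python, is an assumption here, and a real frame interpolator would also handle occlusions and forward/backward consistency):

import cv2
import numpy as np

def halfway_via_flow(img1, img2):
    # dense optical flow from img1 to img2; inputs assumed 8-bit single-channel
    flow = cv2.calcOpticalFlowFarneback(img1, img2, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    h, w = img1.shape
    grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
    # crude backward warp halfway along the flow field
    map_x = (grid_x + 0.5 * flow[..., 0]).astype(np.float32)
    map_y = (grid_y + 0.5 * flow[..., 1]).astype(np.float32)
    return cv2.remap(img1, map_x, map_y, cv2.INTER_LINEAR)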
Is there any python implementation of MINRES pseudoinversion algorithm that can deal with Hermitian matrices?
I have found a few sources, but all of them are only capable of working with real matrices and do not seem to be easily generalizable to the complex case:
https://searchcode.com/codesearch/view/89958680/
https://github.com/pascanur/theano_optimize
(there are a couple of other links, but my reputation does not allow me to post them)
A Hermitian system of size $n$
$$\mathbf y = \mathbf H^{-1}\mathbf v$$
can be embedded in a real, symmetric system of size $2n$:
\begin{equation}
\begin{bmatrix}
\Re(\mathbf y)\\ \Im(\mathbf y)
\end{bmatrix}=
\begin{bmatrix}
\Re(\mathbf H)&-\Im(\mathbf H)\\ \Im(\mathbf H)&\Re(\mathbf H)
\end{bmatrix}^{-1}
\begin{bmatrix}
\Re(\mathbf v)\\ \Im(\mathbf v)
\end{bmatrix}.
\end{equation}
Minimum-residual methods are often used for large problems, where constructing $\mathbf H$ is impractical. In which case we may have an operation which computes a matrix-vector product, $f: \mathbb C^n \to \mathbb C^n,\; f(\mathbf v) = \mathbf H\mathbf v$. This function can be wrapped to operate on $\mathbf x \in \mathbb R^{2n}$ by converting $\mathbf x$ back to a complex vector, applying $f$, and then embedding the result back in $\mathbb R^{2n}$.
Here is an example in python / numpy / scipy:
from scipy.sparse.linalg import minres, LinearOperator
from pylab import *

# Problem size
N = 100

# error helper
er = lambda t, a, b: print('%s error:' % t, mean(abs(a-b)))

# random Hermitian matrix
Q = randn(N, N) + 1j*randn(N, N)
H = Q @ conj(Q.T)

# random complex vector
v = randn(N) + 1j*randn(N)

# ground-truth solution
x0 = inv(H) @ v

# Pack/unpack complex vector as stacked real vector
c2r = lambda v: block([real(v), imag(v)])
r2c = lambda v: kron([1, 1j], eye(N)) @ v

# Verify that we can embed C^n in R^(2n)
Hr = real(H)
Hi = imag(H)
Hs = block([[Hr, -Hi], [Hi, Hr]])
vs = c2r(v)
xs = inv(Hs) @ vs
x1 = r2c(xs)
er('Embed', x0, x1)

# Verify that minres works as expected in R-embed
x2 = r2c(minres(Hs, vs, tol=1e-12)[0])
er('Minres 1', x0, x2)

# Demonstrate using operators
Av = lambda u: c2r(H @ r2c(u))
A = LinearOperator((N*2,)*2, Av, Av)

# Minres, converting input/output to/from complex/real
x3 = r2c(minres(A, vs, tol=1e-12)[0])
er('Minres 2', x0, x3)
>>> Embed error: 5.317184726020268e-12
>>> Minres 1 error: 6.641342200989796e-11
>>> Minres 2 error: 6.641342200989796e-11
I'm applying convolution techniques to convolve 2 datasets: a healpix map with nside = 256 and a primary beam of shape (256, 256), in order to measure the total intensity from the convolved healpix map. My problem is that after convolving my map with the primary beam I get rings in my convolved map. I've tried normalizing it with either a Lanczos or Gaussian kernel to take care of the rings, but all these approaches have failed.
In my code below, I used the query function in scipy to search for the nearest pixels in my healpix map within a given radius, and I take the sum of the product with the corresponding pixels in the primary beam sampled via map_coordinates. The final image I get has rings in it. Please can anyone help me solve this problem? Thanks in advance.
def query_npix(nside, npix, radius):
    print 'searching for nearest pixels:......'
    t1, t2 = hp.pix2ang(nside, np.arange(npix))
    tree = spatial.cKDTree(zip(t1, t2))
    dist, ipix_indx = tree.query(zip(t1, t2), k=150, distance_upper_bound=radius)
    r1, r2 = hp.pix2ang(nside, ipix_indx)
    ra = r1.T - t1
    dec = r2.T - t2
    print 'Done searching'
    return np.array(dist), np.array(ipix_indx), np.array(ra.T), np.array(dec.T)

def fullSky_convolve(healpix_map, primary_beam_fits, ipix_indx, dist, radius, r1, r2):
    measured_map = []
    hdulist = openFitsFile(primary_beam_fits)
    beam_data = hdulist[0].data
    header = hdulist[0].header
    nside = hp.get_nside(healpix_map[0, ...])
    npix = hp.get_map_size(healpix_map[0, ...])  # total number of pixels in the map must be 12 * nside^2
    crpix1, crval1, cdelt1 = [header.get(x) for x in ("CRPIX1", "CRVAL1", "CDELT1")]
    crpix2, crval2, cdelt2 = [header.get(x) for x in ("CRPIX2", "CRVAL2", "CDELT2")]

    # beam centres in pixel coordinates
    xc = crpix1-1 + (np.rad2deg(r1.ravel()) - crval1)/(256*cdelt1)
    yc = crpix2-1 + (np.rad2deg(r2.ravel()) - crval2)/(256*cdelt2)
    #xc = (np.rad2deg(r1.ravel()) )/cdelt1

    for j in xrange(4):
        print 'started Stokes: %d' % j
        for iter in xrange(0 + j, 16, 4):
            outpt = np.zeros(shape=npix, dtype=np.float64)
            #by = outpt.copy()

            # mask beam
            bm_data = beam_data[iter]
            #masked_beam = beam_data[iter]
            shape = bm_data.shape
            rad = np.linspace(-shape[0]/2, shape[-1]/2, shape[0])
            rad2d = np.sqrt(rad[np.newaxis, :]**2 + rad[:, np.newaxis]**2)
            mask = rad2d <= radius/abs(cdelt2)
            masked_beam = bm_data*mask

            s1 = ndimage.map_coordinates(masked_beam, [xc, yc], mode='constant')
            bm_map = s1.reshape(dist.shape[0], dist.shape[-1])

            for itr in xrange(npix):
                g_xy = (1.0/(np.sqrt(2*np.pi)*np.std(dist[itr])))*np.exp(-(dist[itr])**2/(2*np.var(dist[itr])))
                #weighted_healpix_map = np.convolve(healpix_map[j, ...][ipix_indx[itr]], g_xy/g_xy.sum(), mode='same')
                weighted_healpix_map = ndimage.filters.convolve(healpix_map[j, ...][ipix_indx[itr]], g_xy/g_xy.sum(), mode='reflect')
                #outpt[itr] = np.sum(weighted_healpix_map*(bm_map[itr]/bm_map[itr].sum()))
                outpt[itr] = np.sum(weighted_healpix_map*(bm_map[itr]))
                #print 'itr', itr

            alpha = file('pap%d.save' % iter, 'wb')
            #h_map = ndimage.filters.gaussian_filter(outpt, sigma=3.)
            cPickle.dump(outpt, alpha, protocol=cPickle.HIGHEST_PROTOCOL)
            alpha.close()
            print 'Just dumped stripp%d.save:-------' % iter

    print 'Loading dumped files:-------'
    loaded_objects = []
    for itr4 in xrange(16):
        alpha = file('stripp%d.save' % itr4, 'rb')
        loaded_objects.append(cPickle.load(alpha))
        alpha.close()
    measured_map.append(copy.deepcopy(loaded_objects))

    return measured_map
Remember that HEALPix maps can be in either "Ring" or "Nested" format. It sounds like you may need to add the keyword nest=True to your healpy functions like hp.pix2ang. If your input maps are in nested format, this keyword is needed.
For example:
I recently tried using the healpy.smoothing() function, and found my resulting image to have rings (perhaps like you described) upon viewing the output map with healpy.mollview(). The rings disappeared and the image was presented as I expected after running mollview with the nest=True keyword. Check which ordering scheme your input files use.
Reference:
http://healpy.readthedocs.org/en/latest/tutorial.html#creating-and-manipulating-maps
Healpix supports two different ordering schemes, RING or NESTED. By
default, healpy maps are in RING ordering. In order to work with
NESTED ordering, all map related functions support the nest keyword,
for example: hp.mollview(m, nest=True, title="Mollview image NESTED")
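A minimal sketch of both options, assuming the input file is in NESTED ordering ('map.fits' is a placeholder filename):

import healpy as hp

m = hp.read_map('map.fits', nest=True)         # return the map in NESTED ordering
hp.mollview(m, nest=True, title='NESTED map')  # display using NESTED indexing

m_ring = hp.reorder(m, n2r=True)               # or convert NESTED -> RING once
hp.mollview(m_ring, title='Same map in RING ordering')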
I want to create a skeleton based on an existing segmentation, similar to what is done here (from scikit-image):
However, I want to do this on 3D data. Is there code for that somewhere out there? Preferably in Python, but any language helps.
I am aware of this great site; however, I don't think they offer any code.
I am planning on using this on volumes of about 500x500x500 pixels, so it should scale well...
I am developing these tools at the link below. The function getSkeletonize3D in the program convOptimize.py lets you thin your 3D data. It took about 30 minutes to give the result for the 512 cube I have. Let me know if you have any problems. https://github.com/3Scan/3scan-skeleton. The paper I used for the implementation is referenced in the comments in the code below.
Basically, how this 3D skeletonization algorithm works is: in each pass it has 12 subiterations in which it removes boundaries in specific directions iteratively, until you get a skeleton in the center.
The main Python code needed for skeletonizing your data is below. Since it requires imports from other programs (rotationalOperators, which itself imports from a file called Thin3dtemplates), I recommend you download the rotationalOperators, Thin3dtemplates and convOptimize Python scripting files, and also download lookuparray.npy, a file used as a lookup table in a numpy array format, pre-calculated for validating whether a voxel should be marked for deletion or not. You need Python 3, with the scipy, numpy and pyeda modules installed, to run these codes.
import numpy as np
import time
from scipy import ndimage
from scipy.ndimage.filters import convolve
"""
the following subiteration functions are how each image is rotated to the next direction for removing
boundary voxels in the order described in the reference paper
us, ne, wd,..
"""
from rotationalOperators import firstSubiteration, secondSubiteration, thirdSubiteration, fourthSubiteration, fifthSubiteration, sixthSubiteration, seventhSubiteration, eighthSubiteration, ninthSubiteration, tenthSubiteration, eleventhSubiteration, twelvethSubiteration
"""
reference paper
http://web.inf.u-szeged.hu/ipcg/publications/papers/PalagyiKuba_GMIP1999.pdf
input should be a binary image/ already segmented
"""
"""
array that has calculated the validity of the 14 templates beforehand and stored each index which is
decimal number of the binary string of 26 values (sqrt(3) connectivity) that are around a single voxel
"""
lookUpTablearray = np.load('lookupTablearray.npy')
def _convolveImage(arr, flippedKernel):
    arr = np.ascontiguousarray(arr, dtype=np.uint64)
    result = convolve(arr, flippedKernel, mode='constant', cval=0)
    result[arr == 0] = 0
    return result

"""
each of the 12 iterations corresponds to each of the following
directions - us, ne, wd, es, uw, nd, sw, un, ed, nw, ue, sd
imported from template expressions
evaluated in advance using pyeda
https://pyeda.readthedocs.org/en/latest/expr.html
"""
sElement = ndimage.generate_binary_structure(3, 1)

def _getBouondariesOfimage(image):
    """
    function to find boundaries/border/edges of the array/image
    """
    erode_im = ndimage.morphology.binary_erosion(image, sElement)
    boundaryIm = image - erode_im
    return boundaryIm

"""
each of the 12 iterations corresponds to each of the following
directions - us, ne, wd, es, uw, nd, sw, un, ed, nw, ue, sd
imported from template expressions
evaluated in advance using pyeda
https://pyeda.readthedocs.org/en/latest/expr.html
"""
directionList = [firstSubiteration, secondSubiteration, thirdSubiteration, fourthSubiteration,
                 fifthSubiteration, sixthSubiteration, seventhSubiteration, eighthSubiteration,
                 ninthSubiteration, tenthSubiteration, eleventhSubiteration, twelvethSubiteration]

def _skeletonPass(image):
    """
    each pass consists of 12 serial subiterations and finding the
    boundaries of the padded image/array
    """
    boundaryIm = _getBouondariesOfimage(image)
    numPixelsremovedList = [] * 12
    boundaryIndices = list(set(map(tuple, list(np.transpose(np.nonzero(boundaryIm))))))
    for i in range(0, 12):
        convImage = _convolveImage(image, directionList[i])
        totalPixels, image = _applySubiter(image, boundaryIndices, convImage)
        print("number of pixels removed in the {} direction is {}".format(i, totalPixels))
        numPixelsremovedList.append(totalPixels)
    numPixelsremoved = sum(numPixelsremovedList)
    return numPixelsremoved, image

def _applySubiter(image, boundaryIndices, convImage):
    """
    each subiteration reduces the border voxels in 12 directions in parallel,
    going through each voxel and marking whether it can be deleted or not in a
    separate image named temp_del, and finally multiplying it with the original
    image to delete the voxels so marked
    """
    temp_del = np.zeros_like(image)
    # boundaryIndicesCopy = copy.deepcopy(boundaryIndices)
    lenB = len(boundaryIndices)
    for k in range(0, lenB):
        temp_del[boundaryIndices[k]] = lookUpTablearray[convImage[boundaryIndices[k]]]
    numpixel_removed = np.einsum('ijk->', image * temp_del, dtype=int)
    image[temp_del == 1] = 0
    return numpixel_removed, image

def getSkeletonize3D(image):
    """
    function to skeletonize a 3D binary image with the object in brighter contrast than the background.
    In other words, 1 = object, 0 = background
    """
    assert np.max(image) in [0, 1]
    zOrig, yOrig, xOrig = np.shape(image)
    padImage = np.lib.pad(image, 1, 'constant', constant_values=0)
    start_skeleton = time.time()
    pass_no = 0
    numpixel_removed = 0
    while pass_no == 0 or numpixel_removed > 0:
        numpixel_removed, padImage = _skeletonPass(padImage)
        print("number of pixels removed in pass {} is {}".format(pass_no, numpixel_removed))
        pass_no += 1
    print("done %i number of pixels in %f seconds" % (np.sum(image), time.time() - start_skeleton))
    return padImage[1: zOrig + 1, 1: yOrig + 1, 1: xOrig + 1]

if __name__ == '__main__':
    sample = np.ones((5, 5, 5), dtype=np.uint8)
    resultSkel = getSkeletonize3D(sample)
    # gives a single voxel at the center
    print("resultSkel", resultSkel)