Change basis of 3D numpy array (fractional to cartesian coordinates) - python

Here is a 2D example of what I want to achieve in 3D:
I have an array of values, A, s.t. A.shape=(n,m), e.g.
>>> A = [[1, 2],
... [3, 4]]
whose indexes are proportional to equally spaced steps along (arbitrary) basis vectors, e.g.
>>> v1 = [1,0]
>>> v2 = [cos(pi/4),sin(pi/4)] # [1,0] rotated 45 degrees
I want a function which applies this basis to get, for this example
>>> apply_basis2D(A,v1,v2)
[[np.nan, 1, 2],
 [3, 4, np.nan]]
(For the 3D version, I'd then want apply_basis3D(A,v1,v2,v3), where A.shape=(n,m,l).)
I have a notion that this can be done by affine transformations, but am not really sure how. This is as close an implementation as I could find (2D-only), using scikit-image;
Thanks in advance!
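Edit: for reference, here is roughly the idea sketched with scipy.ndimage.map_coordinates rather than an explicit affine transform (an untested outline, not the solution below; apply_basis2d and its size argument are names I made up):

import numpy as np
from scipy.ndimage import map_coordinates

def apply_basis2d(A, v1, v2, size=100):
    """Resample A (indexed along v1, v2) onto a Cartesian grid. Rough sketch only."""
    A = np.asarray(A, dtype=float)
    M = np.column_stack([v1, v2])            # columns are the basis vectors
    M_inv = np.linalg.inv(M)
    # Cartesian bounding box of the parallelogram spanned by v1 and v2
    corners = np.array([[0, 0], v1, v2, np.add(v1, v2)], dtype=float)
    lo, hi = corners.min(axis=0), corners.max(axis=0)
    xs = np.linspace(lo[0], hi[0], size)
    ys = np.linspace(lo[1], hi[1], size)
    X, Y = np.meshgrid(xs, ys, indexing='ij')
    xy = np.stack([X.ravel(), Y.ravel()])    # (2, P) Cartesian sample points
    frac = M_inv @ xy                        # fractional coordinates in the v1/v2 basis
    # map fractional coordinates to array indices and interpolate
    idx = frac * (np.array(A.shape)[:, None] - 1)
    out = map_coordinates(A, idx, order=1, cval=np.nan)
    # mask points that fall outside the parallelogram
    outside = ((frac < 0) | (frac > 1)).any(axis=0)
    out[outside] = np.nan
    return out.reshape(size, size)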

Done! Seems to work quite well, but I welcome critique:
import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import interpn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib notebook
def cartesian_basis2d(A, v1, v2, longest_side=None):
    """convert 2d array in basis v1,v2 to cartesian basis

    Parameters
    ----------
    A : array((N,M))
        values in original basis
    v1 : array((2,))
    v2 : array((2,))
    longest_side : int
        longest side (in terms of indexes) of new array

    Returns
    -------
    B : array((P,Q))
        where max(P,Q) == longest_side
    """
    longest_side = max(A.shape) if longest_side is None else longest_side
    # assumed
    origin = [0, 0]
    # convert to numpy arrays
    origin = np.asarray(origin)
    v1 = np.asarray(v1)
    v2 = np.asarray(v2)
    # pre-compute basis transformation matrix
    M_inv = np.linalg.inv(np.transpose([v1, v2]))
    # only works right if transposed before and after?
    A = np.array(A).T
    # add bounding rows/columns for interpolation
    A = np.concatenate((np.array(A[:, 0], ndmin=2).T, A, np.array(A[:, -1], ndmin=2).T), axis=1)
    A = np.concatenate((np.array(A[0], ndmin=2), A, np.array(A[-1], ndmin=2)), axis=0)
    # create axes
    axes = []
    for i, v in enumerate([v1, v2]):
        step = 1. / (A.shape[i] - 2)
        ax = np.linspace(0, 1 + step, A.shape[i]) - step / 2.
        axes.append(ax)
    # get bounding box and compute its extents
    bbox_pts = np.asarray([origin, v1, v1 + v2, v2])
    hull = Delaunay(bbox_pts)
    bbox_x, bbox_y = bbox_pts.T
    new_bounds = bbox_x.min(), bbox_x.max(), bbox_y.min(), bbox_y.max()  # left, right, bottom, top
    min_bound, max_bound = min(bbox_x.min(), bbox_y.min()), max(bbox_x.max(), bbox_y.max())
    # compute new array size
    x_length = abs(new_bounds[0] - new_bounds[1])
    y_length = abs(new_bounds[2] - new_bounds[3])
    if x_length > y_length:
        xlen = longest_side
        ylen = int(longest_side * y_length / float(x_length))
    else:
        ylen = longest_side
        xlen = int(longest_side * x_length / float(y_length))
    # compute new array values
    new_array = np.full((xlen, ylen), np.nan)
    xidx, yidx = np.meshgrid(range(new_array.shape[0]), range(new_array.shape[1]))
    xidx = xidx.flatten()
    yidx = yidx.flatten()
    xyidx = np.concatenate((np.array(xidx, ndmin=2).T, np.array(yidx, ndmin=2).T), axis=1)
    xy = min_bound + (xyidx.astype(float) * abs(min_bound - max_bound) / longest_side)
    # find which points are inside the bounding box
    inside_mask = hull.find_simplex(xy) >= 0
    uv = np.einsum('...jk,...k->...j', M_inv, xy[inside_mask])
    new_array[xyidx[inside_mask][:, 0], xyidx[inside_mask][:, 1]] = interpn(axes, A, uv, bounds_error=True, method='nearest')
    new_array = new_array.T
    return new_array
A = np.array(
    [[1,2,3],
     [4,5,6],
     [7,8,9]])
v1 = [2,0]
v2 = [np.cos(np.pi/4),np.sin(np.pi/4)]
new_array = cartesian_basis2d(A,v1,v2,100)
plt.imshow(new_array,origin='lower');
def cartesian_basis3d(A, v1, v2, v3, longest_side=None):
    """convert 3d array in basis v1,v2,v3 to cartesian basis

    Parameters
    ----------
    A : array((N,M,L))
        values in original basis
    v1 : array((3,))
    v2 : array((3,))
    v3 : array((3,))
    longest_side : int
        longest side (in terms of indexes) of new array

    Returns
    -------
    B : array((P,Q,R))
        where max(P,Q,R) == longest_side
    """
    longest_side = max(A.shape) if longest_side is None else longest_side
    # assumed
    origin = [0, 0, 0]
    # convert to numpy arrays
    origin = np.asarray(origin)
    v1 = np.asarray(v1)
    v2 = np.asarray(v2)
    v3 = np.asarray(v3)
    # pre-compute basis transformation matrix
    M_inv = np.linalg.inv(np.transpose([v1, v2, v3]))
    # only works right if transposed before and after?
    A = np.array(A).T
    # add bounding layers for interpolation
    A = np.concatenate((np.array(A[0], ndmin=3), A, np.array(A[-1], ndmin=3)), axis=0)
    start = np.transpose(np.array(A[:, :, 0], ndmin=3), axes=[1, 2, 0])
    end = np.transpose(np.array(A[:, :, -1], ndmin=3), axes=[1, 2, 0])
    A = np.concatenate((start, A, end), axis=2)
    start = np.transpose(np.array(A[:, 0, :], ndmin=3), axes=[1, 0, 2])
    end = np.transpose(np.array(A[:, -1, :], ndmin=3), axes=[1, 0, 2])
    A = np.concatenate((start, A, end), axis=1)
    # create axes
    axes = []
    for i, v in enumerate([v1, v2, v3]):
        step = 1. / (A.shape[i] - 2)
        ax = np.linspace(0, 1 + step, A.shape[i]) - step / 2.
        axes.append(ax)
    # get bounding box and compute its extents
    bbox_pts = np.asarray([origin, v1, v2, v3, v1 + v2, v1 + v3, v1 + v2 + v3, v2 + v3])
    hull = Delaunay(bbox_pts)
    bbox_x, bbox_y, bbox_z = bbox_pts.T
    new_bounds = bbox_x.min(), bbox_x.max(), bbox_y.min(), bbox_y.max(), bbox_z.min(), bbox_z.max()  # left, right, bottom, top, front, back
    min_bound = min(bbox_x.min(), bbox_y.min(), bbox_z.min())
    max_bound = max(bbox_x.max(), bbox_y.max(), bbox_z.max())
    # compute new array size
    x_length = abs(new_bounds[0] - new_bounds[1])
    y_length = abs(new_bounds[2] - new_bounds[3])
    z_length = abs(new_bounds[4] - new_bounds[5])
    if x_length == max([x_length, y_length, z_length]):
        xlen = longest_side
        ylen = int(longest_side * y_length / float(x_length))
        zlen = int(longest_side * z_length / float(x_length))
    elif y_length == max([x_length, y_length, z_length]):
        ylen = longest_side
        xlen = int(longest_side * x_length / float(y_length))
        zlen = int(longest_side * z_length / float(y_length))
    else:
        zlen = longest_side
        xlen = int(longest_side * x_length / float(z_length))
        ylen = int(longest_side * y_length / float(z_length))
    # compute new array values
    new_array = np.full((xlen, ylen, zlen), np.nan)
    xidx, yidx, zidx = np.meshgrid(range(new_array.shape[0]), range(new_array.shape[1]), range(new_array.shape[2]))
    xidx = xidx.flatten()
    yidx = yidx.flatten()
    zidx = zidx.flatten()
    xyzidx = np.concatenate((np.array(xidx, ndmin=2).T, np.array(yidx, ndmin=2).T, np.array(zidx, ndmin=2).T), axis=1)
    xyz = min_bound + (xyzidx.astype(float) * abs(min_bound - max_bound) / longest_side)
    # find which points are inside the bounding box
    inside_mask = hull.find_simplex(xyz) >= 0
    uvw = np.einsum('...jk,...k->...j', M_inv, xyz[inside_mask])
    new_array[xyzidx[inside_mask][:, 0], xyzidx[inside_mask][:, 1], xyzidx[inside_mask][:, 2]] = interpn(axes, A, uvw, bounds_error=True, method='nearest')
    new_array = new_array.T
    return new_array
A = np.array(
    [[[1,1],[2,2]],
     [[3,3],[4,4]]])
v1 = [2,0,0]
v2 = [np.cos(np.pi/4),np.sin(np.pi/4),0]
v3 = [0,np.cos(np.pi/4),np.sin(np.pi/4)]
new_array = cartesian_basis3d(A,v1,v2,v3,100)
xs,ys,zs = new_array.nonzero()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
pcm = ax.scatter(xs, ys, zs, c=new_array[xs,ys,zs],cmap='jet')
plt.show()
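One small tweak I'd consider: new_array.nonzero() also picks up the NaN cells (NaN is truthy), so to scatter only the voxels that were actually filled:

# variant of the plotting step above: skip the NaN padding outside the parallelepiped
xs, ys, zs = np.where(~np.isnan(new_array))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
pcm = ax.scatter(xs, ys, zs, c=new_array[xs, ys, zs], cmap='jet')
plt.show()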


filter simplices out of scipy.spatial.Delaunay

Short version:
Is it possible to create a new scipy.spatial.Delaunay object with a subset of the triangles (2D data) from an existing object?
The goal would be to use the find_simplex method on the new object with filtered out simplices.
Similar but not quite the same
matplotlib contour/contourf of **concave** non-gridded data
How to deal with the (undesired) triangles that form between the edges of my geometry when using Triangulation in matplotlib
Long version:
I am looking at lat-lon data that I regrid with scipy.interpolate.griddata like in the pseudo-code below:
import numpy as np
from scipy.interpolate import griddata
from scipy.spatial import Delaunay
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
#lat shape (a,b): 2D array of latitude values
#lon shape (a,b): 2D array of longitude values
#x shape (a,b): 2D array of variable of interest at lat and lon
# lat-lon data
nonan = ~np.isnan(lat)
flat_lat = lat[nonan]
flat_lon = lon[nonan]
flat_x = x[nonan]
# regular lat-lon grid for regridding
lon_ar = np.arange(loni,lonf,resolution)
lat_ar = np.arange(lati,latf,resolution)
lon_grid, lat_grid = np.meshgrid(lon_ar,lat_ar)
# regrid
x_grid = griddata((flat_lon,flat_lat),flat_x,(lon_grid,lat_grid), method='nearest')
# filter out extrapolated values
cloud_points = _ndim_coords_from_arrays((flat_lon,flat_lat))
regrid_points = _ndim_coords_from_arrays((lon_grid.ravel(),lat_grid.ravel()))
tri = Delaunay(cloud_points)
outside_hull = tri.find_simplex(regrid_points) < 0
x_grid[outside_hull.reshape(x_grid.shape)] = np.nan
# filter out large triangles ??
# it would be easy if I could "subset" tri into a new scipy.spatial.Delaunay object
# new_tri = ??
# outside_hull = new_tri.find_simplex(regrid_points) < 0
The problem is that the convex hull contains low-quality (very large) triangles, shown in blue in the example below, that I would like to filter out because they don't represent the data well. I know how to filter them out of the input points, but not out of the regridded output. Here is the filter function:
from typing import Optional

def filter_large_triangles(
    points: np.ndarray, tri: Optional[Delaunay] = None, coeff: float = 2.0
):
    """
    Filter out triangles that have an edge > coeff * median(edge)
    Inputs:
        points: (N,2) array of point coordinates
        tri: scipy.spatial.Delaunay object
        coeff: triangles with an edge > coeff * median(edge) will be filtered out
    Outputs:
        valid_slice: boolean array that selects "normal" triangles
    """
    if tri is None:
        tri = Delaunay(points)

    edge_lengths = np.zeros(tri.vertices.shape)
    seen = {}
    # loop over triangles
    for i, vertex in enumerate(tri.vertices):
        # loop over edges
        for j in range(3):
            id0 = vertex[j]
            id1 = vertex[(j + 1) % 3]

            # avoid calculating twice for non-border edges
            if (id0, id1) in seen:
                edge_lengths[i, j] = seen[(id0, id1)]
            else:
                edge_lengths[i, j] = np.linalg.norm(points[id1] - points[id0])
                seen[(id0, id1)] = edge_lengths[i, j]

    median_edge = np.median(edge_lengths.flatten())
    valid_slice = np.all(edge_lengths < coeff * median_edge, axis=1)
    return valid_slice
The bad triangles are shown in blue below:
import matplotlib.pyplot as plt
no_large_triangles = filter_large_triangles(cloud_points,tri)
fig, ax = plt.subplots()
ax.triplot(cloud_points[:,0], cloud_points[:,1], tri.simplices, c='blue')
ax.triplot(cloud_points[:,0], cloud_points[:,1], tri.simplices[no_large_triangles], c='green')
plt.show()
Is it possible to create a new scipy.spatial.Delaunay object with only the no_large_triangles simplices? The goal would be to use the find_simplex method on that new object to easily filter out points.
As an alternative how could I find the indices of points in regrid_points that fall inside the blue triangles? (tri.simplices[~no_large_triangles])
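For that alternative, a rough sketch is to test the regrid points against each large triangle directly with matplotlib.path (points_in_triangles is a hypothetical helper, and the Python loop over triangles will be slow when there are many of them):

from matplotlib.path import Path

def points_in_triangles(points, query_points, triangles):
    """Boolean mask over query_points: True where a point falls inside any of the given triangles."""
    inside = np.zeros(len(query_points), dtype=bool)
    for simplex in triangles:  # e.g. tri.simplices[~no_large_triangles]
        triangle_path = Path(points[simplex])  # the 3 vertices are treated as a closed polygon
        inside |= triangle_path.contains_points(query_points)
    return inside

# inside_large = points_in_triangles(cloud_points, regrid_points, tri.simplices[~no_large_triangles])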
So it is possible to modify the Delaunay object for the purpose of using find_simplex on a subset of simplices, but it seems only with the bruteforce algorithm.
# filter out extrapolated values
cloud_points = _ndim_coords_from_arrays((flat_lon,flat_lat))
regrid_points = _ndim_coords_from_arrays((lon_grid.ravel(),lat_grid.ravel()))
tri = Delaunay(cloud_points)
outside_hull = tri.find_simplex(regrid_points) < 0
# filter out large triangles
large_triangles = ~filter_large_triangles(cloud_points,tri)
large_triangle_ids = np.where(large_triangles)[0]
subset_tri = tri # this doesn't preserve tri, effectively just a renaming
# the _find_simplex_bruteforce method only needs the simplices and neighbors
subset_tri.nsimplex = large_triangle_ids.size
subset_tri.simplices = tri.simplices[large_triangles]
subset_tri.neighbors = tri.neighbors[large_triangles]
# update neighbors
for i, triangle in enumerate(subset_tri.neighbors):
    for j, neighbor_id in enumerate(triangle):
        if neighbor_id in large_triangle_ids:
            # reindex the neighbors to match the size of the subset
            subset_tri.neighbors[i, j] = np.where(large_triangle_ids == neighbor_id)[0][0]
        elif neighbor_id >= 0 and (neighbor_id not in large_triangle_ids):
            # that neighbor was a "normal" triangle that should not exist in the subset
            subset_tri.neighbors[i, j] = -1
inside_large_triangles = subset_tri.find_simplex(regrid_points,bruteforce=True) >= 0
invalid_slice = np.logical_or(outside_hull,inside_large_triangles)
x_grid[invalid_slice.reshape(x_grid.shape)] = np.nan
Showing that the new Delaunay object has only the subset of large triangles
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.triplot(cloud_points[:,0],cloud_points[:,1],subset_tri.simplices,color='red')
plt.show()
Plotting x_grid with pcolormesh before the filtering for large triangles (zoomed in the blue circle above):
After the filtering:

Does this python integration scheme match the analytic expression?

According to the original paper by Huang
https://arxiv.org/pdf/1401.4211.pdf
The marginal Hilbert spectrum is given by
h(ω) = ∫ p(ω, A) A² dA,
where A = A(ω, t) is the amplitude (i.e., a function of time and frequency) and p(ω, A) is the joint probability density function of the frequency [ωi] and amplitude [Ai].
I am trying to estimate 1) the joint probability density using plt.hist2d and 2) the integral above using a sum.
The code I am using is the following:
IA_flat1 = np.ravel(IA) ### Turn matrix to 1 D array
IF_flat1 = np.ravel(IF) ### Here IA corresponds to A
IF_flat = IF_flat1[(IF_flat1>min_f) & (IF_flat1<fs)] ### Keep only desired frequencies
IA_flat = IA_flat1[(IF_flat1>min_f) & (IF_flat1<fs)] ### Keep IA that correspond to desired frequencies
### return the Joint probability density
Pjoint,f_edges, A_edges,_ = plt.hist2d(IF_flat,IA_flat,bins=[bins_F,bins_A], density=True)
plt.close()
n1 = np.digitize(IA_flat, A_edges).astype(int) ### Return the indices of the bins to which
n2 = np.digitize(IF_flat, f_edges).astype(int) ### each value in input array belongs.
### define integration function
from numba import jit, prange ### Numba is added for speed
# @jit(nopython=True, parallel=True)
def get_int(A_edges, Pjoint, IA_flat, n1, n2):
    dA = np.diff(A_edges)[0]               ### Find dA for integration
    sum_h = np.zeros(np.shape(Pjoint)[0])  ### Initialize array
    for j in prange(np.shape(Pjoint)[0]):
        h = np.zeros(np.shape(Pjoint)[1])  ### Initialize array
        for k in prange(np.shape(Pjoint)[1]):
            needed = IA_flat[(n1==k) & (n2==j)]  ### Keep only the elements of the array that
                                                 ### are related to Pjoint[j,k]
            h[k] = Pjoint[j,k]*np.nanmean(needed**2)*dA  ### Pjoint*A^2*dA
        sum_h[j] = np.nansum(h)  ### Sum_{i=0}^{N}(Pjoint*A^2*dA)
    return sum_h
### Now run previously defined function
sum_h = get_int(A_edges, Pjoint ,IA_flat, n1, n2)
1) I am not sure that everything is correct though. Any suggestions or comments on what I might be doing wrong?
2) Is there a way to do the same using a scipy integration scheme?
You can extract the probability from the 2D histogram and use it for the integration:
# Added some numbers to have something to run
import numpy as np
import matplotlib.pyplot as plt
IA = np.random.rand(100,100)
IF = np.random.rand(100,100)
bins_F = np.linspace(0,1,20)
bins_A = np.linspace(0,1,100)
min_f = 0
fs = 1.0
IA_flat1 = np.ravel(IA) ### Turn matrix to 1 D array
IF_flat1 = np.ravel(IF) ### Here IA corresponds to A
IF_flat = IF_flat1[(IF_flat1>min_f) & (IF_flat1<fs)] ### Keep only desired frequencies
IA_flat = IA_flat1[(IF_flat1>min_f) & (IF_flat1<fs)] ### Keep IA that correspond to desired frequencies
### return the Joint probability density
Pjoint,f_edges, A_edges,_ = plt.hist2d(IF_flat,IA_flat,bins=[bins_F,bins_A], density=True)
f_values = (f_edges[1:]+f_edges[:-1])/2
A_values = (A_edges[1:]+A_edges[:-1])/2
dA = A_values[1]-A_values[0] # for the integral
#Pjoint.shape (19,99)
h = np.zeros(f_values.shape)
for i in range(len(f_values)):
    f = f_values[i]
    # column of the histogram with frequency f, probability
    p = Pjoint[i]
    # summation equivalent to the integral
    integral_result = np.sum(p*A_values**2*dA)
    h[i] = integral_result
plt.figure()
plt.plot(f_values,h)
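Regarding the second question, the sum can also be handed to a scipy quadrature rule; a short sketch reusing Pjoint, A_values and f_values from above (Simpson's rule instead of a plain Riemann sum, so the numbers will differ slightly):

from scipy.integrate import simpson

# integrate p(w,A)*A^2 over A for every frequency row of the histogram
h_simpson = simpson(Pjoint * A_values**2, x=A_values, axis=1)

plt.figure()
plt.plot(f_values, h_simpson)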

Matplotlib: Rotate an ellipse in 3D issue [duplicate]

Short question
How can matplotlib 2D patches be transformed to 3D with arbitrary normals?
Long question
I would like to plot Patches in axes with 3d projection. However, the methods provided by mpl_toolkits.mplot3d.art3d only provide methods to have patches with normals along the principal axes. How can I add patches to 3d axes that have arbitrary normals?
Short answer
Copy the code below into your project and use the method
def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.
    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
to transform your 2D patches to 3D patches with arbitrary normals.
import numpy as np
from mpl_toolkits.mplot3d import art3d

def rotation_matrix(d):
    """
    Calculates a rotation matrix given a vector d. The direction of d
    corresponds to the rotation axis. The length of d corresponds to
    the sin of the angle of rotation.

    Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
    """
    sin_angle = np.linalg.norm(d)
    if sin_angle == 0:
        return np.identity(3)
    d /= sin_angle
    eye = np.eye(3)
    ddt = np.outer(d, d)
    skew = np.array([[    0,  d[2], -d[1]],
                     [-d[2],     0,  d[0]],
                     [ d[1], -d[0],     0]], dtype=np.float64)
    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.
    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)
    normal = np.asarray(normal, dtype=float)
    normal /= np.linalg.norm(normal)  # Make sure the vector is normalised

    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    verts = path.vertices  # Get the vertices in 2D
    d = np.cross(normal, (0, 0, 1))  # Obtain the rotation vector
    M = rotation_matrix(d)  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    """
    Translates the 3D pathpatch by the amount delta.
    """
    pathpatch._segment3d += delta
Long answer
Looking at the source code of art3d.pathpatch_2d_to_3d gives the following call hierarchy
art3d.pathpatch_2d_to_3d
art3d.PathPatch3D.set_3d_properties
art3d.Patch3D.set_3d_properties
art3d.juggle_axes
The transformation from 2D to 3D happens in the last call to art3d.juggle_axes. Modifying this last step, we can obtain patches in 3D with arbitrary normals.
We proceed in four steps
Project the vertices of the patch into the XY plane (pathpatch_2d_to_3d)
Calculate a rotation matrix R that rotates the z direction to the direction of the normal (rotation_matrix)
Apply the rotation matrix to all vertices (pathpatch_2d_to_3d)
Translate the resulting object in the z-direction (pathpatch_2d_to_3d)
Sample source code and the resulting plot are shown below.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import Circle
from itertools import product

ax = plt.axes(projection='3d')  # Create axes

p = Circle((0, 0), .2)  # Add a circle in the yz plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0.5, normal='x')
pathpatch_translate(p, (0, 0.5, 0))

p = Circle((0, 0), .2, facecolor='r')  # Add a circle in the xz plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0.5, normal='y')
pathpatch_translate(p, (0.5, 1, 0))

p = Circle((0, 0), .2, facecolor='g')  # Add a circle in the xy plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0, normal='z')
pathpatch_translate(p, (0.5, 0.5, 0))

for normal in product((-1, 1), repeat=3):
    p = Circle((0, 0), .2, facecolor='y', alpha=.2)
    ax.add_patch(p)
    pathpatch_2d_to_3d(p, z=0, normal=normal)
    pathpatch_translate(p, 0.5)
Very useful piece of code, but there is a small caveat: it cannot handle normals pointing downwards, because it uses only the sine of the angle.
You need to use the cosine as well:
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import art3d
from mpl_toolkits.mplot3d import proj3d
import numpy as np
def rotation_matrix(v1, v2):
    """
    Calculates the rotation matrix that changes v1 into v2.
    """
    v1 = np.asarray(v1, dtype=float)
    v2 = np.asarray(v2, dtype=float)
    v1 /= np.linalg.norm(v1)
    v2 /= np.linalg.norm(v2)

    cos_angle = np.dot(v1, v2)
    d = np.cross(v1, v2)
    sin_angle = np.linalg.norm(d)

    if sin_angle == 0:
        M = np.identity(3) if cos_angle > 0. else -np.identity(3)
    else:
        d /= sin_angle
        eye = np.eye(3)
        ddt = np.outer(d, d)
        skew = np.array([[    0,  d[2], -d[1]],
                         [-d[2],     0,  d[0]],
                         [ d[1], -d[0],     0]], dtype=np.float64)
        M = ddt + cos_angle * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.
    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)

    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    verts = path.vertices  # Get the vertices in 2D
    M = rotation_matrix(normal, (0, 0, 1))  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    """
    Translates the 3D pathpatch by the amount delta.
    """
    pathpatch._segment3d += delta
Here's a more general method that allows embedding in more complex ways than along a normal:
class EmbeddedPatch2D(art3d.PathPatch3D):
    def __init__(self, patch, transform):
        assert transform.shape == (4, 3)
        self._patch2d = patch
        self.transform = transform
        self._path2d = patch.get_path()
        self._facecolor2d = patch.get_facecolor()
        self.set_3d_properties()

    def set_3d_properties(self, *args, **kwargs):
        # get the fully-transformed path
        path = self._patch2d.get_path()
        trans = self._patch2d.get_patch_transform()
        path = trans.transform_path(path)

        # copy across the relevant properties
        self._code3d = path.codes
        self._facecolor3d = self._patch2d.get_facecolor()

        # calculate the transformed vertices
        verts = np.empty(path.vertices.shape + np.array([0, 1]))
        verts[:, :-1] = path.vertices
        verts[:, -1] = 1
        self._segment3d = verts.dot(self.transform.T)[:, :-1]

    def __getattr__(self, key):
        return getattr(self._patch2d, key)
To use this as desired in the question, we need a helper function
def matrix_from_normal(normal):
    """
    given a normal vector, builds a homogeneous rotation matrix such that M.dot([1, 0, 0]) == normal
    """
    normal = normal / np.linalg.norm(normal)
    res = np.eye(len(normal) + 1)  # 4x4 homogeneous matrix
    res[:-1, 0] = normal
    if normal[0] == 0:
        perp = [0, -normal[2], normal[1]]
    else:
        perp = np.cross(normal, [1, 0, 0])
        perp /= np.linalg.norm(perp)
    res[:-1, 1] = perp
    res[:-1, 2] = np.cross(normal, perp)
    return res
All together:
circ = Circle((0,0), .2, facecolor = 'y', alpha = .2)
# the matrix here turns (x, y, 1) into (0, x, y, 1)
mat = matrix_from_normal([1, 1, 0]).dot([
    [0, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]
])
circ3d = EmbeddedPatch2D(circ, mat)
I want to share my solution, which extends the earlier proposals.
It enables both 3D elements and text to be added to an Axes3D presentation.
# required imports
import math
import numpy as np
from scipy.spatial.transform import Rotation
from mpl_toolkits import mplot3d
from matplotlib.patches import Circle, PathPatch
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties

# creation of a rotation matrix that preserves the x-axis in an xy-plane of the original coordinate system
def rotationMatrix(normal):
    norm = np.linalg.norm(normal)
    if norm == 0:
        return Rotation.identity(None)
    zDir = normal / norm
    if np.abs(zDir[2]) == 1:
        yDir = np.array([0, zDir[2], 0])
    else:
        yDir = (np.array([0, 0, 1]) - zDir[2]*zDir) / math.sqrt(1 - zDir[2]**2)
    rotMat = np.empty((3, 3))
    rotMat[:, 0] = np.cross(zDir, yDir)
    rotMat[:, 1] = yDir
    rotMat[:, 2] = -zDir
    return Rotation.from_matrix(rotMat)

def toVector(vec):
    if vec is None or not isinstance(vec, (str, np.ndarray)):
        vec = "z"
    if isinstance(vec, str):
        zdir = vec[0] if len(vec) > 0 else "z"
        if zdir not in "xyz":
            zdir = "z"
        index = "xyz".index(zdir)
        return np.roll((1.0, 0, 0), index)
    else:
        return vec

# Transforms a 2D Patch to a 3D patch using a pivot point and the given normal vector.
def pathpatch_2d_to_3d(pathpatch, pivot=np.zeros(3), zDir='z'):
    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = mplot3d.art3d.PathPatch3D  # Change the class
    pathpatch._path2d = path  # Copy the 2d path
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    # Get the 2D vertices and add the third dimension
    verts3d = np.empty((path.vertices.shape[0], 3))
    verts3d[:, 0:2] = path.vertices
    verts3d[:, 2] = pivot[2]
    R = rotationMatrix(toVector(zDir))
    pathpatch._segment3d = R.apply(verts3d - pivot) + pivot
    return pathpatch

# places a 3D text element in axes with 3d projection (assumes a global Axes3D named ax3D)
def text3d(xyz, text, zDir="z", scalefactor=1.0, fp=FontProperties(), **kwargs):
    pt = PathPatch(TextPath(xyz[0:2], text, size=scalefactor*fp.get_size(), prop=fp, usetex=False), **kwargs)
    ax3D.add_patch(pathpatch_2d_to_3d(pt, xyz, zDir))

# places a 3D circle in axes with 3d projection (assumes a global Axes3D named ax3D)
def circle3d(center, radius, zDir='z', **kwargs):
    pc = Circle(center[0:2], radius, **kwargs)
    ax3D.add_patch(pathpatch_2d_to_3d(pc, center, zDir))
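A quick usage sketch, under my assumption of how the helpers above are meant to be called (ax3D is the module-level 3D axes that text3d and circle3d expect):

import matplotlib.pyplot as plt

fig = plt.figure()
ax3D = fig.add_subplot(projection='3d')  # global axes used by text3d / circle3d

circle3d(np.array([0.5, 0.5, 0.0]), 0.2, zDir=np.array([1.0, 1.0, 1.0]), facecolor='g', alpha=0.5)
text3d(np.array([0.2, 0.2, 0.5]), "hello", zDir="x", scalefactor=0.02)

plt.show()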

How to efficiently convert large numpy array of point cloud data to downsampled 2d array?

I have a large numpy array of unordered lidar point cloud data, of shape [num_points, 3], which are the XYZ coordinates of each point. I want to downsample this into a 2D grid of mean height values - to do this I want to split the data into 5x5 X-Y bins and calculate the mean height value (Z coordinate) in each bin.
Does anyone know any quick/efficient way to do this?
Current code:
import numpy as np
from open3d import read_point_cloud
resolution = 5
# Code to load point cloud and get points as numpy array
pcloud = read_point_cloud(params.POINT_CLOUD_DIR + "Part001.pcd")
pcloud_np = np.asarray(pcloud.points)
# Code to generate example dataset
pcloud_np = np.random.uniform(0.0, 1000.0, size=(1000,3))
# Current (inefficient) code to quantize into 5x5 XY 'bins' and take mean Z values in each bin
pcloud_np[:, 0:2] = np.round(pcloud_np[:, 0:2]/float(resolution))*float(resolution) # Round XY values to nearest 5
num_x = int(np.max(pcloud_np[:, 0])/resolution)
num_y = int(np.max(pcloud_np[:, 1])/resolution)
mean_height = np.zeros((num_x, num_y))
# Loop over each x-y bin and calculate mean z value
x_val = 0
for x in range(num_x):
    y_val = 0
    for y in range(num_y):
        height_vals = pcloud_np[(pcloud_np[:,0] == float(x_val)) & (pcloud_np[:,1] == float(y_val))]
        if height_vals.size != 0:
            mean_height[x, y] = np.mean(height_vals)
        y_val += resolution
    x_val += resolution
Here is a suggestion using an np.bincount idiom on the flattened 2d grid. I also took the liberty to add some small fixes to the original code:
import numpy as np
#from open3d import read_point_cloud
resolution = 5
# Code to load point cloud and get points as numpy array
#pcloud = read_point_cloud(params.POINT_CLOUD_DIR + "Part001.pcd")
#pcloud_np = np.asarray(pcloud.points)
# Code to generate example dataset
pcloud_np = np.random.uniform(0.0, 1000.0, size=(1000,3))
def f_op(pcloud_np, resolution):
    # Current (inefficient) code to quantize into 5x5 XY 'bins' and take mean Z values in each bin
    pcloud_np[:, 0:2] = np.round(pcloud_np[:, 0:2]/float(resolution))*float(resolution)  # Round XY values to nearest 5
    num_x = int(np.max(pcloud_np[:, 0])/resolution) + 1
    num_y = int(np.max(pcloud_np[:, 1])/resolution) + 1
    mean_height = np.zeros((num_x, num_y))
    # Loop over each x-y bin and calculate mean z value
    x_val = 0
    for x in range(num_x):
        y_val = 0
        for y in range(num_y):
            height_vals = pcloud_np[(pcloud_np[:,0] == float(x_val)) & (pcloud_np[:,1] == float(y_val)), 2]
            if height_vals.size != 0:
                mean_height[x, y] = np.mean(height_vals)
            y_val += resolution
        x_val += resolution
    return mean_height

def f_pp(pcloud_np, resolution):
    xy = pcloud_np.T[:2]
    xy = ((xy + resolution / 2) // resolution).astype(int)
    mn, mx = xy.min(axis=1), xy.max(axis=1)
    sz = mx + 1 - mn
    flatidx = np.ravel_multi_index(xy - mn[:, None], sz)
    histo = np.bincount(flatidx, pcloud_np[:, 2], sz.prod()) / np.maximum(1, np.bincount(flatidx, None, sz.prod()))
    return (histo.reshape(sz), *(xy * resolution))
res_op = f_op(pcloud_np, resolution)
res_pp, x, y = f_pp(pcloud_np, resolution)
from timeit import timeit
t_op = timeit(lambda:f_op(pcloud_np, resolution), number=10)*100
t_pp = timeit(lambda:f_pp(pcloud_np, resolution), number=10)*100
print("results equal:", np.allclose(res_op, res_pp))
print(f"timings (ms) op: {t_op:.3f} pp: {t_pp:.3f}")
Sample output:
results equal: True
timings (ms) op: 359.162 pp: 0.427
Speedup almost 1000x.
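For completeness, the same binning can also be written with scipy.stats.binned_statistic_2d; usually slower than the bincount trick above but quite readable (note the bin edges here start at the data minimum, so the grid does not line up exactly with the rounding convention used before, and empty bins come out as NaN rather than 0):

from scipy.stats import binned_statistic_2d

x, y, z = pcloud_np.T
stat, xedges, yedges, _ = binned_statistic_2d(
    x, y, z, statistic='mean',
    bins=[np.arange(x.min(), x.max() + resolution, resolution),
          np.arange(y.min(), y.max() + resolution, resolution)])
mean_height_2 = stat  # 2D array of mean Z per 5x5 bin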

Vectorize compressed sparse matrix from array in Python

I am trying to apply graph theory methods to an image processing problem. I want to generate an adjacency matrix from an array containing the points I want to graph. I want to generate a complete graph of the points in the array. If I have N points in the array that I need to graph, I will need an NxN matrix. The weights should be the distances between the points, so this is the code that I have:
''' vertexarray is an array where the points that are to be
included in the complete graph are True and all others False.'''
import numpy as np
def array_to_complete_graph(vertexarray):
    vertcoords = np.transpose(np.where(vertexarray == True))
    cg_array = np.eye(len(vertcoords))
    for idx, vals in enumerate(vertcoords):
        x_val_1, y_val_1 = vals
        for jdx, wals in enumerate(vertcoords):
            x_diff = wals[0] - vals[0]
            y_diff = wals[1] - vals[1]
            cg_array[idx, jdx] = np.sqrt(x_diff**2 + y_diff**2)
    return cg_array
This works, of course, but my question is: can this same array be generated without the nested for loops?
Use the function scipy.spatial.distance.cdist():
import numpy as np
def array_to_complete_graph(vertexarray):
    vertcoords = np.transpose(np.where(vertexarray == True))
    cg_array = np.eye(len(vertcoords))
    for idx, vals in enumerate(vertcoords):
        x_val_1, y_val_1 = vals
        for jdx, wals in enumerate(vertcoords):
            x_diff = wals[0] - vals[0]
            y_diff = wals[1] - vals[1]
            cg_array[idx, jdx] = np.sqrt(x_diff**2 + y_diff**2)
    return cg_array
arr = np.random.rand(10, 20) > 0.75
from scipy.spatial.distance import cdist
y, x = np.where(arr)
p = np.c_[x, y]
dist = cdist(p, p)
np.allclose(array_to_complete_graph(arr), dist)
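If a condensed (one-entry-per-pair) form is preferred over the full square matrix, scipy.spatial.distance.pdist plus squareform gives the same distances:

from scipy.spatial.distance import pdist, squareform

condensed = pdist(p)                # shape (N*(N-1)/2,), upper triangle only
dist_again = squareform(condensed)  # back to the full NxN matrix
np.allclose(dist, dist_again)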
