Matplotlib: Rotate an ellipse in 3D issue [duplicate] - python

Short question
How can matplotlib 2D patches be transformed to 3D with arbitrary normals?
Long question
I would like to plot Patches in axes with 3d projection. However, the methods provided by mpl_toolkits.mplot3d.art3d only provide methods to have patches with normals along the principal axes. How can I add patches to 3d axes that have arbitrary normals?

Short answer
Copy the code below into your project and use the method
def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.

    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
to transform your 2D patches to 3D patches with arbitrary normals.
import numpy as np
from mpl_toolkits.mplot3d import art3d

def rotation_matrix(d):
    """
    Calculates a rotation matrix given a vector d. The direction of d
    corresponds to the rotation axis. The length of d corresponds to
    the sin of the angle of rotation.

    Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
    """
    sin_angle = np.linalg.norm(d)

    if sin_angle == 0:
        return np.identity(3)

    d /= sin_angle

    eye = np.eye(3)
    ddt = np.outer(d, d)
    skew = np.array([[    0,  d[2], -d[1]],
                     [-d[2],     0,  d[0]],
                     [ d[1], -d[0],     0]], dtype=np.float64)

    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.

    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)

    normal = np.asarray(normal, dtype=float)
    normal /= np.linalg.norm(normal)  # Make sure the vector is normalised

    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()

    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    verts = path.vertices  # Get the vertices in 2D

    d = np.cross(normal, (0, 0, 1))  # Obtain the rotation vector
    M = rotation_matrix(d)  # Get the rotation matrix

    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    """
    Translates the 3D pathpatch by the amount delta.
    """
    pathpatch._segment3d += delta
Long answer
Looking at the source code of art3d.pathpatch_2d_to_3d gives the following call hierarchy
art3d.pathpatch_2d_to_3d
art3d.PathPatch3D.set_3d_properties
art3d.Patch3D.set_3d_properties
art3d.juggle_axes
The transformation from 2D to 3D happens in the last call to art3d.juggle_axes. Modifying this last step, we can obtain patches in 3D with arbitrary normals.
We proceed in four steps:

1. Project the vertices of the patch into the XY plane (pathpatch_2d_to_3d)
2. Calculate a rotation matrix R that rotates the z direction to the direction of the normal (rotation_matrix)
3. Apply the rotation matrix to all vertices (pathpatch_2d_to_3d)
4. Translate the resulting object in the z-direction (pathpatch_2d_to_3d)
Sample source code and the resulting plot are shown below.
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from itertools import product

fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # Create axes

p = Circle((0, 0), .2)  # Add a circle in the yz plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0.5, normal='x')
pathpatch_translate(p, (0, 0.5, 0))

p = Circle((0, 0), .2, facecolor='r')  # Add a circle in the xz plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0.5, normal='y')
pathpatch_translate(p, (0.5, 1, 0))

p = Circle((0, 0), .2, facecolor='g')  # Add a circle in the xy plane
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0, normal='z')
pathpatch_translate(p, (0.5, 0.5, 0))

for normal in product((-1, 1), repeat=3):  # Circles tilted along all eight corner directions
    p = Circle((0, 0), .2, facecolor='y', alpha=.2)
    ax.add_patch(p)
    pathpatch_2d_to_3d(p, z=0, normal=normal)
    pathpatch_translate(p, 0.5)

plt.show()

Very useful piece of code, but there is a small caveat: it cannot handle normals pointing downwards, because it uses only the sine of the angle.
You need to use the cosine as well:
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import art3d
from mpl_toolkits.mplot3d import proj3d
import numpy as np

def rotation_matrix(v1, v2):
    """
    Calculates the rotation matrix that rotates v2 onto v1.
    """
    v1 = np.asarray(v1, dtype=float) / np.linalg.norm(v1)
    v2 = np.asarray(v2, dtype=float) / np.linalg.norm(v2)

    cos_angle = np.dot(v1, v2)
    d = np.cross(v1, v2)
    sin_angle = np.linalg.norm(d)

    if sin_angle == 0:
        # v1 and v2 are (anti)parallel: identity or point reflection
        M = np.identity(3) if cos_angle > 0. else -np.identity(3)
    else:
        d /= sin_angle

        eye = np.eye(3)
        ddt = np.outer(d, d)
        skew = np.array([[    0,  d[2], -d[1]],
                         [-d[2],     0,  d[0]],
                         [ d[1], -d[0],     0]], dtype=np.float64)

        M = ddt + cos_angle * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.

    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)

    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()

    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    verts = path.vertices  # Get the vertices in 2D

    M = rotation_matrix(normal, (0, 0, 1))  # Get the rotation matrix

    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    """
    Translates the 3D pathpatch by the amount delta.
    """
    pathpatch._segment3d += delta
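As a quick check, here is a minimal usage sketch (my own, not part of the original answer) that places a circle with a mostly downward-pointing normal, which is exactly the case the sine-only version gets wrong:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

fig = plt.figure()
ax = fig.add_subplot(projection='3d')

p = Circle((0, 0), .2, facecolor='b')
ax.add_patch(p)
# this normal points mostly downwards; the sine-only variant would silently use the wrong (positive) cosine
pathpatch_2d_to_3d(p, z=0.5, normal=(0, 0.5, -1))
pathpatch_translate(p, (0.5, 0.5, 0))

ax.set_xlim(0, 1); ax.set_ylim(0, 1); ax.set_zlim(0, 1)
plt.show()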

Here's a more general method that allows embedding in more complex ways than along a normal:
import numpy as np
from mpl_toolkits.mplot3d import art3d

class EmbeddedPatch2D(art3d.PathPatch3D):
    def __init__(self, patch, transform):
        assert transform.shape == (4, 3)

        self._patch2d = patch
        self.transform = transform
        self._path2d = patch.get_path()
        self._facecolor2d = patch.get_facecolor()

        self.set_3d_properties()

    def set_3d_properties(self, *args, **kwargs):
        # get the fully-transformed path
        path = self._patch2d.get_path()
        trans = self._patch2d.get_patch_transform()
        path = trans.transform_path(path)

        # copy across the relevant properties
        self._code3d = path.codes
        self._facecolor3d = self._patch2d.get_facecolor()

        # calculate the transformed vertices (in homogeneous coordinates)
        verts = np.empty(path.vertices.shape + np.array([0, 1]))
        verts[:, :-1] = path.vertices
        verts[:, -1] = 1
        self._segment3d = verts.dot(self.transform.T)[:, :-1]

    def __getattr__(self, key):
        return getattr(self._patch2d, key)
To use this as desired in the question, we need a helper function
def matrix_from_normal(normal):
    """
    Given a normal vector, builds a 4x4 homogeneous rotation matrix M
    such that M[:-1, 0] == normal (the x axis is mapped onto the normal).
    """
    normal = np.asarray(normal, dtype=float)
    normal = normal / np.linalg.norm(normal)

    res = np.eye(len(normal) + 1)
    res[:-1, 0] = normal

    if normal[0] == 0:
        perp = np.array([0, -normal[2], normal[1]], dtype=float)
    else:
        perp = np.cross(normal, [1, 0, 0])
    perp /= np.linalg.norm(perp)

    res[:-1, 1] = perp
    res[:-1, 2] = np.cross(normal, perp)
    return res
All together:
circ = Circle((0, 0), .2, facecolor='y', alpha=.2)

# the matrix here turns (x, y, 1) into (0, x, y, 1)
mat = matrix_from_normal([1, 1, 0]).dot([
    [0, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]
])
circ3d = EmbeddedPatch2D(circ, mat)
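A quick numeric sanity check of the embedding (my own addition): since the translation part of the matrix is zero, every transformed vertex should lie in the plane through the origin with normal [1, 1, 0], so the dot products below should all be zero.

import numpy as np

n = np.array([1.0, 1.0, 0.0]) / np.sqrt(2)
print(np.allclose(circ3d._segment3d @ n, 0))  # expected: True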

I want to share my solution that extends the earlier proposals.
It enables both 3D elements and text to be added to an Axes3D plot.
import math
import numpy as np
from scipy.spatial.transform import Rotation
from mpl_toolkits import mplot3d
from matplotlib.patches import Circle, PathPatch
from matplotlib.textpath import TextPath
from matplotlib.font_manager import FontProperties

# creation of a rotation matrix that preserves the x-axis in an xy-plane of the original coordinate system
def rotationMatrix(normal):
    norm = np.linalg.norm(normal)
    if norm == 0:
        return Rotation.identity()
    zDir = normal / norm
    if np.abs(zDir[2]) == 1:
        yDir = np.array([0, zDir[2], 0])
    else:
        yDir = (np.array([0, 0, 1]) - zDir[2] * zDir) / math.sqrt(1 - zDir[2]**2)
    rotMat = np.empty((3, 3))
    rotMat[:, 0] = np.cross(zDir, yDir)
    rotMat[:, 1] = yDir
    rotMat[:, 2] = -zDir
    return Rotation.from_matrix(rotMat)

def toVector(vec):
    if vec is None or not isinstance(vec, (np.ndarray, str)):
        vec = "z"
    if isinstance(vec, str):
        zdir = vec[0] if len(vec) > 0 else "z"
        if zdir not in "xyz":
            zdir = "z"
        index = "xyz".index(zdir)
        return np.roll((1.0, 0, 0), index)
    return vec

# Transforms a 2D Patch to a 3D patch using a pivot point and the given normal vector.
def pathpatch_2d_to_3d(pathpatch, pivot=np.zeros(3), zDir='z'):
    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform

    pathpatch.__class__ = mplot3d.art3d.PathPatch3D  # Change the class
    pathpatch._path2d = path  # Copy the 2d path
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color

    # Get the 2D vertices and add the third dimension
    verts3d = np.empty((path.vertices.shape[0], 3))
    verts3d[:, 0:2] = path.vertices
    verts3d[:, 2] = pivot[2]

    R = rotationMatrix(toVector(zDir))
    pathpatch._segment3d = R.apply(verts3d - pivot) + pivot
    return pathpatch

# Places a 3D text element in axes with 3d projection.
def text3d(xyz, text, zDir="z", scalefactor=1.0, fp=FontProperties(), **kwargs):
    pt = PathPatch(TextPath(xyz[0:2], text, size=scalefactor * fp.get_size(), prop=fp, usetex=False), **kwargs)
    ax3D.add_patch(pathpatch_2d_to_3d(pt, xyz, zDir))

# Places a 3D circle in axes with 3d projection.
def circle3d(center, radius, zDir='z', **kwargs):
    pc = Circle(center[0:2], radius, **kwargs)
    ax3D.add_patch(pathpatch_2d_to_3d(pc, center, zDir))
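A minimal usage sketch for these helpers (the figure setup and the concrete centers/normals are my own choices; note that text3d and circle3d rely on a global ax3D):

import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax3D = fig.add_subplot(projection='3d')  # global used by text3d/circle3d

# a circle whose normal points along (1, 1, 0)
circle3d((0.5, 0.5, 0.5), 0.2, zDir=np.array([1.0, 1.0, 0.0]), facecolor='g', alpha=0.5)

# a small text label lying flat in the xy plane
text3d((0.1, 0.1, 0.0), "label", zDir='z', scalefactor=0.02)

ax3D.set_xlim(0, 1); ax3D.set_ylim(0, 1); ax3D.set_zlim(0, 1)
plt.show()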

Related

How to generate points within rectangle, at random locations and without overlap?

I have an image with width 1920 and height 1080.
Ultimately, I want to place various shapes within the image, but at random locations and in such a way that they do not overlap. The 0,0 coordinates of the image are in the center.
Before rendering the shapes into the image (I don't need help with this), I need to write an algorithm to generate the XY points/locations. I want to be able to specify the minimum distance any given point is allowed to get to any other points.
How can I do this?
All I have been able to do is generate points at equally spaced locations and then add a bit of randomness to each point. But this is not ideal, because the points only vary within their 'cell' of the grid, and if the randomness value is too high, they will appear outside of the rectangle. Here is my code:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from random import randrange

def is_square(integer):
    root = np.sqrt(integer)
    return integer == int(root + 0.5) ** 2

def perfect_sqr(n):
    nextN = np.floor(np.sqrt(n)) + 1
    return int(nextN * nextN)

def generate_cells(width=1920, height=1080, n=9, show_plot=False):
    # If the number is not a perfect square, we need to find the next number which is,
    # so that we can get the root N, which will be used to determine the number of rows/columns
    if not is_square(n):
        n = perfect_sqr(n)
    N = np.sqrt(n)

    # generate x and y lists, where each represents an array of points evenly spaced between 0 and the width/height
    x = np.array(list(range(0, width, int(width / N))))
    y = np.array(list(range(0, height, int(height / N))))

    # center the points within each 'cell'
    x_centered = x + int(width / N) / 2
    y_centered = y + int(height / N) / 2
    x_centered = [a + randrange(50) for a in x_centered]
    y_centered = [a + randrange(50) for a in y_centered]

    # generate a grid with the points
    xv, yv = np.meshgrid(x_centered, y_centered)

    if show_plot:
        plt.scatter(xv, yv)
        plt.gca().add_patch(Rectangle((0, 0), width, height, edgecolor='red', facecolor='none', lw=1))
        plt.show()

    # convert the arrays to 1D
    xx = xv.flatten()
    yy = yv.flatten()

    # Merge them side-by-side
    zips = zip(xx, yy)

    # convert to set of points/tuples and return
    return set(zips)

coords = generate_cells(width=1920, height=1080, n=15, show_plot=True)
print(coords)
Assuming you simply want to define random, non-overlapping coordinates within the confines of a maximum image size, this might be a good solution.
import numpy as np

def locateImages(field_height: int, field_width: int, min_sep: int, points: int) -> np.array:
    h_range = np.array(range(min_sep//2, field_height - (min_sep//2), min_sep))
    w_range = np.array(range(min_sep//2, field_width - (min_sep//2), min_sep))
    mx_len = max(len(h_range), len(w_range))
    if len(h_range) < mx_len:
        xtra = np.random.choice(h_range, mx_len - len(h_range))
        h_range = np.append(h_range, xtra)
    if len(w_range) < mx_len:
        xtra = np.random.choice(w_range, mx_len - len(w_range))
        w_range = np.append(w_range, xtra)
    h_points = np.random.choice(h_range, points, replace=False)
    w_points = np.random.choice(w_range, points, replace=False)
    return np.concatenate((np.vstack(h_points), np.vstack(w_points)), axis=1)
Then given:
field_height = the maximum vertical coordinate of the image space
field_width = the maximum horizontal coordinate of the image space
min_sep = the minimum spacing between images
points = the number of coordinates to be selected
Then:
locateImages(15, 8, 2, 5) will yield:
array([[13,  1],
       [ 7,  3],
       [ 1,  5],
       [ 5,  5],
       [11,  5]])
Render the output:
points = locateImages(1080, 1920, 100, 15)
h, w = zip(*points)   # each row of the output is (height, width)
plt.scatter(w, h)
plt.gca().add_patch(Rectangle((0, 0), 1920, 1080, edgecolor='red', facecolor='none', lw=1))
plt.show()

How to use vtkOBBTree and IntersectWithLine to find the intersection of a line and a PolyDataFilter in python using vtk?

I have an oriented cylinder generated with vtkCylinderSource, and some transformations are applied to it to get the orientation that I want. Here is the code for creating this oriented cylinder:
import vtk

# USER_MATRIX is a boolean flag defined elsewhere (it comes from the VTK
# oriented-cylinder example this code is based on).
def cylinder_object(startPoint, endPoint, radius, my_color="DarkRed", opacity=1):
    colors = vtk.vtkNamedColors()

    # Create a cylinder.
    # Cylinder height vector is (0,1,0).
    # Cylinder center is in the middle of the cylinder.
    cylinderSource = vtk.vtkCylinderSource()
    cylinderSource.SetRadius(radius)
    cylinderSource.SetResolution(50)

    # Generate a random start and end point
    # startPoint = [0] * 3
    # endPoint = [0] * 3
    rng = vtk.vtkMinimalStandardRandomSequence()
    rng.SetSeed(8775070)  # For testing. 8775070

    # Compute a basis
    normalizedX = [0] * 3
    normalizedY = [0] * 3
    normalizedZ = [0] * 3

    # The X axis is a vector from start to end
    vtk.vtkMath.Subtract(endPoint, startPoint, normalizedX)
    length = vtk.vtkMath.Norm(normalizedX)
    vtk.vtkMath.Normalize(normalizedX)

    # The Z axis is an arbitrary vector cross X
    arbitrary = [0] * 3
    for i in range(0, 3):
        rng.Next()
        arbitrary[i] = rng.GetRangeValue(-10, 10)
    vtk.vtkMath.Cross(normalizedX, arbitrary, normalizedZ)
    vtk.vtkMath.Normalize(normalizedZ)

    # The Y axis is Z cross X
    vtk.vtkMath.Cross(normalizedZ, normalizedX, normalizedY)
    matrix = vtk.vtkMatrix4x4()

    # Create the direction cosine matrix
    matrix.Identity()
    for i in range(0, 3):
        matrix.SetElement(i, 0, normalizedX[i])
        matrix.SetElement(i, 1, normalizedY[i])
        matrix.SetElement(i, 2, normalizedZ[i])

    # Apply the transforms
    transform = vtk.vtkTransform()
    transform.Translate(startPoint)    # translate to starting point
    transform.Concatenate(matrix)      # apply direction cosines
    transform.RotateZ(-90.0)           # align cylinder to x axis
    transform.Scale(1.0, length, 1.0)  # scale along the height vector
    transform.Translate(0, .5, 0)      # translate to start of cylinder

    # Transform the polydata
    transformPD = vtk.vtkTransformPolyDataFilter()
    transformPD.SetTransform(transform)
    transformPD.SetInputConnection(cylinderSource.GetOutputPort())
    cylinderSource.Update()

    # Create a mapper and actor for the cylinder
    mapper = vtk.vtkPolyDataMapper()
    actor = vtk.vtkActor()
    if USER_MATRIX:
        mapper.SetInputConnection(cylinderSource.GetOutputPort())
        actor.SetUserMatrix(transform.GetMatrix())
    else:
        mapper.SetInputConnection(transformPD.GetOutputPort())
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(colors.GetColor3d(my_color))
    actor.GetProperty().SetOpacity(opacity)
    return actor, transformPD
Now I want to ray-cast a line against this oriented cylinder. Unfortunately, using the vtkCylinderSource as the dataset for vtkOBBTree produces the wrong points as the result. How can I use ray casting with a PolyDataFilter?
I came up with a solution where I export my oriented cylinder to a .stl file and then read it back in to run the ray-casting algorithm using IntersectWithLine. The problem is that I have thousands of these oriented cylinders, and this method (exporting and reading) makes my code extremely slow.
def ray_cast(filename, p_source, p_target):
    '''
    :param filename: STL file to perform ray casting on.
    :param p_source: first point
    :param p_target: second point
    :return: code -->  0 : No intersection.
    :return: code --> +1 : p_source lies OUTSIDE the closed surface.
    :return: code --> -1 : p_source lies INSIDE the closed surface.
    '''
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    mesh = reader.GetOutput()

    obbtree = vtk.vtkOBBTree()
    obbtree.SetDataSet(mesh)
    obbtree.BuildLocator()

    pointsVTKIntersection = vtk.vtkPoints()
    code = obbtree.IntersectWithLine(p_source, p_target, pointsVTKIntersection, None)

    # Extracting data
    pointsVTKIntersectionData = pointsVTKIntersection.GetData()
    noPointsVTKIntersection = pointsVTKIntersectionData.GetNumberOfTuples()
    pointsIntersection = []
    for idx in range(noPointsVTKIntersection):
        _tup = pointsVTKIntersectionData.GetTuple3(idx)
        pointsIntersection.append(_tup)

    return code, pointsIntersection, noPointsVTKIntersection
The image below shows the desired result using the export-to-STL method (the green spheres are the intersection points).
I would appreciate any suggestions and help.
With vedo:
from vedo import *
cyl = Cylinder() # vtkActor
cyl.alpha(0.5).pos(3,3,3).orientation([2,1,1])
p1, p2 = (0,0,0), (4,4,5)
ipts_coords = cyl.intersectWithLine(p1, p2)
print('hit coords are', ipts_coords)
pts = Points(ipts_coords, r=10).color("yellow")
# print(pts.polydata()) # is the vtkPolyData object
origin = Point()
ln = Line(p1,p2)
show(origin, cyl, ln, pts, axes=True)
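If you would rather stay with plain VTK, the slow export/re-import step can be skipped entirely: vtkOBBTree accepts any vtkPolyData, so you can hand it the output of the transform filter directly. A rough, untested sketch reusing cylinder_object from the question:

import vtk

# cylinder_object is the function from the question; it returns the actor and
# the vtkTransformPolyDataFilter that holds the transformed geometry.
actor, transformPD = cylinder_object([0, 0, 0], [5, 5, 5], 0.5)
transformPD.Update()                         # make sure the filter has executed

obbtree = vtk.vtkOBBTree()
obbtree.SetDataSet(transformPD.GetOutput())  # in-memory polydata, no STL round-trip
obbtree.BuildLocator()

points = vtk.vtkPoints()
code = obbtree.IntersectWithLine([0, 0, 0], [10, 10, 10], points, None)
hits = [points.GetPoint(i) for i in range(points.GetNumberOfPoints())]
print(code, hits)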

Find an easier way to cluster 2-d scatter data into grid array data

I have figured out a method to cluster dispersed point data into a structured 2-d array (like a rasterize function), and I hope there are better ways to achieve that target.
My work
1. Intro
The 1000 data points have three properties (lon, lat, emission); each point represents one factory located at (x, y) that emits a certain amount of CO2 into the atmosphere
grid network: a predefined 2-d array in the shape of 20x20
http://i4.tietuku.com/02fbaf32d2f09fff.png
The code is reproduced here:
#### define the map area
xc1,xc2,yc1,yc2 = 113.49805889531724,115.5030664238035,37.39995194888143,38.789235929357105
map = Basemap(llcrnrlon=xc1,llcrnrlat=yc1,urcrnrlon=xc2,urcrnrlat=yc2)
#### reading the point data and scatter plot by their position
df = pd.read_csv("xxxxx.csv")
px,py = map(df.lon, df.lat)
map.scatter(px, py, color = "red", s= 5,zorder =3)
#### predefine the grid networks
lon_grid,lat_grid = np.linspace(xc1,xc2,21), np.linspace(yc1,yc2,21)
lon_x,lat_y = np.meshgrid(lon_grid,lat_grid)
grids = np.zeros(20*20).reshape(20,20)
plt.pcolormesh(lon_x,lat_y,grids,cmap = 'gray', facecolor = 'none',edgecolor = 'k',zorder=3)
2. My target
Find the nearest grid point for each factory
Add the emission data to that grid cell
3. Algorithm realization
3.1 Raster grid
note: the 20x20 grid points distributed in this area are represented by the blue dots.
http://i4.tietuku.com/8548554587b0cb3a.png
3.2 KD-tree
Find the nearest blue dot for each red point
sh = (20*20, 2)
grids = np.zeros(20*20*2).reshape(*sh)
sh_emission = (20*20)
grids_em = np.zeros(20*20).reshape(sh_emission)

k = 0
for j in range(0, yy.shape[0], 1):
    for i in range(0, xx.shape[0], 1):
        grids[k] = np.array([lon_grid[i], lat_grid[j]])
        k += 1

T = KDTree(grids)
x_delta = (lon_grid[2] - lon_grid[1])
y_delta = (lat_grid[2] - lat_grid[1])
R = np.sqrt(x_delta**2 + y_delta**2)

for i in range(0, len(df.lon), 1):
    idx = T.query_ball_point([df.lon.iloc[i], df.lat.iloc[i]], r=R)
    # sometimes more than one blue dot is found,
    # so I calculate the distances between the factory (red point)
    # and all the blue dots which are listed
    if (idx > 1):
        distance = []
        for k in range(0, len(idx), 1):
            distance.append(np.sqrt((df.lon.iloc[i] - grids[k][0])**2 + (df.lat.iloc[i] - grids[k][1])**2))
        pos_index = distance.index(min(distance))
        pos = idx[pos_index]
    # only one point found
    else:
        pos = idx
    grids_em[pos] += df.so2[i]
4. Result
co2 = grids_em.reshape(20,20)
plt.pcolormesh(lon_x,lat_y,co2,cmap =plt.cm.Spectral_r,zorder=3)
http://i4.tietuku.com/6ded65c4ac301294.png
5. My question
Can someone point out some drawbacks or errors of this method?
Are there algorithms more aligned with my target?
Thanks a lot!
There are many for-loops in your code; that's not the numpy way.
Make some sample data first:
import numpy as np
import pandas as pd
from scipy.spatial import KDTree
import pylab as pl
xc1, xc2, yc1, yc2 = 113.49805889531724, 115.5030664238035, 37.39995194888143, 38.789235929357105
N = 1000
GSIZE = 20
x, y = np.random.multivariate_normal([(xc1 + xc2)*0.5, (yc1 + yc2)*0.5], [[0.1, 0.02], [0.02, 0.1]], size=N).T
value = np.ones(N)
df_points = pd.DataFrame({"x":x, "y":y, "v":value})
For equally spaced grids you can use hist2d():
pl.hist2d(df_points.x, df_points.y, weights=df_points.v, bins=20, cmap="viridis");
Here is the output:
Here is the code to use KdTree:
X, Y = np.mgrid[x.min():x.max():GSIZE*1j, y.min():y.max():GSIZE*1j]
grid = np.c_[X.ravel(), Y.ravel()]
points = np.c_[df_points.x, df_points.y]
tree = KDTree(grid)
dist, indices = tree.query(points)
grid_values = df_points.groupby(indices).v.sum()
df_grid = pd.DataFrame(grid, columns=["x", "y"])
df_grid["v"] = grid_values
fig, ax = pl.subplots(figsize=(10, 8))
ax.plot(df_points.x, df_points.y, "kx", alpha=0.2)
mapper = ax.scatter(df_grid.x, df_grid.y, c=df_grid.v,
                    cmap="viridis",
                    linewidths=0,
                    s=100, marker="o")
pl.colorbar(mapper, ax=ax);
the output is:

How to plot 3D ellipsoid with Mayavi

I would like to plot diffusion tensors (ellipsoids) in diffusion MRI. The data have the three eigenvalues of the corresponding diffusion tensor. I want to draw a 3D ellipsoid with its semi-axis lengths corresponding to those three eigenvalues.
How can I do this with Mayavi?
Google brought me here and to the answer. I found how to render an ellipsoid here: https://github.com/spyke/spyke/blob/master/demo/mayavi_test.py and combined it with the arrow from here https://stackoverflow.com/a/20109619/2389450 to produce something like: http://imageshack.com/a/img673/7664/YzbTHY.png
Cheers,
Max
Code:
from mayavi.api import Engine
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
from mayavi import mlab
from tvtk.tools import visual
import numpy as np

def Arrow_From_A_to_B(x1, y1, z1, x2, y2, z2, scale=None):
    ar1 = visual.arrow(x=x1, y=y1, z=z1)
    ar1.length_cone = 0.4

    arrow_length = np.sqrt((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)
    if scale is None:
        ar1.actor.scale = [arrow_length, arrow_length, arrow_length]
    else:
        ar1.actor.scale = scale
    ar1.pos = ar1.pos / arrow_length
    ar1.axis = [x2-x1, y2-y1, z2-z1]
    return ar1

engine = Engine()
engine.start()
scene = engine.new_scene()
scene.scene.disable_render = True  # for speed

visual.set_viewer(scene)

surfaces = []
for i in range(2):
    source = ParametricSurface()
    source.function = 'ellipsoid'
    engine.add_source(source)

    surface = Surface()
    source.add_module(surface)

    actor = surface.actor  # mayavi actor, actor.actor is tvtk actor
    #actor.property.ambient = 1 # defaults to 0 for some reason, ah don't need it, turn off scalar visibility instead
    actor.property.opacity = 0.7
    actor.property.color = (0, 0, 1)  # tuple(np.random.rand(3))
    actor.mapper.scalar_visibility = False  # don't colour ellipses by their scalar indices into colour map
    actor.property.backface_culling = True  # gets rid of weird rendering artifact when opacity is < 1
    actor.property.specular = 0.1
    #actor.property.frontface_culling = True
    actor.actor.orientation = np.array([1, 0, 0]) * 360  # in degrees
    actor.actor.origin = np.array([0, 0, 0])
    actor.actor.position = np.array([0, 0, 0])
    actor.actor.scale = np.array([0.26490647, 0.26490647, 0.92717265])
    actor.enable_texture = True
    actor.property.representation = ['wireframe', 'surface'][i]
    surfaces.append(surface)

Arrow_From_A_to_B(0, 0, 0, 0.26490647, 0, 0, np.array([0.26490647, 0.4, 0.4]))
Arrow_From_A_to_B(0, 0, 0, 0, 0.26490647, 0, np.array([0.4, 0.26490647, 0.4]))
Arrow_From_A_to_B(0, 0, 0, 0, 0, 0.92717265, np.array([0.4, 0.4, 0.92717265]))

source.scene.background = (1.0, 1.0, 1.0)
scene.scene.disable_render = False  # now turn it on

# set the scalars, this has to be done some indeterminate amount of time
# after each surface is created, otherwise the scalars get overwritten
# later by their default of 1.0
for i, surface in enumerate(surfaces):
    vtk_srcs = mlab.pipeline.get_vtk_src(surface)
    print('len(vtk_srcs) = %d' % len(vtk_srcs))
    vtk_src = vtk_srcs[0]
    try:
        npoints = len(vtk_src.point_data.scalars)
    except TypeError:
        print('hit the TypeError on surface i=%d' % i)
        npoints = 2500
    vtk_src.point_data.scalars = np.tile(i, npoints)

# on pick, find the ellipsoid with origin closest to the picked coord,
# then check if that coord falls within that nearest ellipsoid, and if
# so, print out the ellipsoid id, or pop it up in a tooltip

mlab.show()
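If you only need the ellipsoid itself (no arrows or picking), a shorter route is to scale a parametric sphere by the three eigenvalues and pass it to mlab.mesh. A small sketch with made-up eigenvalues; for an oriented tensor you would additionally rotate the points by the eigenvector matrix:

import numpy as np
from mayavi import mlab

eigvals = np.array([0.26, 0.26, 0.93])  # made-up semi-axis lengths (the eigenvalues)

phi, theta = np.mgrid[0:np.pi:60j, 0:2*np.pi:60j]
x = eigvals[0] * np.sin(phi) * np.cos(theta)
y = eigvals[1] * np.sin(phi) * np.sin(theta)
z = eigvals[2] * np.cos(phi)

mlab.mesh(x, y, z, color=(0, 0, 1), opacity=0.7)
mlab.show()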

How can an almost arbitrary plane in a 3D dataset be plotted by matplotlib?

There is an array containing 3D data, e.g. of shape (64,64,64). How do you plot a plane, given by a point and a normal (similar to hkl planes in crystallography), through this dataset?
Similar to what can be done in MayaVi by rotating a plane through the data.
The resulting plot will contain non-square planes in most cases.
Can those be done with matplotlib (some sort of non-rectangular patch)?
Edit: I almost solved this myself (see below) but still wonder how non-rectangular patches can be plotted in matplotlib...?
Edit: Due to discussions below I restated the question.
This is funny; I replied to a similar question just today. The way to go is: interpolation. You can use griddata from scipy.interpolate:
Griddata
This page features a very nice example, and the signature of the function is really close to your data.
You still have to somehow define the points on your plane for which you want to interpolate the data. I will have a look at this; my linear algebra lessons were a couple of years ago.
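To sketch that last step (my own example, not from the linked page): build two orthonormal vectors spanning the plane, lay out a grid of sample points on it, and evaluate an interpolator at those points. Here I use scipy.interpolate.RegularGridInterpolator instead of griddata, since the data already sit on a regular grid:

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator

data = np.random.rand(64, 64, 64)     # stand-in for the 3D dataset
point = np.array([32.0, 32.0, 32.0])  # a point on the plane
normal = np.array([1.0, 1.0, 0.0])
normal /= np.linalg.norm(normal)

# two orthonormal vectors spanning the plane
u = np.cross(normal, [0.0, 0.0, 1.0])
if np.linalg.norm(u) < 1e-12:         # normal was (anti)parallel to z
    u = np.cross(normal, [0.0, 1.0, 0.0])
u /= np.linalg.norm(u)
v = np.cross(normal, u)

# sample the plane on a 64x64 grid of in-plane coordinates
s = np.linspace(-32, 32, 64)
S, T = np.meshgrid(s, s)
pts = point + S[..., None]*u + T[..., None]*v  # shape (64, 64, 3)

interp = RegularGridInterpolator((np.arange(64),)*3, data,
                                 bounds_error=False, fill_value=np.nan)
plane_slice = interp(pts)  # shape (64, 64); NaN outside the cube

plt.imshow(plane_slice)
plt.show()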
I have the penultimate solution for this problem, partially solved by using the second answer to Plot a plane based on a normal vector and a point in Matlab or matplotlib:
# coding: utf-8
import numpy as np
from matplotlib.pyplot import imshow, show

A = np.empty((64, 64, 64))  # This is the data array

def f(x, y):
    return np.sin(x/(2*np.pi)) + np.cos(y/(2*np.pi))

xx, yy = np.meshgrid(range(64), range(64))
for x in range(64):
    A[:, :, x] = f(xx, yy) * np.cos(x/np.pi)

N = np.zeros((64, 64))
"""This is the plane we cut from A.
It should be larger than 64, due to diagonal planes being larger.
Will be fixed."""

normal = np.array([-1, -1, 1])  # Define cut plane here. Normal vector components restricted to integers
point = np.array([0, 0, 0])
d = -np.sum(point * normal)

def plane(x, y):  # Get the plane's z values
    return (-normal[0]*x - normal[1]*y - d) / normal[2]

def getZZ(x, y):  # Get z for all values x, y. If z > 64 it's out of range
    for i in x:
        for j in y:
            z = int(plane(i, j))
            if z < 64:
                N[i, j] = A[i, j, z]

getZZ(range(64), range(64))
imshow(N, interpolation="nearest")
show()
It's not the ultimate solution, since the plot is not restricted to points that actually have a z value, planes larger than 64 x 64 are not accounted for, and the planes have to be defined at (0,0,0).
For the reduced requirements, I prepared a simple example
import numpy as np
import pylab as plt

data = np.arange((64**3))
data.resize((64, 64, 64))

def get_slice(volume, orientation, index):
    orientation2slicefunc = {
        "x": lambda ar: ar[index, :, :],
        "y": lambda ar: ar[:, index, :],
        "z": lambda ar: ar[:, :, index],
    }
    return orientation2slicefunc[orientation](volume)

plt.subplot(221)
plt.imshow(get_slice(data, "x", 10), vmin=0, vmax=64**3)

plt.subplot(222)
plt.imshow(get_slice(data, "x", 39), vmin=0, vmax=64**3)

plt.subplot(223)
plt.imshow(get_slice(data, "y", 15), vmin=0, vmax=64**3)

plt.subplot(224)
plt.imshow(get_slice(data, "z", 25), vmin=0, vmax=64**3)

plt.show()
This leads to the following plot:
The main trick is the dictionary mapping orientations to lambda-methods, which saves us from writing annoying if-then-else blocks. Of course you can decide to give different names,
e.g. numbers, to the orientations.
Maybe this helps you.
Thorsten
P.S.: I didn't care about "IndexOutOfRange", for me it's o.k. to let this exception pop out since it is perfectly understandable in this context.
I had to do something similar for MRI data enhancement:
The code can probably be optimized, but it works as it is.
My data is a 3-dimensional numpy array representing an MRI scan. It has size [128,128,128], but the code can be modified to accept any dimensions. When the plane is outside the cube boundary, you have to give a default value to the variable fill in the main function; in my case I chose: data_cube[0:5,0:5,0:5].mean()
import numpy as np

def create_normal_vector(x, y, z):
    normal = np.asarray([x, y, z])
    normal = normal / np.sqrt(sum(normal**2))
    return normal

def get_plane_equation_parameters(normal, point):
    a, b, c = normal
    d = np.dot(normal, point)
    return a, b, c, d  # ax + by + cz = d

def get_point_plane_proximity(plane, point):
    # just an approximation
    return np.dot(plane[0:-1], point) - plane[-1]

def get_corner_interesections(plane, cube_dim=128):  # to reduce the search space
    # dimension is 128,128,128
    corners_list = []
    only_x = np.zeros(4)
    min_prox_x = 9999
    min_prox_y = 9999
    min_prox_z = 9999
    min_prox_yz = 9999
    for i in range(cube_dim):
        temp_min_prox_x = abs(get_point_plane_proximity(plane, np.asarray([i, 0, 0])))
        # print("pseudo distance x: {0}, point: [{1},0,0]".format(temp_min_prox_x, i))
        if temp_min_prox_x < min_prox_x:
            min_prox_x = temp_min_prox_x
            corner_intersection_x = np.asarray([i, 0, 0])
            only_x[0] = i

        temp_min_prox_y = abs(get_point_plane_proximity(plane, np.asarray([i, cube_dim, 0])))
        # print("pseudo distance y: {0}, point: [{1},{2},0]".format(temp_min_prox_y, i, cube_dim))
        if temp_min_prox_y < min_prox_y:
            min_prox_y = temp_min_prox_y
            corner_intersection_y = np.asarray([i, cube_dim, 0])
            only_x[1] = i

        temp_min_prox_z = abs(get_point_plane_proximity(plane, np.asarray([i, 0, cube_dim])))
        # print("pseudo distance z: {0}, point: [{1},0,{2}]".format(temp_min_prox_z, i, cube_dim))
        if temp_min_prox_z < min_prox_z:
            min_prox_z = temp_min_prox_z
            corner_intersection_z = np.asarray([i, 0, cube_dim])
            only_x[2] = i

        temp_min_prox_yz = abs(get_point_plane_proximity(plane, np.asarray([i, cube_dim, cube_dim])))
        # print("pseudo distance yz: {0}, point: [{1},{2},{2}]".format(temp_min_prox_yz, i, cube_dim))
        if temp_min_prox_yz < min_prox_yz:
            min_prox_yz = temp_min_prox_yz
            corner_intersection_yz = np.asarray([i, cube_dim, cube_dim])
            only_x[3] = i

    corners_list.append(corner_intersection_x)
    corners_list.append(corner_intersection_y)
    corners_list.append(corner_intersection_z)
    corners_list.append(corner_intersection_yz)
    corners_list.append(only_x.min())
    corners_list.append(only_x.max())
    return corners_list

def get_points_intersection(plane, min_x, max_x, data_cube, shape=128):
    fill = data_cube[0:5, 0:5, 0:5].mean()  # this can be a parameter
    extended_data_cube = np.ones([shape+2, shape, shape]) * fill
    extended_data_cube[1:shape+1, :, :] = data_cube
    diag_image = np.zeros([shape, shape])
    min_x_value = 999999
    for i in range(shape):
        for j in range(shape):
            for k in range(int(min_x), int(max_x)+1):
                current_value = abs(get_point_plane_proximity(plane, np.asarray([k, i, j])))
                # print("current_value: {0}, val: [{1},{2},{3}]".format(current_value, k, i, j))
                if current_value < min_x_value:
                    diag_image[i, j] = extended_data_cube[k, i, j]
                    min_x_value = current_value
            min_x_value = 999999
    return diag_image
The way it works is the following:
You create a normal vector, for example [5,0,3]:
normal1 = create_normal_vector(5, 0, 3)  # this is only to normalize
Then you create a point (my cube data shape is [128,128,128]):
point = [64, 64, 64]
You calculate the plane equation parameters [a,b,c,d], where ax+by+cz=d:
plane1 = get_plane_equation_parameters(normal1, point)
Then, to reduce the search space, you can calculate the intersection of the plane with the cube:
corners1 = get_corner_interesections(plane1, 128)
where corners1 = [intersection [x,0,0],intersection [x,128,0],intersection [x,0,128],intersection [x,128,128], min intersection [x,y,z], max intersection [x,y,z]]
With all these you can calculate the intersection between the cube and the plane:
image1 = get_points_intersection(plane1,corners1[-2],corners1[-1],data_cube)
Some examples:
normal is [1,0,0] point is [64,64,64]
normal is [5,1,0],[5,1,1],[5,0,1] point is [64,64,64]:
normal is [5,3,0],[5,3,3],[5,0,3] point is [64,64,64]:
normal is [5,-5,0],[5,-5,-5],[5,0,-5] point is [64,64,64]:
Thank you.
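A short sketch tying those steps together (the random volume is just a stand-in for the MRI data, and the brute-force search takes a while for a full 128^3 cube):

import numpy as np
import matplotlib.pyplot as plt

data_cube = np.random.rand(128, 128, 128)  # placeholder volume

normal1 = create_normal_vector(5, 0, 3)
point = [64, 64, 64]
plane1 = get_plane_equation_parameters(normal1, point)
corners1 = get_corner_interesections(plane1, 128)
image1 = get_points_intersection(plane1, corners1[-2], corners1[-1], data_cube)

plt.imshow(image1, cmap='gray')
plt.show()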
The other answers here do not appear to be very efficient: they use explicit loops over pixels or scipy.interpolate.griddata, which is designed for unstructured input data. Here is an efficient (vectorized) and generic solution.
There is a pure numpy implementation (for nearest-neighbor "interpolation") and one for linear interpolation, which delegates the interpolation to scipy.ndimage.map_coordinates. (The latter function probably didn't exist in 2013, when this question was asked.)
import numpy as np
from scipy.ndimage import map_coordinates

def slice_datacube(cube, center, eXY, mXY, fill=np.nan, interp=True):
    """Get a 2D slice from a 3-D array.

    Copyright: Han-Kwang Nienhuys, 2020.
    License: any of CC-BY-SA, CC-BY, BSD, GPL, LGPL
    Reference: https://stackoverflow.com/a/62733930/6228891

    Parameters:

    - cube: 3D array, assumed shape (nx, ny, nz).
    - center: shape (3,) with coordinates of center.
      can be float.
    - eXY: unit vectors, shape (2, 3) - for X and Y axes of the slice.
      (unit vectors must be orthogonal; normalization is optional).
    - mXY: size tuple of output array (mX, mY) - int.
    - fill: value to use for out-of-range points.
    - interp: whether to interpolate (rather than using 'nearest')

    Return:

    - slice: array, shape (mX, mY).
    """
    center = np.array(center, dtype=float)
    assert center.shape == (3,)

    eXY = np.array(eXY)/np.linalg.norm(eXY, axis=1)[:, np.newaxis]
    if not np.isclose(eXY[0] @ eXY[1], 0, atol=1e-6):
        raise ValueError(f'eX and eY not orthogonal.')

    # R: rotation matrix: data_coords = center + R @ slice_coords
    eZ = np.cross(eXY[0], eXY[1])
    R = np.array([eXY[0], eXY[1], eZ], dtype=np.float32).T

    # setup slice points P with coordinates (X, Y, 0)
    mX, mY = int(mXY[0]), int(mXY[1])
    Xs = np.arange(0.5 - mX/2, 0.5 + mX/2)
    Ys = np.arange(0.5 - mY/2, 0.5 + mY/2)
    PP = np.zeros((3, mX, mY), dtype=np.float32)
    PP[0, :, :] = Xs.reshape(mX, 1)
    PP[1, :, :] = Ys.reshape(1, mY)

    # Transform to data coordinates (x, y, z) - idx.shape == (3, mX, mY)
    if interp:
        idx = np.einsum('il,ljk->ijk', R, PP) + center.reshape(3, 1, 1)
        slice = map_coordinates(cube, idx, order=1, mode='constant', cval=fill)
    else:
        idx = np.einsum('il,ljk->ijk', R, PP) + (0.5 + center.reshape(3, 1, 1))
        idx = idx.astype(np.int16)
        # Find out which coordinates are out of range - shape (mX, mY)
        badpoints = np.any([
            idx[0, :, :] < 0,
            idx[0, :, :] >= cube.shape[0],
            idx[1, :, :] < 0,
            idx[1, :, :] >= cube.shape[1],
            idx[2, :, :] < 0,
            idx[2, :, :] >= cube.shape[2],
            ], axis=0)
        idx[:, badpoints] = 0
        slice = cube[idx[0], idx[1], idx[2]]
        slice[badpoints] = fill

    return slice
# Demonstration
nx, ny, nz = 50, 70, 100
cube = np.full((nx, ny, nz), np.float32(1))

cube[nx//4:nx*3//4, :, :] += 1
cube[:, ny//2:ny*3//4, :] += 3
cube[:, :, nz//4:nz//2] += 7
cube[nx//3-2:nx//3+2, ny//2-2:ny//2+2, :] = 0  # black dot

Rz, Rx = np.pi/6, np.pi/4  # rotation angles around z and x
cz, sz = np.cos(Rz), np.sin(Rz)
cx, sx = np.cos(Rx), np.sin(Rx)
Rmz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
Rmx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
eXY = (Rmx @ Rmz).T[:2]

slice = slice_datacube(
    cube,
    center=[nx/3, ny/2, nz*0.7],
    eXY=eXY,
    mXY=[80, 90],
    fill=np.nan,
    interp=False
    )

import matplotlib.pyplot as plt
plt.close('all')
plt.imshow(slice.T)  # imshow expects shape (mY, mX)
plt.colorbar()
Output (for interp=False):
For this test case (50x70x100 datacube, 80x90 slice size) the run time is 376 µs (interp=False) and 550 µs (interp=True) on my laptop.
