Optimise 3D surface reconstruction from a point cloud - Python

I am quite new to o3d, I'd like someone to show me around based on my code :)
I am trying to reconstruct a surface from a few experimental data points, and I'd like to have as much flexibility/tunability as possible.
My code is something like this:
import sys
import numpy as np
import matplotlib.pyplot as plt
import open3d as o3d
from pyntcloud import PyntCloud

sys.path.append('..')
output_path = (r"C:\Users\Giammarco\Desktop\PYTHON_graphs\OUTPUTS\\")
poisson_mesh = []
densities = []
pcd = o3d.geometry.PointCloud()
pcd.normals = o3d.utility.Vector3dVector(np.zeros((1, 3)))  # invalidate existing normals
# load the point cloud (x, y, z are the experimental coordinate arrays)
point_cloud = np.array([x, y, z]).T
pcd.points = o3d.utility.Vector3dVector(point_cloud)
cloud = PyntCloud.from_instance("open3d", pcd)  # must be built after the points are set
# rescale the sample
vox_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, 1.)
# present in all approaches to point-cloud processing
kdtree = cloud.add_structure("kdtree")
testc = cloud.get_neighbors(k=5)
distances = pcd.compute_nearest_neighbor_distance()
avg_dist = np.mean(distances)
# compute the normals
pcd.estimate_normals()  # mandatory
#orient the normals
#Number of nearest neighbours: 5 is the minimum to have a closed surface with scale >= 2
pcd.orient_normals_consistent_tangent_plane(7)
#Poisson algorithm
poisson_mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9, width=0, scale=3.5, linear_fit=False)
bbox = pcd.get_axis_aligned_bounding_box()
p_mesh_crop = poisson_mesh.crop(bbox)
# cleaning
# p_mesh_crop =poisson_mesh.simplify_quadric_decimation(6000)
# p_mesh_crop.remove_unreferenced_vertices()
# p_mesh_crop.remove_degenerate_triangles()
# p_mesh_crop.remove_duplicated_triangles()
# p_mesh_crop.remove_duplicated_vertices()
# p_mesh_crop.remove_non_manifold_edges()
# designing the surface colour
# densities are the real density of features
densities = np.asarray(densities)
density_colors = plt.get_cmap('viridis')((densities - densities.min()) / (densities.max() - densities.min()))
density_colors = density_colors[:, :3]
# works for the plotting in o3d
poisson_mesh.vertex_colors = o3d.utility.Vector3dVector(density_colors)
# o3d.io.write_triangle_mesh(output_path+"bpa_mesh.ply", dec_mesh)  # dec_mesh is never defined in this script
o3d.io.write_triangle_mesh(output_path+"p_mesh_c.ply", poisson_mesh)
# o3d.io.write_triangle_mesh(output_path+"p_mesh_c.ply", p_mesh_crop);
# my_lods = lod_mesh_export(p_mesh_crop, [100000,50000,10000,1000,100], ".ply", output_path)
my_lods = lod_mesh_export(poisson_mesh, [100000,50000,10000,1000,100], ".ply", output_path)  # lod_mesh_export is a helper defined elsewhere in my script
# o3d.visualization.draw_geometries([pcd, p_mesh_crop], mesh_show_back_face=True)
# o3d.visualization.draw_geometries([pcd, poisson_mesh],mesh_show_back_face=True)
# o3d.visualization.draw_geometries([pcd, poisson_mesh[100000]],point_show_normal=True)
# tri_mesh_pois.show()
#SHORTCUTS from keyboard: n = show normals, q = quit, w = mesh
o3d.visualization.draw_geometries([pcd, poisson_mesh],mesh_show_back_face=True)
Some outputs:
I am concerned about the create_from_point_cloud_poisson fit model: is there a way to tune its parameters beyond depth and scale? Is there an iterative process that I should set up for better convergence (e.g. a threshold)? As you can see, the distance between the calculated surface and the experimental points is quite big.
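A minimal sketch of one such iterative check, assuming pcd already carries oriented normals (sweep_poisson and its defaults are illustrative names, not an Open3D API): it sweeps the depth parameter and keeps the mesh whose sampled surface lies closest to the input points.
import numpy as np
import open3d as o3d
def sweep_poisson(pcd, depths=(6, 7, 8, 9, 10), scale=1.1):
    '''Return (error, depth, mesh) for the depth with the smallest
    mean distance from the input points to the reconstructed surface.'''
    best = None
    for depth in depths:
        mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
            pcd, depth=depth, scale=scale)
        # Sample the mesh densely and measure how far the input points are from it.
        sampled = mesh.sample_points_uniformly(number_of_points=5 * len(pcd.points))
        err = np.mean(np.asarray(pcd.compute_point_cloud_distance(sampled)))
        if best is None or err < best[0]:
            best = (err, depth, mesh)
    return best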
Is the estimation of normals properly set up? In the second output, some directions are still quite random.
I tried this syntax too: pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.05, max_nn=20)), but it doesn't converge to a closed surface, just a plane (see below).
Please give me feedback on my code and suggestions on how to improve it.
Thank you for your support!

Related

Finding two linear fits on different domains in the same data

I'm trying to plot a 3rd-order polynomial, and two linear fits on the same set of data. My data looks like this:
,Frequency,Flux Density,log_freq,log_flux
0,1.25e+18,1.86e-07,18.096910013008056,-6.730487055782084
1,699000000000000.0,1.07e-06,14.84447717574568,-5.97061622231479
2,541000000000000.0,1.1e-06,14.73319726510657,-5.958607314841775
3,468000000000000.0,1e-06,14.670245853074125,-6.0
4,458000000000000.0,1.77e-06,14.660865478003869,-5.752026733638194
5,89400000000000.0,3.01e-05,13.951337518795917,-4.521433504406157
6,89400000000000.0,9.3e-05,13.951337518795917,-4.031517051446065
7,89400000000000.0,0.00187,13.951337518795917,-2.728158393463501
8,65100000000000.0,2.44e-05,13.813580988568193,-4.61261017366127
9,65100000000000.0,6.28e-05,13.813580988568193,-4.2020403562628035
10,65100000000000.0,0.00108,13.813580988568193,-2.96657624451305
11,25900000000000.0,0.000785,13.413299764081252,-3.1051303432547472
12,25900000000000.0,0.00106,13.413299764081252,-2.9746941347352296
13,25900000000000.0,0.000796,13.413299764081252,-3.099086932262331
14,13600000000000.0,0.00339,13.133538908370218,-2.469800301796918
15,13600000000000.0,0.00372,13.133538908370218,-2.4294570601181023
16,13600000000000.0,0.00308,13.133538908370218,-2.5114492834995557
17,12700000000000.0,0.00222,13.103803720955957,-2.653647025549361
18,12700000000000.0,0.00204,13.103803720955957,-2.6903698325741012
19,230000000000.0,0.133,11.361727836017593,-0.8761483590329142
22,90000000000.0,0.518,10.954242509439325,-0.28567024025476695
23,61000000000.0,1.0,10.785329835010767,0.0
24,61000000000.0,0.1,10.785329835010767,-1.0
25,61000000000.0,0.4,10.785329835010767,-0.3979400086720376
26,42400000000.0,0.8,10.627365856592732,-0.09691001300805639
27,41000000000.0,0.9,10.612783856719735,-0.045757490560675115
28,41000000000.0,0.7,10.612783856719735,-0.1549019599857432
29,41000000000.0,0.8,10.612783856719735,-0.09691001300805639
30,41000000000.0,0.6,10.612783856719735,-0.2218487496163564
31,41000000000.0,0.7,10.612783856719735,-0.1549019599857432
32,37000000000.0,1.0,10.568201724066995,0.0
33,36800000000.0,1.0,10.565847818673518,0.0
34,36800000000.0,0.98,10.565847818673518,-0.00877392430750515
35,33000000000.0,0.8,10.518513939877888,-0.09691001300805639
36,33000000000.0,1.0,10.518513939877888,0.0
37,31400000000.0,0.92,10.496929648073214,-0.036212172654444715
38,23000000000.0,1.4,10.361727836017593,0.146128035678238
39,23000000000.0,1.1,10.361727836017593,0.04139268515822508
40,23000000000.0,1.11,10.361727836017593,0.045322978786657475
41,23000000000.0,1.1,10.361727836017593,0.04139268515822508
42,22200000000.0,1.23,10.346352974450639,0.08990511143939793
43,22200000000.0,1.24,10.346352974450639,0.09342168516223506
44,21700000000.0,0.98,10.33645973384853,-0.00877392430750515
45,21700000000.0,1.07,10.33645973384853,0.029383777685209667
46,20000000000.0,1.44,10.301029995663981,0.15836249209524964
47,15400000000.0,1.32,10.187520720836464,0.12057393120584989
48,15000000000.0,1.5,10.176091259055681,0.17609125905568124
49,15000000000.0,1.5,10.176091259055681,0.17609125905568124
50,15000000000.0,1.42,10.176091259055681,0.15228834438305647
51,15000000000.0,1.43,10.176091259055681,0.1553360374650618
52,15000000000.0,1.42,10.176091259055681,0.15228834438305647
53,15000000000.0,1.47,10.176091259055681,0.1673173347481761
54,15000000000.0,1.38,10.176091259055681,0.13987908640123647
55,10700000000.0,2.59,10.02938377768521,0.4132997640812518
56,8870000000.0,2.79,9.947923619831727,0.44560420327359757
57,8460000000.0,2.69,9.927370363039023,0.42975228000240795
58,8400000000.0,2.8,9.924279286061882,0.4471580313422192
59,8400000000.0,2.53,9.924279286061882,0.40312052117581787
60,8400000000.0,2.06,9.924279286061882,0.31386722036915343
61,8300000000.0,2.58,9.919078092376074,0.41161970596323016
62,8080000000.0,2.76,9.907411360774587,0.4409090820652177
63,5010000000.0,3.68,9.699837725867246,0.5658478186735176
64,5000000000.0,0.81,9.698970004336019,-0.09151498112135022
65,5000000000.0,3.5,9.698970004336019,0.5440680443502757
66,5000000000.0,3.57,9.698970004336019,0.5526682161121932
67,4980000000.0,3.46,9.697229342759718,0.5390760987927766
68,4900000000.0,2.95,9.690196080028514,0.46982201597816303
69,4850000000.0,3.46,9.685741738602264,0.5390760987927766
70,4850000000.0,3.45,9.685741738602264,0.5378190950732742
71,4780000000.0,2.16,9.679427896612118,0.3344537511509309
72,4540000000.0,3.61,9.657055852857104,0.557507201905658
73,2700000000.0,3.5,9.431363764158988,0.5440680443502757
74,2700000000.0,3.7,9.431363764158988,0.568201724066995
75,2700000000.0,3.92,9.431363764158988,0.5932860670204573
76,2700000000.0,3.92,9.431363764158988,0.5932860670204573
77,2250000000.0,4.21,9.352182518111363,0.6242820958356683
78,1660000000.0,3.69,9.220108088040055,0.5670263661590603
79,1660000000.0,3.8,9.220108088040055,0.5797835966168101
80,1410000000.0,3.5,9.14921911265538,0.5440680443502757
81,1400000000.0,3.45,9.146128035678238,0.5378190950732742
82,1400000000.0,3.28,9.146128035678238,0.5158738437116791
83,1400000000.0,3.19,9.146128035678238,0.5037906830571811
84,1400000000.0,3.51,9.146128035678238,0.5453071164658241
85,1340000000.0,3.31,9.127104798364808,0.5198279937757188
86,1340000000.0,3.31,9.127104798364808,0.5198279937757188
87,750000000.0,3.14,8.8750612633917,0.49692964807321494
88,408000000.0,1.46,8.61066016308988,0.1643528557844371
89,408000000.0,1.46,8.61066016308988,0.1643528557844371
90,365000000.0,1.62,8.562292864456476,0.20951501454263097
91,365000000.0,1.56,8.562292864456476,0.1931245983544616
92,333000000.0,1.32,8.52244423350632,0.12057393120584989
93,302000000.0,1.23,8.48000694295715,0.08990511143939793
94,151000000.0,2.13,8.178976947293169,0.3283796034387377
95,73800000.0,3.58,7.868056361823042,0.5538830266438743
and my code is
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import numpy.polynomial.polynomial as poly
def find_extrema(poly, bounds):
    '''
    Finds the extrema of the polynomial; ensure real.
    https://stackoverflow.com/questions/72932816/python-finding-local-maxima-minima-for-multiple-polynomials-efficiently
    '''
    deriv = poly.deriv()
    extrema = deriv.roots()
    # Filter out complex roots
    extrema = extrema[np.isreal(extrema)]
    # Get real part of root
    extrema = np.real(extrema)
    # Apply bounds check
    lb, ub = bounds
    extrema = extrema[(lb <= extrema) & (extrema <= ub)]
    return extrema
def find_maximum(poly, bounds):
    '''
    Find the maximum point; returns the value of the turnover frequency.
    https://stackoverflow.com/questions/72932816/python-finding-local-maxima-minima-for-multiple-polynomials-efficiently
    '''
    extrema = find_extrema(poly, bounds)
    # Either bound could end up being the maximum. Check those too.
    extrema = np.concatenate((extrema, bounds))
    value_at_extrema = poly(extrema)
    maximum_index = np.argmax(value_at_extrema)
    return extrema[maximum_index]
# LOAD THE DATA FROM FILE HERE (into a DataFrame called pks with the columns shown above)
# CARRY ON...
xvar = 'log_freq'
yvar = 'log_flux'
x, y = pks[xvar], pks[yvar]
lower = min(x)
upper = max(x)
# Find the 3rd-order polynomial which fits the SED
coefs = poly.polyfit(x, y, 3) # find the coeffs
x_new = np.linspace(lower, upper, num=len(x)*10) # space to plot the fit
ffit = poly.Polynomial(coefs) # find the polynomial
# Find turnover frequency and peak flux
nu_to = find_maximum(ffit, (lower, upper))
F_p = ffit(nu_to)
# HERE'S THE TRICKY BIT
# Find the straight line to fit to the left of nu_to
left_linefit = poly.polyfit(x, y, 1)
x_left = np.linspace(lower, nu_to, num=len(x)*10)  # space to plot the fit
ffit_left = poly.Polynomial(left_linefit,
                            domain=(lower, nu_to))  # named ffit_left to match the plotting call below
# PLOTS THE POLYNOMIAL WELL
ax1 = plt.subplot(1, 1, 1)
ax1.scatter(pks[xvar], pks[yvar], label = 'PKS 0742+10', c = 'b')
ax1.plot(x_new, ffit(x_new), color = 'r')
ax1.plot(x_left, ffit_left(x_left), color = 'gold')
ax1.set_yscale('linear')
ax1.set_xscale('linear')
ax1.legend()
ax1.set_xlabel(r'$\log\nu$ ($\nu$ in Hz)')
ax1.set_ylabel(r'$\log F_{\nu}$ ($F_{\nu}$ in Jy)')
ax1.grid(axis = 'both', which = 'major')
The code produces the poly fit well:
I'm trying to plot the straight-line fits for the points on either side of the maximum, as shown schematically below:
I thought I could do it with
ffit_left = poly.Polynomial(left_linefit,
                            domain=(lower, nu_to))
and similar for ffit_right, but that produces
which is actually the straight-line fit for the whole dataset, plotted only for that domain. I don't want to manipulate the dataset, because eventually I'll have to do it on a lot of datasets.
The fitting part of the code comes from an answer to this question.
How can I fit a straight line to just a set of points, without manipulating the dataset?
My guess is that I have to make left_linefit = poly.polyfit(x, y, 1) recognise a domain, but I can't see anything in the numpy polyfit docs.
Sorry for the long question!
I am not sure I fully understand your request. If you want to fit a piecewise function made of three linear segments, a method is described in https://fr.scribd.com/document/380941024/Regression-par-morceaux-Piecewise-Regression-pdf with theory and numerical examples.
Several cases are considered; among them, the case below might be convenient for you.
H(*) is the Heaviside step function.
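If a simple two-segment fit around the turnover is enough, here is a minimal sketch using boolean masks, so the stored dataset itself is never modified (it reuses x, y, nu_to, lower, upper and ax1 from the question; mask_left and mask_right are illustrative names):
mask_left = x <= nu_to
mask_right = x >= nu_to
left_coefs = poly.polyfit(x[mask_left], y[mask_left], 1)
right_coefs = poly.polyfit(x[mask_right], y[mask_right], 1)
ffit_left = poly.Polynomial(left_coefs)
ffit_right = poly.Polynomial(right_coefs)
# Plot each fit only over its own domain.
x_left = np.linspace(lower, nu_to, num=100)
x_right = np.linspace(nu_to, upper, num=100)
ax1.plot(x_left, ffit_left(x_left), color='gold')
ax1.plot(x_right, ffit_right(x_right), color='green')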

Struggling to create watertight meshes out of point cloud data using Open3D in Python

I am trying to create a watertight mesh out of a point cloud representing organ contour data from cone beam CT images. My goal is to take two such meshes and calculate the volume of intersection between them.
I have tried using each of the methods shown here
Poisson Reconstruction
import numpy as np
import open3d as o3d

point_cloud = np.genfromtxt('ct_prostate_contour_data.csv', delimiter=',')
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(point_cloud)
pcd.compute_convex_hull()
pcd.estimate_normals()
pcd.orient_normals_consistent_tangent_plane(10)
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=10, width=0, scale=20, linear_fit=True)[0]
mesh.compute_vertex_normals()
mesh.paint_uniform_color([0.5, 0.5, 0.5])
mesh.remove_degenerate_triangles()
o3d.visualization.draw_geometries([pcd, mesh], mesh_show_back_face=True)
While this method produces what looks to my eye like a watertight mesh, mesh.is_watertight() returns False; for the bladder data, however, it returns True. Furthermore, the algorithm extends the mesh above and below the vertical limits of the data. While this isn't a deal-breaking issue, if there were a way to minimize it, that would be great.
Poisson Mesh Image
Ball Pivoting
point_cloud = np.genfromtxt('ct_prostate_contour_data.csv', delimiter=',')
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(point_cloud)
pcd.compute_convex_hull()
pcd.estimate_normals()
pcd.orient_normals_consistent_tangent_plane(30)
distances = pcd.compute_nearest_neighbor_distance()
avg_dist = np.mean(distances)
radii = [0.1*avg_dist, 0.5*avg_dist, 1*avg_dist, 2*avg_dist]
r = o3d.utility.DoubleVector(radii)
rec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(pcd, r)
o3d.visualization.draw_geometries([pcd, rec_mesh], mesh_show_back_face=True)
This would be my preferred method if I were able to fill the holes as it simply connects vertices without interpolation. Perhaps if I were able to get this into a state where the only remaining holes were large I could convert this mesh into a Pyvista compatible mesh and use Pymeshfix to patch the holes.
Ball Pivoting Mesh Image
Alpha Shapes
point_cloud = np.genfromtxt('ct_prostate_contour_data.csv', delimiter=',')
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(point_cloud)
alpha = 8
tetra_mesh, pt_map = o3d.geometry.TetraMesh.create_from_point_cloud(pcd)
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(pcd, alpha, tetra_mesh, pt_map)
mesh.compute_vertex_normals()
mesh.paint_uniform_color([0.5, 0.5, 0.5])
mesh.remove_degenerate_triangles()
o3d.visualization.draw_geometries([pcd, mesh])
The results from this are similar to ball pivoting but worse.
Alpha Shapes Mesh Image
Sample Data
ct_prostate_contour_data.csv
ct_rectum_contour_data.csv
ct_bladder_contour_data.csv
I am one of the authors of the PyVista module. We've introduced the vtkSurfaceReconstructionFilter within PyVista in pull request #1617.
import pymeshfix
import numpy as np
import pyvista as pv
pv.set_plot_theme('document')
array = np.genfromtxt('ct_prostate_contour_data.csv', delimiter=',')
point_cloud = pv.PolyData(array)
surf = point_cloud.reconstruct_surface(nbr_sz=20, sample_spacing=2)
mf = pymeshfix.MeshFix(surf)
mf.repair()
repaired = mf.mesh
pl = pv.Plotter()
pl.add_mesh(point_cloud, color='k', point_size=10)
pl.add_mesh(repaired)
pl.add_title('Reconstructed Surface')
pl.show()
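For the volume-of-intersection goal, a hedged sketch of the final step, assuming a second organ mesh has been reconstructed and repaired the same way (repaired_bladder is a hypothetical name); PyVista's boolean_intersection requires both inputs to be watertight triangle meshes:
mesh_a = repaired.triangulate()
mesh_b = repaired_bladder.triangulate()  # hypothetical: second organ, repaired as above
intersection = mesh_a.boolean_intersection(mesh_b)
print('Intersection volume:', intersection.volume)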

Why are normals on a mesh in VTK different according to mesh color?

I'm trying to compute the normals of a mesh in VTK for each vertex.
When I display them, the normals at green vertices appear to be shorter than those at white vertices.
I don't understand why I get this result.
To color the mesh I used this function :
def update_colors(self, couleurs=None, Vertex_correspondance=None):
    Colors = vtk.vtkUnsignedCharArray()
    Colors.SetNumberOfComponents(3)
    Colors.SetName("Colors")
    for i in range(self.points.shape[0]):
        if i not in Vertex_correspondance:
            Colors.InsertNextTuple3(255, 255, 255)
        else:
            Colors.InsertNextTuple3(couleurs[0], couleurs[1], couleurs[2])
    self.GetOutput.GetPointData().SetScalars(Colors)
    self.GetOutput.Modified()
So I assign a green color to some vertices of the mesh (30 vertices).
To compute the normals, I use :
poly_data = self.actor.GetMapper().GetInput()
normalsCalc = vtk.vtkPolyDataNormals()
normalsCalc.SetInputData(poly_data)
normalsCalc.ComputePointNormalsOn()
normalsCalc.ComputeCellNormalsOff()
normalsCalc.SplittingOff()
normalsCalc.FlipNormalsOff()
normalsCalc.ConsistencyOn()
normalsCalc.AutoOrientNormalsOn()
normalsCalc.Update()
arrowSource = vtk.vtkArrowSource()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceConnection(arrowSource.GetOutputPort())
glyph3D.SetVectorModeToUseNormal()
glyph3D.SetInputData(normalsCalc.GetOutput())
glyph3D.SetScaleFactor(0.02)
glyph3D.OrientOn()
glyph3D.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(glyph3D.GetOutputPort())
self.glyphActor = vtk.vtkActor()
self.glyphActor.SetMapper(mapper)
self.glyphActor.GetProperty().SetColor([0, 0, 1])
Here is the display I get.
Also, if I compute the normal lengths afterwards with
normals = []
array = normalsCalc.GetOutput().GetPointData().GetNormals()
for i in range(array.GetNumberOfTuples()):
    normals.append(array.GetTuple(i))
self.Normals = np.array(normals)
np.linalg.norm(self.Normals, axis=1)
I get numbers really close to 1, so the normals seem to have been computed correctly...
Maybe you need to use SetScaleModeToDataScalingOff(). By default, vtkGlyph3D scales glyphs by the active scalar array, so the color scalars you assigned presumably scale the arrows as well. This seems to work:
from vedo import Ellipsoid, show
import vtk
s = Ellipsoid().computeNormals()
arr = s.points()[:,2]
s.cmap('jet_r', arr)
arrowSource = vtk.vtkArrowSource()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceConnection(arrowSource.GetOutputPort())
glyph3D.SetVectorModeToUseNormal()
glyph3D.SetInputData(s.polydata())
glyph3D.SetScaleFactor(0.2)
glyph3D.OrientOn()
glyph3D.SetScaleModeToDataScalingOff() ###### <--
glyph3D.Update()
show(s, glyph3D, axes=1)
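Applied to the pipeline in the question (same variable names), the equivalent fix is a single call before the final Update():
glyph3D.SetScaleModeToDataScalingOff()  # stop scaling the arrows by the color scalars
glyph3D.Update()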

Python VTK "normalize" a point cloud

I have done a lot of searching but have yet to find an answer. I am currently working on some data of a crop field. I have PLY files for multiple fields which I have successfully read into, filtered, and visualised using Python and VTK. My main goal is to eventually segment and run analysis on individual crop plots.
However, to make that task easier I first want to "normalize" my point cloud so that all plots are essentially on the same level. From the image I have attached you can see that the point cloud slopes from one corner to its opposite. So I want to flatten out the image so that the ground points all lie on the same plane/level, with the rest of the points adjusted accordingly.
Point Cloud
I've also included my code to show how I got to this point. If anyone has any advice on how I can achieve the normalising to one plane, I would be very appreciative. Sadly, I cannot include my data as it is work related.
Thanks.
Josh
import vtk
from vtk.util import numpy_support
import numpy as np
filename = 'File.ply'
# Reader
r = vtk.vtkPLYReader()
r.SetFileName(filename)
# Filters
vgf = vtk.vtkVertexGlyphFilter()
vgf.SetInputConnection(r.GetOutputPort())
# Elevation
r.Update()  # update the reader so GetOutput() is populated before querying bounds
pc = r.GetOutput()
bounds = pc.GetBounds()
#print(bounds)
minz = bounds[4]
maxz = bounds[5]
#print(bounds[4], bounds[5])
evgf = vtk.vtkElevationFilter()
evgf.SetInputConnection(vgf.GetOutputPort())
evgf.SetLowPoint(0, 0, minz)
evgf.SetHighPoint(0, 0, maxz)
#pc.GetNumberOfPoints()
# Look up table
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667, 0)
lut.SetSaturationRange(1, 1)
lut.SetValueRange(1, 1)
lut.Build()
# Renderer
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(evgf.GetOutputPort())
mapper.SetLookupTable(lut)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer.AddActor(actor)
renderer.SetBackground(0, 0, 0)
renWin.Render()
iren.Start()
I once solved a similar problem. Find below some code that I used back then. It uses two functions fitPlane and findTransformFromVectors that you could replace with your own implementations.
Note that there are many ways to fit a plane through a set of points. This SO post compares scipy.optimize.minimize with scipy.linalg.lstsq. In another SO post, the use of PCA or RANSAC and other methods is suggested. You probably want to use methods provided by sklearn, numpy or other modules. My solution simply (and non-robustly) computes an ordinary least squares regression.
import vtk
import numpy as np
# Convert vtk to numpy arrays
from vtk.util.numpy_support import vtk_to_numpy as vtk2np
# Create a random point cloud.
center = [3.0, 2.0, 1.0]
source = vtk.vtkPointSource()
source.SetCenter(center)
source.SetNumberOfPoints(50)
source.SetRadius(1.)
source.Update()
source = source.GetOutput()
# Extract the points from the point cloud.
points = vtk2np(source.GetPoints().GetData())
points = points.transpose()
# Fit a plane. nRegression contains the normal vector of the
# regression surface.
nRegression = fitPlane(points)
# Compute a transform that maps the source center to the origin and
# plane normal to the z-axis.
trafo = findTransformFromVectors(originFrom=center,
                                 axisFrom=nRegression.transpose(),
                                 originTo=(0, 0, 0),
                                 axisTo=(0., 0., 1.))
# Apply transform to source.
sourceTransformed = vtk.vtkTransformFilter()
sourceTransformed.SetInputData(source)
sourceTransformed.SetTransform(trafo)
sourceTransformed.Update()
# Visualize output...
Here my implementations of fitPlane and findTransformFromVectors:
# The following code has been written by normanius under the CC BY-SA 4.0
# license.
# License: https://creativecommons.org/licenses/by-sa/4.0/
# Author: normanius: https://stackoverflow.com/users/3388962/normanius
# Date: October 2018
# Reference: https://stackoverflow.com/questions/52716438
def fitPlane(X, tolerance=1e-10):
    '''
    Estimate the plane normal by means of ordinary least squares.
    Requirement: points X span the full column rank. If the points lie in a
    perfect plane, the regression problem is ill-conditioned!
    Formulas:
        a = (XX^T)^(-1)*X*z
    Surface normal:
        n = [a[0], a[1], -1]
        n = n/norm(n)
    Plane intercept:
        c = a[2]/norm(n)
    NOTE: The condition number for the pseudo-inverse improves if the
    formulation is changed to homogeneous notation.
    Formulas (homogeneous):
        a = (XX^T)^(-1)*[1,1,1]^T
        n = a[:-1]
        n = n/norm(n)
        c = a[-1]/norm(n)
    Arguments:
        X: A matrix with n rows and 3 columns
        tolerance: Minimal condition number accepted. If the condition
            number is lower, the algorithm returns None.
    Returns:
        If the computation was successful, a numpy array of length three is
        returned that represents the estimated plane normal. On failure,
        None is returned.
    '''
    X = np.asarray(X)
    d, N = X.shape
    X = np.vstack([X, np.ones([1, N])])
    z = np.ones([d+1, 1])
    XXT = np.dot(X, np.transpose(X))  # XXT = X*X^T
    if np.linalg.det(XXT) < tolerance:
        # The test covers the case where n < 3
        return None
    n = np.dot(np.linalg.inv(XXT), z)
    intercept = n[-1]
    n = n[:-1]
    scale = np.linalg.norm(n)
    n /= scale
    intercept /= scale
    return n
def findTransformFromVectors(originFrom=None, axisFrom=None,
                             originTo=None, axisTo=None,
                             origin=None,
                             scale=1):
    '''
    Compute a transformation that maps originFrom and axisFrom to originTo
    and axisTo respectively. If scale is set to 'auto', the scale will be
    determined such that the axes will also match in length:
        scale = norm(axisTo)/norm(axisFrom)
    Arguments:
        originFrom: sequence with 3 elements, or None
        axisFrom:   sequence with 3 elements, or None
        originTo:   sequence with 3 elements, or None
        axisTo:     sequence with 3 elements, or None
        origin:     sequence with 3 elements, or None;
                    overrides originFrom and originTo if set
        scale:      - scalar (isotropic scaling)
                    - sequence with 3 elements (anisotropic scaling)
                    - 'auto' (sets scale such that input axes match
                      in length after transforming axisFrom)
                    - None (no scaling)
    Align two axes alone, assuming that we sit on (0,0,0):
        findTransformFromVectors(axisFrom=a0, axisTo=a1)
    Align two axes in one point (all calls are equivalent):
        findTransformFromVectors(origin=o, axisFrom=a0, axisTo=a1)
        findTransformFromVectors(originFrom=o, axisFrom=a0, axisTo=a1)
        findTransformFromVectors(axisFrom=a0, originTo=o, axisTo=a1)
    Move between two points:
        findTransformFromVectors(originFrom=o0, originTo=o1)
    Move from one position to the other and align axes:
        findTransformFromVectors(originFrom=o0, axisFrom=a0, originTo=o1, axisTo=a1)
    '''
    # Prelude with trickle-down logic.
    # Infer the origins if an information is not set.
    if origin is not None:
        # Check for ambiguous input.
        assert(originFrom is None and originTo is None)
        originFrom = origin
        originTo = origin
    if originFrom is None:
        originFrom = originTo
    if originTo is None:
        originTo = originFrom
    if originTo is None:
        # We arrive here only if no origin information was set.
        originTo = [0., 0., 0.]
        originFrom = [0., 0., 0.]
    originFrom = np.asarray(originFrom)
    originTo = np.asarray(originTo)
    # Check if any rotation will be involved. (Check for None before
    # converting to arrays; np.linalg.norm fails on an object array.)
    if axisFrom is None or axisTo is None:
        rotate = False
        axisFromL2 = axisToL2 = 0.
    else:
        axisFrom = np.asarray(axisFrom)
        axisTo = np.asarray(axisTo)
        axisFromL2 = np.linalg.norm(axisFrom)
        axisToL2 = np.linalg.norm(axisTo)
        rotate = (axisFromL2 != 0 and axisToL2 != 0
                  and not np.array_equal(axisFrom, axisTo))
    # Scale.
    if scale is None:
        scale = 1.
    if scale == 'auto':
        scale = axisToL2/axisFromL2 if axisFromL2 != 0. else 1.
    if np.isscalar(scale):
        scale = scale*np.ones(3)
    if rotate:
        rAxis = np.cross(axisFrom.ravel(), axisTo.ravel())  # rotation axis
        angle = np.dot(axisFrom, axisTo) / axisFromL2 / axisToL2
        angle = np.arccos(angle)
    # Here we finally compute the transform.
    trafo = vtk.vtkTransform()
    trafo.Translate(originTo)
    if rotate:
        trafo.RotateWXYZ(angle / np.pi * 180, rAxis[0], rAxis[1], rAxis[2])
    trafo.Scale(scale[0], scale[1], scale[2])
    trafo.Translate(-originFrom)
    return trafo
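For completeness, a sketch of one of the "other methods" mentioned above: a total-least-squares plane fit via SVD (fit_plane_svd is an illustrative name). Unlike the OLS version, it stays well-conditioned even when the points lie in a perfect plane:
import numpy as np
def fit_plane_svd(points):
    '''Fit a plane to a 3xN point array (same layout as fitPlane above).
    Returns (normal, centroid); the normal is the direction of least variance.'''
    points = np.asarray(points, dtype=float)
    centroid = points.mean(axis=1, keepdims=True)
    # Rows of vt are the principal directions; the last row is the plane normal.
    _, _, vt = np.linalg.svd((points - centroid).T, full_matrices=False)
    return vt[-1], centroid.ravel()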

Healpy: From Data to Healpix map

I have a data grid where the rows represent theta (0, pi) and the columns represent phi (0, 2*pi) and where f(theta,phi) is the density of dark matter at that location. I wanted to calculate the power spectrum for this and have decided to use healpy.
What I cannot understand is how to format my data for healpy to use. If someone could provide code (in Python, for obvious reasons) or point me to a tutorial, that would be great! I have tried my hand at it with the following code:
# grid dimensions are Nrows*Ncols (subject to change)
theta = np.linspace(0, np.pi, num=grid.shape[0])[:, None]
phi = np.linspace(0, 2*np.pi, num=grid.shape[1])
nside = 512
print("Pixel area: %.2f square degrees" % hp.nside2pixarea(nside, degrees=True))
pix = hp.ang2pix(nside, theta, phi)
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
healpix_map[pix] = grid
But when I try to execute the code to compute the power spectrum, specifically:
cl = hp.anafast(healpix_map[pix], lmax=1024)
I get this error:
TypeError: bad number of pixels
If anyone could point me to a good tutorial or help edit my code that would be great.
More specifications:
my data is in a 2d np array and I can change the numRows/numCols if I need to.
Edit:
I have solved this problem by first changing the argument of anafast to healpix_map.
I also improved the spacing by making Nrows*Ncols = 12*nside*nside.
But my power spectrum is still giving errors. If anyone has links to good documentation/tutorials on how to calculate the power spectrum (conditions on the theta/phi args), that would be incredibly helpful.
There you go, hope it's what you're looking for. Feel free to comment with questions :)
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
# Set the number of sources and the coordinates for the input
nsources = int(1.e4)
nside = 16
npix = hp.nside2npix(nside)
# Coordinates and the density field f
thetas = np.random.random(nsources) * np.pi
phis = np.random.random(nsources) * np.pi * 2.
fs = np.random.randn(nsources)
# Go from HEALPix coordinates to indices
indices = hp.ang2pix(nside, thetas, phis)
# Initiate the map and fill it with the values
hpxmap = np.zeros(npix, dtype=float)
for i in range(nsources):
    hpxmap[indices[i]] += fs[i]
# Inspect the map
hp.mollview(hpxmap)
Since the map above contains nothing but noise, the power spectrum should just contain shot noise, i.e. be flat.
# Get the power spectrum
Cl = hp.anafast(hpxmap)
plt.figure()
plt.plot(Cl)
There is a faster way to do the map initialization using numpy.add.at, following this answer.
This is several times faster on my machine as compared to the first section of Daniel's excellent answer:
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
# Set the number of sources and the coordinates for the input
nsources = int(1e7)
nside = 64
npix = hp.nside2npix(nside)
# Coordinates and the density field f
thetas = np.random.uniform(0, np.pi, nsources)
phis = np.random.uniform(0, 2*np.pi, nsources)
fs = np.random.randn(nsources)
# Go from HEALPix coordinates to indices
indices = hp.ang2pix(nside, thetas, phis)
# Baseline, from Daniel Lenz's answer:
# time: ~5 s
hpxmap1 = np.zeros(npix, dtype=float)
for i in range(nsources):
    hpxmap1[indices[i]] += fs[i]
# Using numpy.add.at
# time: ~0.6 ms
hpxmap2 = np.zeros(npix, dtype=float)
np.add.at(hpxmap2, indices, fs)
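To connect this back to the gridded data in the question, here is a sketch that bins every grid sample into its HEALPix pixel with the same np.add.at trick, then averages by the hit counts (it assumes grid is the 2D array from the question, rows spanning theta in [0, pi] and columns spanning phi in [0, 2*pi)):
import healpy as hp
import numpy as np
nside = 512
theta = np.linspace(0, np.pi, grid.shape[0])
phi = np.linspace(0, 2*np.pi, grid.shape[1], endpoint=False)
tt, pp = np.meshgrid(theta, phi, indexing='ij')
pix = hp.ang2pix(nside, tt.ravel(), pp.ravel())
# Accumulate the samples per pixel, then average by the counts.
hpxmap = np.zeros(hp.nside2npix(nside))
counts = np.zeros(hp.nside2npix(nside))
np.add.at(hpxmap, pix, grid.ravel())
np.add.at(counts, pix, 1)
seen = counts > 0
hpxmap[seen] /= counts[seen]
cl = hp.anafast(hpxmap, lmax=3*nside - 1)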
