Skip to content

Commit 5999fb1

Browse files
authored
[ENH] Implement surface-based dual contouring with optimized triangulation (#35)
# [ENH] Dual contouring v2

This PR introduces a new version of the dual contouring algorithm with significant improvements:

- Added a new implementation `_dual_contouring_v2.py` that processes surfaces individually
- Refactored the data flow to handle surfaces more efficiently
- Renamed `all_stack_intersection` to `all_surfaces_intersection` for clarity
- Added surface slicing functionality to process individual surfaces
- Removed unused `left_right` parameter from `DualContouringMesh` class
- Optimized triangulation logic in `fancy_triangulation.py` with more efficient algorithms
- Enhanced voxel mapping with improved search methods using `isin` and optimized mask-to-indices conversion
- Added trimesh-based mesh cleanup as a final processing step
- Improved parallel processing capabilities with better worker management
- Added legacy mode toggle to maintain backward compatibility

The new implementation provides better performance by processing each surface independently and using more efficient data structures and algorithms.
2 parents a9720a2 + 8a0849f commit 5999fb1

File tree

8 files changed

+291
-88
lines changed

8 files changed

+291
-88
lines changed

gempy_engine/API/dual_contouring/multi_scalar_dual_contouring.py

Lines changed: 67 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
from ...core.data.interpolation_input import InterpolationInput
1919
from ...core.data.octree_level import OctreeLevel
2020
from ...core.utils import gempy_profiler_decorator
21+
from ...modules.dual_contouring._aux import _surface_slicer
2122
from ...modules.dual_contouring.dual_contouring_interface import (find_intersection_on_edge, get_triangulation_codes,
2223
get_masked_codes, mask_generation,apply_faults_vertex_overlap)
2324

@@ -68,7 +69,7 @@ def dual_contouring_multi_scalar(
6869
)
6970

7071
# Process each scalar field
71-
all_stack_intersection = []
72+
all_surfaces_intersection = []
7273
all_valid_edges = []
7374
all_left_right_codes = []
7475
# region Interp on edges
@@ -89,49 +90,53 @@ def dual_contouring_multi_scalar(
8990
masking=mask
9091
)
9192

92-
all_stack_intersection.append(intersection_xyz)
93+
all_surfaces_intersection.append(intersection_xyz)
9394
all_valid_edges.append(valid_edges)
9495

9596
# * 5) Interpolate on edges for all stacks
9697
output_on_edges = _interp_on_edges(
97-
all_stack_intersection, data_descriptor, dual_contouring_options, interpolation_input
98+
all_surfaces_intersection, data_descriptor, dual_contouring_options, interpolation_input
9899
)
99100

100101
# endregion
101102

102103
# region Vertex gen and triangulation
103104
left_right_per_mesh = []
104105
# Generate meshes for each scalar field
105-
for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
106-
output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
107-
mask = all_mask_arrays[n_scalar_field]
108-
109-
dc_data = DualContouringData(
110-
xyz_on_edge=all_stack_intersection[n_scalar_field],
111-
valid_edges=all_valid_edges[n_scalar_field],
112-
xyz_on_centers=(
113-
octree_leaves.grid_centers.octree_grid.values if mask is None
114-
else octree_leaves.grid_centers.octree_grid.values[mask]
115-
),
116-
dxdydz=octree_leaves.grid_centers.octree_dxdydz,
117-
gradients=output_on_edges[n_scalar_field],
118-
n_surfaces_to_export=output.scalar_field_at_sp.shape[0],
119-
tree_depth=options.number_octree_levels,
120-
)
121-
122-
meshes: List[DualContouringMesh] = compute_dual_contouring(
123-
dc_data_per_stack=dc_data,
124-
left_right_codes=all_left_right_codes[n_scalar_field],
125-
debug=options.debug
106+
if LEGACY:=False:
107+
for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
108+
_compute_meshes_legacy(all_left_right_codes, all_mask_arrays, all_meshes, all_surfaces_intersection, all_valid_edges, n_scalar_field, octree_leaves, options, output_on_edges)
109+
else:
110+
dc_data_per_surface_all = []
111+
for n_scalar_field in range(data_descriptor.stack_structure.n_stacks):
112+
output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
113+
mask = all_mask_arrays[n_scalar_field]
114+
n_surfaces_to_export = output.scalar_field_at_sp.shape[0]
115+
for surface_i in range(n_surfaces_to_export):
116+
valid_edges = all_valid_edges[n_scalar_field]
117+
valid_edges_per_surface =valid_edges.reshape((n_surfaces_to_export, -1, 12))
118+
slice_object = _surface_slicer(surface_i, valid_edges_per_surface)
119+
120+
dc_data_per_surface = DualContouringData(
121+
xyz_on_edge=all_surfaces_intersection[n_scalar_field][slice_object],
122+
valid_edges=valid_edges_per_surface[surface_i],
123+
xyz_on_centers=(
124+
octree_leaves.grid_centers.octree_grid.values if mask is None
125+
else octree_leaves.grid_centers.octree_grid.values[mask]
126+
),
127+
dxdydz=octree_leaves.grid_centers.octree_dxdydz,
128+
left_right_codes=all_left_right_codes[n_scalar_field],
129+
gradients=output_on_edges[n_scalar_field][slice_object],
130+
n_surfaces_to_export=n_scalar_field,
131+
tree_depth=options.number_octree_levels
132+
)
133+
134+
dc_data_per_surface_all.append(dc_data_per_surface)
135+
136+
from gempy_engine.modules.dual_contouring._dual_contouring_v2 import compute_dual_contouring_v2
137+
all_meshes = compute_dual_contouring_v2(
138+
dc_data_list=dc_data_per_surface_all,
126139
)
127-
128-
for m in meshes:
129-
left_right_per_mesh.append(m.left_right)
130-
131-
# TODO: If the order of the meshes does not match the order of scalar_field_at_surface points, reorder them here
132-
if meshes is not None:
133-
all_meshes.extend(meshes)
134-
135140
# endregion
136141
if (options.debug or len(all_left_right_codes) > 1) and False:
137142
apply_faults_vertex_overlap(all_meshes, data_descriptor.stack_structure, left_right_per_mesh)
@@ -141,6 +146,36 @@ def dual_contouring_multi_scalar(
141146
# ... existing code ...
142147

143148

149+
def _compute_meshes_legacy(all_left_right_codes: list[Any], all_mask_arrays: np.ndarray,
                           all_meshes: list[DualContouringMesh], all_stack_intersection: list[Any],
                           all_valid_edges: list[Any], n_scalar_field: int,
                           octree_leaves: OctreeLevel, options: InterpolationOptions, output_on_edges: list[np.ndarray]):
    """Legacy per-stack dual contouring: build one DualContouringData for the
    whole stack and append the resulting meshes to ``all_meshes`` in place.

    Args:
        all_left_right_codes: Per-stack left/right voxel codes for triangulation.
        all_mask_arrays: Per-stack voxel masks (entry may be None for no masking).
        all_meshes: Output accumulator; extended in place with the new meshes.
        all_stack_intersection: Per-stack edge-intersection coordinates.
        all_valid_edges: Per-stack valid-edge boolean arrays.
        n_scalar_field: Index of the stack/scalar field to process.
        octree_leaves: Finest octree level providing center grids.
        options: Interpolation options (octree depth, debug flag).
        output_on_edges: Per-stack interpolation outputs evaluated on edges.
    """
    output: InterpOutput = octree_leaves.outputs_centers[n_scalar_field]
    mask = all_mask_arrays[n_scalar_field]

    dc_data = DualContouringData(
        xyz_on_edge=all_stack_intersection[n_scalar_field],
        valid_edges=all_valid_edges[n_scalar_field],
        xyz_on_centers=(
            octree_leaves.grid_centers.octree_grid.values if mask is None
            else octree_leaves.grid_centers.octree_grid.values[mask]
        ),
        dxdydz=octree_leaves.grid_centers.octree_dxdydz,
        # BUG FIX: left_right_codes is a required (no-default) field of the
        # DualContouringData dataclass; omitting it raised TypeError as soon
        # as the legacy path was enabled.
        left_right_codes=all_left_right_codes[n_scalar_field],
        gradients=output_on_edges[n_scalar_field],
        n_surfaces_to_export=output.scalar_field_at_sp.shape[0],
        tree_depth=options.number_octree_levels,
    )

    meshes: List[DualContouringMesh] = compute_dual_contouring(
        dc_data_per_stack=dc_data,
        left_right_codes=all_left_right_codes[n_scalar_field],
        debug=options.debug
    )

    # TODO: If the order of the meshes does not match the order of scalar_field_at_surface points, reorder them here
    if meshes is not None:
        all_meshes.extend(meshes)
144179

145180

146181
def _validate_stack_relations(data_descriptor: InputDataDescriptor, n_scalar_field: int) -> None:

gempy_engine/core/data/dual_contouring_data.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@ class DualContouringData:
1212
dxdydz: np.ndarray | tuple[float, float, float]
1313

1414
n_surfaces_to_export: int
15+
left_right_codes: np.ndarray
1516
gradients: np.ndarray = None
1617

1718
tree_depth: int = -1
1819
# Water tight
19-
mask: np.ndarray = None
2020

2121
bias_center_mass: np.ndarray = None # * Only for testing
2222
bias_normals: np.ndarray = None # * Only for testing

gempy_engine/core/data/dual_contouring_mesh.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@ class DualContouringMesh:
1010
vertices: np.ndarray
1111
edges: np.ndarray
1212
dc_data: Optional[DualContouringData] = None # * In principle we need this just for testing
13-
left_right: Optional[np.ndarray] = None
1413

1514
def __repr__(self):
1615
return f"DualContouringMesh({self.vertices.shape[0]} vertices, {self.edges.shape[0]} edges)"

gempy_engine/modules/dual_contouring/_dual_contouring.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData,
3030
use_parallel = _should_use_parallel_processing(dc_data_per_stack.n_surfaces_to_export, BackendTensor.engine_backend)
3131
parallel_results = None
3232

33-
if use_parallel and False: # ! (Miguel Sep 25) I do not see a speedup
33+
if use_parallel and True: # ! (Miguel Sep 25) I do not see a speedup
3434
print(f"Using parallel processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
3535
parallel_results = _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug)
3636

@@ -70,8 +70,7 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData,
7070
DualContouringMesh(
7171
vertices_numpy,
7272
indices_numpy,
73-
dc_data_per_stack,
74-
left_right=valid_left_right_codes
73+
dc_data_per_stack
7574
)
7675
)
7776
return stack_meshes
@@ -88,10 +87,9 @@ def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_w
8887
'valid_edges' : dc_data_per_stack.valid_edges,
8988
'xyz_on_centers' : dc_data_per_stack.xyz_on_centers,
9089
'dxdydz' : dc_data_per_stack.dxdydz,
91-
'exported_fields_on_edges': dc_data_per_stack.exported_fields_on_edges,
90+
'gradients': dc_data_per_stack.gradients,
9291
'n_surfaces_to_export' : dc_data_per_stack.n_surfaces_to_export,
9392
'tree_depth' : dc_data_per_stack.tree_depth,
94-
# 'gradients': getattr(dc_data_per_stack, 'gradients', None)
9593
}
9694

9795
# Create surface index chunks
Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
1+
import os
2+
from typing import List
3+
4+
from ._gen_vertices import _generate_vertices
5+
from ._parallel_triangulation import _should_use_parallel_processing, _init_worker
6+
from ._sequential_triangulation import _compute_triangulation
7+
from ... import optional_dependencies
8+
from ...core.backend_tensor import BackendTensor
9+
from ...core.data.dual_contouring_data import DualContouringData
10+
from ...core.data.dual_contouring_mesh import DualContouringMesh
11+
from ...core.utils import gempy_profiler_decorator
12+
13+
# Multiprocessing imports
14+
try:
15+
import torch.multiprocessing as mp
16+
17+
MULTIPROCESSING_AVAILABLE = True
18+
except ImportError:
19+
import multiprocessing as mp
20+
21+
MULTIPROCESSING_AVAILABLE = False
22+
23+
24+
@gempy_profiler_decorator
def compute_dual_contouring_v2(dc_data_list: list[DualContouringData]) -> List[DualContouringMesh]:
    """Build one mesh per surface from pre-sliced dual-contouring data.

    Attempts the parallel path first; when it is disabled or fails,
    every surface is processed sequentially instead.
    """
    meshes = _parallel_process(dc_data_list)
    if meshes is not None:
        return meshes

    # Fall back to sequential processing
    print(f"Using sequential processing for {len(dc_data_list)} surfaces")
    return [
        _process_one_surface(dc_data, dc_data.left_right_codes)
        for dc_data in dc_data_list
    ]
40+
41+
42+
def _parallel_process(dc_data_list: list[DualContouringData]):
    """Run the parallel path when enabled; return None to request the sequential fallback."""
    surface_count = len(dc_data_list)
    worth_parallelizing = _should_use_parallel_processing(surface_count, BackendTensor.engine_backend)

    # ! (Miguel Sep 25) I do not see a speedup
    parallel_enabled = False
    if not (worth_parallelizing and parallel_enabled):
        return None

    print(f"Using parallel processing for {surface_count} surfaces")
    return _parallel_process_surfaces_v2(dc_data_list)
53+
54+
55+
def _parallel_process_surfaces_v2(dc_data_list: list[DualContouringData], num_workers=None, chunk_size=2):
56+
"""Process surfaces in parallel using multiprocessing."""
57+
n_surfaces = len(dc_data_list)
58+
59+
if num_workers is None:
60+
num_workers = max(1, min(os.cpu_count() // 2, n_surfaces // 2))
61+
num_workers=3
62+
63+
# Prepare data for serialization - convert each DualContouringData to dict
64+
dc_data_dicts = []
65+
for dc_data in dc_data_list:
66+
dc_data_dict = {
67+
'xyz_on_edge' : dc_data.xyz_on_edge,
68+
'valid_edges' : dc_data.valid_edges,
69+
'xyz_on_centers' : dc_data.xyz_on_centers,
70+
'dxdydz' : dc_data.dxdydz,
71+
'gradients' : dc_data.gradients,
72+
'left_right_codes' : dc_data.left_right_codes,
73+
'n_surfaces_to_export': dc_data.n_surfaces_to_export,
74+
'tree_depth' : dc_data.tree_depth
75+
}
76+
dc_data_dicts.append(dc_data_dict)
77+
78+
# Create surface index chunks
79+
surface_indices = list(range(n_surfaces))
80+
chunks = [surface_indices[i:i + chunk_size] for i in range(0, len(surface_indices), chunk_size)]
81+
82+
try:
83+
# Use spawn context for better PyTorch compatibility
84+
ctx = mp.get_context("fork") if MULTIPROCESSING_AVAILABLE else mp
85+
86+
with ctx.Pool(processes=num_workers, initializer=_init_worker) as pool:
87+
# Submit all chunks
88+
async_results = []
89+
for chunk in chunks:
90+
result = pool.apply_async(
91+
_process_surface_batch_v2,
92+
(chunk, dc_data_dicts )
93+
)
94+
async_results.append(result)
95+
96+
# Collect results
97+
all_results = []
98+
for async_result in async_results:
99+
batch_results = async_result.get()
100+
all_results.extend(batch_results)
101+
102+
return all_results
103+
104+
except Exception as e:
105+
print(f"Parallel processing failed: {e}. Falling back to sequential processing.")
106+
return None
107+
108+
109+
def _process_surface_batch_v2(surface_indices, dc_data_dicts, left_right_codes):
110+
"""Process a batch of surfaces. This function runs in worker processes."""
111+
results = []
112+
113+
for idx in surface_indices:
114+
dc_data_dict = dc_data_dicts[idx]
115+
116+
# Reconstruct DualContouringData from dict
117+
dc_data = DualContouringData(
118+
xyz_on_edge=dc_data_dict['xyz_on_edge'],
119+
valid_edges=dc_data_dict['valid_edges'],
120+
xyz_on_centers=dc_data_dict['xyz_on_centers'],
121+
dxdydz=dc_data_dict['dxdydz'],
122+
gradients=dc_data_dict['gradients'],
123+
left_right_codes=dc_data_dict['left_right_codes'],
124+
n_surfaces_to_export=dc_data_dict['n_surfaces_to_export'],
125+
tree_depth=dc_data_dict['tree_depth']
126+
)
127+
# Process the surface
128+
mesh = _process_one_surface(dc_data, dc_data.left_right_codes)
129+
results.append(mesh)
130+
131+
return results
132+
def _process_one_surface(dc_data: DualContouringData, left_right_codes) -> DualContouringMesh:
    """Generate the vertices and triangulation for a single surface.

    Args:
        dc_data: Pre-sliced dual-contouring data for this surface.
        left_right_codes: Voxel adjacency codes used by the triangulation.

    Returns:
        The assembled DualContouringMesh (after an optional trimesh cleanup).
    """
    vertices = _generate_vertices(dc_data, False, None)

    # * Average gradient for the edges
    valid_edges = dc_data.valid_edges
    # zeros() already initializes the buffer (the former `edges_normals[:] = 0`
    # was redundant); scatter the per-edge gradients into the
    # (voxel, 12 edges, xyz) normal field.
    edges_normals = BackendTensor.t.zeros((valid_edges.shape[0], 12, 3), dtype=BackendTensor.dtype_obj)
    edges_normals[valid_edges] = dc_data.gradients

    indices_numpy = _compute_triangulation(
        dc_data_per_surface=dc_data,
        left_right_codes=left_right_codes,
        edges_normals=edges_normals,
        vertex=vertices
    )

    vertices_numpy = BackendTensor.t.to_numpy(vertices)

    # Optional trimesh-based cleanup (fills holes). Plain constant instead of
    # the previous walrus-in-condition toggle.
    TRIMESH_LAST_PASS = True
    if TRIMESH_LAST_PASS:
        vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)

    mesh = DualContouringMesh(vertices_numpy, indices_numpy, dc_data)
    return mesh
154+
155+
156+
def _last_pass(vertices, indices):
    """Final cleanup pass with trimesh: fill holes in the generated mesh.

    Returns the input arrays unchanged when trimesh is not installed.
    """
    # Keep the try minimal: only the availability check is expected to raise
    # ImportError. Previously the whole cleanup was inside the try, so an
    # ImportError raised from within trimesh's own operations was silently
    # misinterpreted as "trimesh missing".
    try:
        trimesh = optional_dependencies.require_trimesh()
    except ImportError:
        return vertices, indices

    mesh = trimesh.Trimesh(vertices=vertices, faces=indices)
    mesh.fill_holes()
    return mesh.vertices, mesh.faces

gempy_engine/modules/dual_contouring/_gen_vertices.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Any
1+
from typing import Any, Optional
22

33
import numpy as np
44

@@ -40,13 +40,17 @@ def _generate_vertices(dc_data_per_surface: DualContouringData, debug: bool, sli
4040
return vertices
4141

4242

43-
def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: slice, debug: bool = False):
43+
def generate_dual_contouring_vertices(dc_data_per_stack: DualContouringData, slice_surface: Optional[slice] = None, debug: bool = False):
4444
# @off
4545
n_edges = dc_data_per_stack.n_valid_edges
4646
valid_edges = dc_data_per_stack.valid_edges
4747
valid_voxels = dc_data_per_stack.valid_voxels
48-
xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface]
49-
gradients = dc_data_per_stack.gradients[slice_surface]
48+
if slice_surface is not None:
49+
xyz_on_edge = dc_data_per_stack.xyz_on_edge[slice_surface]
50+
gradients = dc_data_per_stack.gradients[slice_surface]
51+
else:
52+
xyz_on_edge = dc_data_per_stack.xyz_on_edge
53+
gradients = dc_data_per_stack.gradients
5054
# @on
5155

5256
# * Coordinates for all posible edges (12) and 3 dummy edges_normals in the center

0 commit comments

Comments
 (0)