1- import numpy
2- import warnings
1+ import os
32from typing import List
43
5- import numpy as np
6-
7- from gempy_engine .config import AvailableBackends
84from ... import optional_dependencies
9-
105from ...core .backend_tensor import BackendTensor
116from ...core .data .dual_contouring_data import DualContouringData
127from ...core .data .dual_contouring_mesh import DualContouringMesh
138from ...core .utils import gempy_profiler_decorator
14- from ...modules .dual_contouring .dual_contouring_interface import triangulate_dual_contouring , generate_dual_contouring_vertices
15- from ...modules .dual_contouring .fancy_triangulation import triangulate
9+ from ...modules .dual_contouring ._parallel_triangulation import _should_use_parallel_processing , _process_surface_batch , _init_worker
10+ from ...modules .dual_contouring ._sequential_triangulation import _sequential_triangulation
11+
# Multiprocessing backend selection: prefer torch.multiprocessing when torch
# is installed, otherwise fall back to the stdlib multiprocessing module.
# MULTIPROCESSING_AVAILABLE records which of the two was imported.
try:
    import torch.multiprocessing as mp
    MULTIPROCESSING_AVAILABLE = True
except ImportError:
    import multiprocessing as mp
    MULTIPROCESSING_AVAILABLE = False
19+
20+
21+
1622
1723
1824@gempy_profiler_decorator
1925def compute_dual_contouring (dc_data_per_stack : DualContouringData , left_right_codes = None , debug : bool = False ) -> List [DualContouringMesh ]:
2026 valid_edges_per_surface = dc_data_per_stack .valid_edges .reshape ((dc_data_per_stack .n_surfaces_to_export , - 1 , 12 ))
2127
22- # ? Is there a way to cut also the vertices?
28+ # Check if we should use parallel processing
29+ use_parallel = _should_use_parallel_processing (dc_data_per_stack .n_surfaces_to_export , BackendTensor .engine_backend )
30+ parallel_results = None
31+
32+ if use_parallel and False : # ! (Miguel Sep 25) I do not see a speedup
33+ print (f"Using parallel processing for { dc_data_per_stack .n_surfaces_to_export } surfaces" )
34+ parallel_results = _parallel_process_surfaces (dc_data_per_stack , left_right_codes , debug )
35+
2336
37+ # Fall back to sequential processing
38+ print (f"Using sequential processing for { dc_data_per_stack .n_surfaces_to_export } surfaces" )
2439 stack_meshes : List [DualContouringMesh ] = []
2540
26- last_surface_edge_idx = 0
2741 for i in range (dc_data_per_stack .n_surfaces_to_export ):
2842 # @off
29- valid_edges : np .ndarray = valid_edges_per_surface [i ]
30- next_surface_edge_idx : int = valid_edges .sum () + last_surface_edge_idx
31- slice_object : slice = slice (last_surface_edge_idx , next_surface_edge_idx )
32- last_surface_edge_idx : int = next_surface_edge_idx
33-
34- dc_data_per_surface = DualContouringData (
35- xyz_on_edge = dc_data_per_stack .xyz_on_edge ,
36- valid_edges = valid_edges ,
37- xyz_on_centers = dc_data_per_stack .xyz_on_centers ,
38- dxdydz = dc_data_per_stack .dxdydz ,
39- exported_fields_on_edges = dc_data_per_stack .exported_fields_on_edges ,
40- n_surfaces_to_export = dc_data_per_stack .n_surfaces_to_export ,
41- tree_depth = dc_data_per_stack .tree_depth
42-
43- )
44- vertices : np .ndarray = generate_dual_contouring_vertices (
45- dc_data_per_stack = dc_data_per_surface ,
46- slice_surface = slice_object ,
47- debug = debug
48- )
49-
50- if left_right_codes is None :
51- # * Legacy triangulation
52- indices = triangulate_dual_contouring (dc_data_per_surface )
43+ if parallel_results is not None :
44+ _ , vertices_numpy = _sequential_triangulation (
45+ dc_data_per_stack ,
46+ debug ,
47+ i ,
48+ left_right_codes ,
49+ valid_edges_per_surface ,
50+ compute_indices = False
51+ )
52+ indices_numpy = parallel_results [i ]
5353 else :
54- # * Fancy triangulation 👗
55-
56- # * Average gradient for the edges
57- edges_normals = BackendTensor .t .zeros ((valid_edges .shape [0 ], 12 , 3 ), dtype = BackendTensor .dtype_obj )
58- edges_normals [:] = np .nan
59- edges_normals [valid_edges ] = dc_data_per_stack .gradients [slice_object ]
60-
61- # if LEGACY:=True:
62- if BackendTensor .engine_backend != AvailableBackends .PYTORCH :
63- with warnings .catch_warnings ():
64- warnings .simplefilter ("ignore" , category = RuntimeWarning )
65- voxel_normal = np .nanmean (edges_normals , axis = 1 )
66- voxel_normal = voxel_normal [(~ np .isnan (voxel_normal ).any (axis = 1 ))] # drop nans
67- pass
68- else :
69- # Assuming edges_normals is a PyTorch tensor
70- nan_mask = BackendTensor .t .isnan (edges_normals )
71- valid_count = (~ nan_mask ).sum (dim = 1 )
72-
73- # Replace NaNs with 0 for sum calculation
74- safe_normals = edges_normals .clone ()
75- safe_normals [nan_mask ] = 0
76-
77- # Compute the sum of non-NaN elements
78- sum_normals = BackendTensor .t .sum (safe_normals , 1 )
79-
80- # Calculate the mean, avoiding division by zero
81- voxel_normal = sum_normals / valid_count .clamp (min = 1 )
82-
83- # Remove rows where all elements were NaN (and hence valid_count is 0)
84- voxel_normal = voxel_normal [valid_count > 0 ].reshape (- 1 , 3 )
85-
86-
87- valid_voxels = dc_data_per_surface .valid_voxels
88- indices = triangulate (
89- left_right_array = left_right_codes [valid_voxels ],
90- valid_edges = dc_data_per_surface .valid_edges [valid_voxels ],
91- tree_depth = dc_data_per_surface .tree_depth ,
92- voxel_normals = voxel_normal
54+ indices_numpy , vertices_numpy = _sequential_triangulation (
55+ dc_data_per_stack ,
56+ debug ,
57+ i ,
58+ left_right_codes ,
59+ valid_edges_per_surface ,
60+ compute_indices = True
9361 )
94- indices = BackendTensor .t .concatenate (indices , axis = 0 )
95-
96- # @on
97- vertices_numpy = BackendTensor .t .to_numpy (vertices )
98- indices_numpy = BackendTensor .t .to_numpy (indices )
99-
62+
10063 if TRIMESH_LAST_PASS := True :
10164 vertices_numpy , indices_numpy = _last_pass (vertices_numpy , indices_numpy )
102-
65+
10366 stack_meshes .append (
10467 DualContouringMesh (
10568 vertices_numpy ,
@@ -110,6 +73,55 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
11073 return stack_meshes
11174
11275
76+
77+
def _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug, num_workers=None, chunk_size=2):
    """Triangulate surfaces in parallel using a multiprocessing pool.

    Snapshots the fields of ``dc_data_per_stack`` into a plain dict (a
    picklable payload for the workers), splits the surface indices into
    chunks of ``chunk_size``, and submits each chunk to
    ``_process_surface_batch`` on the pool.

    Args:
        dc_data_per_stack: DualContouringData holding all surfaces of the stack.
        left_right_codes: Optional codes forwarded to each worker batch.
        debug: Debug flag forwarded to each worker batch.
        num_workers: Pool size. Defaults to half the CPU count, capped by half
            the number of surfaces, and never below 1.
        chunk_size: Number of surfaces submitted per pool task.

    Returns:
        A list with one result per surface, in surface-index order, or ``None``
        when parallel processing fails so the caller can fall back to the
        sequential path.
    """
    if num_workers is None:
        # os.cpu_count() may return None on some platforms; treat that as 1 CPU
        # so the floor division below cannot raise a TypeError.
        cpu_half = (os.cpu_count() or 1) // 2
        num_workers = max(1, min(cpu_half, dc_data_per_stack.n_surfaces_to_export // 2))

    # Plain-dict snapshot of the fields the workers need (serialization-friendly).
    dc_data_dict = {
        'xyz_on_edge'             : dc_data_per_stack.xyz_on_edge,
        'valid_edges'             : dc_data_per_stack.valid_edges,
        'xyz_on_centers'          : dc_data_per_stack.xyz_on_centers,
        'dxdydz'                  : dc_data_per_stack.dxdydz,
        'exported_fields_on_edges': dc_data_per_stack.exported_fields_on_edges,
        'n_surfaces_to_export'    : dc_data_per_stack.n_surfaces_to_export,
        'tree_depth'              : dc_data_per_stack.tree_depth,
    }

    # Split surface indices into fixed-size chunks, one pool task per chunk.
    surface_indices = list(range(dc_data_per_stack.n_surfaces_to_export))
    chunks = [surface_indices[i:i + chunk_size] for i in range(0, len(surface_indices), chunk_size)]

    try:
        # Spawn start method for better PyTorch compatibility than fork;
        # the stdlib fallback module is used as-is.
        ctx = mp.get_context("spawn") if MULTIPROCESSING_AVAILABLE else mp

        with ctx.Pool(processes=num_workers, initializer=_init_worker) as pool:
            # Submit all chunks first so they run concurrently.
            async_results = [
                pool.apply_async(
                    _process_surface_batch,
                    (chunk, dc_data_dict, left_right_codes, debug)
                )
                for chunk in chunks
            ]

            # Collect in submission order so results stay aligned with surface indices.
            all_results = []
            for async_result in async_results:
                all_results.extend(async_result.get())

            return all_results

    except Exception as e:
        # Best-effort parallelism: any failure (pickling, worker crash, ...)
        # signals the caller to use the sequential implementation instead.
        print(f"Parallel processing failed: {e}. Falling back to sequential processing.")
        return None
124+
113125def _last_pass (vertices , indices ):
114126 # Check if trimesh is available
115127 try :
@@ -118,4 +130,4 @@ def _last_pass(vertices, indices):
118130 mesh .fill_holes ()
119131 return mesh .vertices , mesh .faces
120132 except ImportError :
121- return vertices , indices
133+ return vertices , indices
0 commit comments