
Commit ab70b6e

[WIP] Getting there
1 parent c24e3e4 commit ab70b6e

File tree: 3 files changed (+60 −24 lines)

gempy_engine/API/dual_contouring/_dual_contouring.py

Lines changed: 37 additions & 16 deletions
@@ -36,26 +36,27 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
 
     # Check if we should use parallel processing
     use_parallel = _should_use_parallel_processing(dc_data_per_stack.n_surfaces_to_export, BackendTensor.engine_backend)
+    parallel_results = None
 
     if use_parallel:
         print(f"Using parallel processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
         parallel_results = _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug)
 
-    if parallel_results is not None:
-        # Convert parallel results to DualContouringMesh objects
-        stack_meshes = []
-        for vertices_numpy, indices_numpy in parallel_results:
-            if TRIMESH_LAST_PASS := True:
-                vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)
-
-            stack_meshes.append(
-                DualContouringMesh(
-                    vertices_numpy,
-                    indices_numpy,
-                    dc_data_per_stack
-                )
-            )
-        return stack_meshes
+    # if parallel_results is not None:
+    #     # Convert parallel results to DualContouringMesh objects
+    #     stack_meshes = []
+    #     for vertices_numpy, indices_numpy in parallel_results:
+    #         if TRIMESH_LAST_PASS := True:
+    #             vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)
+    #
+    #         stack_meshes.append(
+    #             DualContouringMesh(
+    #                 vertices_numpy,
+    #                 indices_numpy,
+    #                 dc_data_per_stack
+    #             )
+    #         )
+    #     return stack_meshes
 
     # Fall back to sequential processing
     print(f"Using sequential processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
@@ -64,7 +65,27 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
     last_surface_edge_idx = 0
     for i in range(dc_data_per_stack.n_surfaces_to_export):
         # @off
-        indices_numpy, vertices_numpy = _sequential_triangulation(dc_data_per_stack, debug, i, last_surface_edge_idx, left_right_codes, valid_edges_per_surface)
+        if parallel_results is not None:
+            _, vertices_numpy = _sequential_triangulation(
+                dc_data_per_stack,
+                debug,
+                i,
+                last_surface_edge_idx,
+                left_right_codes,
+                valid_edges_per_surface,
+                compute_indices=False
+            )
+            indices_numpy = parallel_results[i]
+        else:
+            vertices_numpy, indices_numpy = _sequential_triangulation(
+                dc_data_per_stack,
+                debug,
+                i,
+                last_surface_edge_idx,
+                left_right_codes,
+                valid_edges_per_surface,
+                compute_indices=True
+            )
 
         if TRIMESH_LAST_PASS := True:
             vertices_numpy, indices_numpy = _last_pass(vertices_numpy, indices_numpy)
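For readers skimming the diff, here is a minimal, self-contained sketch of the dispatch this hunk sets up: attempt the parallel pass first and, inside the per-surface loop, reuse its precomputed results while rebuilding only the vertices; otherwise compute both pieces sequentially. The helpers below (process_parallel, triangulate_one) are invented stand-ins for the gempy_engine functions, and a single (indices, vertices) return order is assumed throughout.

from typing import Optional

def process_parallel(n_surfaces: int) -> list:
    # Stand-in for a batched/parallel pass that returns indices per surface.
    return [[(0, 1, 2)] for _ in range(n_surfaces)]

def triangulate_one(i: int, compute_indices: bool = True) -> tuple:
    # Stand-in for the per-surface sequential triangulation.
    vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    indices = [(0, 1, 2)] if compute_indices else None
    return indices, vertices

def build_meshes(n_surfaces: int, use_parallel: bool) -> list:
    parallel_results: Optional[list] = None
    if use_parallel:
        parallel_results = process_parallel(n_surfaces)

    meshes = []
    for i in range(n_surfaces):
        if parallel_results is not None:
            # Reuse the precomputed indices; only the vertices are rebuilt.
            _, vertices = triangulate_one(i, compute_indices=False)
            indices = parallel_results[i]
        else:
            indices, vertices = triangulate_one(i, compute_indices=True)
        meshes.append((vertices, indices))
    return meshes

print(build_meshes(2, use_parallel=True))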

gempy_engine/modules/dual_contouring/_parallel_triangulation.py

Lines changed: 0 additions & 1 deletion
@@ -161,7 +161,6 @@ def _process_single_surface(i, dc_data_per_stack, valid_edges_per_surface, left_
     valid_voxels = dc_data_per_surface.valid_voxels
     left_right_per_surface = left_right_codes[valid_voxels]
     valid_voxels_per_surface = dc_data_per_surface.valid_edges[valid_voxels]
-    voxel_normal_per_surface = voxel_normal[valid_voxels]
     tree_depth_per_surface = dc_data_per_surface.tree_depth
 
     print(f"DEBUG: Calling triangulate function")

gempy_engine/modules/dual_contouring/_sequential_triangulation.py

Lines changed: 23 additions & 7 deletions
@@ -1,3 +1,5 @@
1+
from typing import Any
2+
13
import numpy as np
24
import warnings
35

@@ -8,7 +10,13 @@
810
from ...modules.dual_contouring.fancy_triangulation import triangulate
911

1012

11-
def _sequential_triangulation(dc_data_per_stack: DualContouringData, debug: bool, i: int, last_surface_edge_idx: int, left_right_codes, valid_edges_per_surface: ndarray[tuple[int, int, int], dtype[Any] | Any]) -> tuple[Any, Any]:
13+
def _sequential_triangulation(dc_data_per_stack: DualContouringData,
14+
debug: bool
15+
, i: int, last_surface_edge_idx: int,
16+
left_right_codes,
17+
valid_edges_per_surface,
18+
compute_indices=True
19+
) -> tuple[Any, Any]:
1220
valid_edges: np.ndarray = valid_edges_per_surface[i]
1321
next_surface_edge_idx: int = valid_edges.sum() + last_surface_edge_idx
1422
slice_object: slice = slice(last_surface_edge_idx, next_surface_edge_idx)
@@ -24,11 +32,10 @@ def _sequential_triangulation(dc_data_per_stack: DualContouringData, debug: bool
         tree_depth=dc_data_per_stack.tree_depth
 
     )
-    vertices: np.ndarray = generate_dual_contouring_vertices(
-        dc_data_per_stack=dc_data_per_surface,
-        slice_surface=slice_object,
-        debug=debug
-    )
+    vertices_numpy = _generate_vertices(dc_data_per_surface, debug, slice_object)
+
+    if not compute_indices:
+        return None, vertices_numpy
 
     if left_right_codes is None:
         # * Legacy triangulation
@@ -82,6 +89,15 @@ def _sequential_triangulation(dc_data_per_stack: DualContouringData, debug: bool
     indices = BackendTensor.t.concatenate(indices, axis=0)
 
     # @on
-    vertices_numpy = BackendTensor.t.to_numpy(vertices)
     indices_numpy = BackendTensor.t.to_numpy(indices)
     return indices_numpy, vertices_numpy
+
+
+def _generate_vertices(dc_data_per_surface: DualContouringData, debug: bool, slice_object: slice) -> Any:
+    vertices: np.ndarray = generate_dual_contouring_vertices(
+        dc_data_per_stack=dc_data_per_surface,
+        slice_surface=slice_object,
+        debug=debug
+    )
+    vertices_numpy = BackendTensor.t.to_numpy(vertices)
+    return vertices_numpy
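As a standalone illustration of the new compute_indices flag, the sketch below shows the intended early return: vertices are always produced through a small helper, and index generation is skipped when the caller already has triangles (for example from the parallel path). All names here are invented for the example, not gempy_engine APIs.

import numpy as np

def _vertices_for_surface(n: int) -> np.ndarray:
    # Stand-in for the extracted vertex-generation helper.
    return np.zeros((n, 3))

def triangulate_surface(n_vertices: int, compute_indices: bool = True):
    vertices = _vertices_for_surface(n_vertices)
    if not compute_indices:
        # Caller will supply indices from elsewhere (e.g. a parallel pass).
        return None, vertices
    indices = np.arange(n_vertices, dtype=int).reshape(-1, 3)
    return indices, vertices

# Skip index generation when precomputed triangles are being reused.
idx, verts = triangulate_surface(6, compute_indices=False)
assert idx is None and verts.shape == (6, 3)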
