Commit 07e00f2

[CLN]
1 parent b3364fb commit 07e00f2

3 files changed: +1 -28 lines changed

gempy_engine/API/dual_contouring/_dual_contouring.py
Lines changed: 1 addition & 2 deletions

@@ -29,7 +29,7 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
     use_parallel = _should_use_parallel_processing(dc_data_per_stack.n_surfaces_to_export, BackendTensor.engine_backend)
     parallel_results = None
 
-    if use_parallel and True:
+    if use_parallel and False: # ! (Miguel Sep 25) I do not see a speedup
         print(f"Using parallel processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
         parallel_results = _parallel_process_surfaces(dc_data_per_stack, left_right_codes, debug)
 
@@ -38,7 +38,6 @@ def compute_dual_contouring(dc_data_per_stack: DualContouringData, left_right_co
         print(f"Using sequential processing for {dc_data_per_stack.n_surfaces_to_export} surfaces")
     stack_meshes: List[DualContouringMesh] = []
 
-    last_surface_edge_idx = 0
     for i in range(dc_data_per_stack.n_surfaces_to_export):
        # @off
        if parallel_results is not None:
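Note: `_should_use_parallel_processing` is not part of this diff, so the gate it implements is only visible from its call site. A minimal sketch of what such a gate could look like; the threshold and the backend rule below are assumptions for illustration, not the project's actual logic:

```python
from enum import Enum, auto

class AvailableBackends(Enum):  # local stand-in for gempy_engine's enum
    numpy = auto()
    PYTORCH = auto()

def _should_use_parallel_processing(n_surfaces_to_export: int,
                                    engine_backend: AvailableBackends) -> bool:
    # Assumed heuristic: only parallelise when there are enough surfaces to
    # amortise worker startup, and never under PyTorch, which already
    # multithreads its tensor kernels.
    MIN_SURFACES = 4  # hypothetical threshold, not taken from the commit
    return (engine_backend != AvailableBackends.PYTORCH
            and n_surfaces_to_export >= MIN_SURFACES)
```

Whatever this returns, the new `and False` short-circuits it and forces the sequential branch, since no speedup was observed.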

gempy_engine/core/data/dual_contouring_data.py
Lines changed: 0 additions & 1 deletion

@@ -21,7 +21,6 @@ class DualContouringData:
     _gradients: np.ndarray = None
 
     tree_depth: int = -1
-    last_surface_edge_idx: int = 0
     # Water tight
     mask: np.ndarray = None

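Reconstructed from the context lines of the hunk above, the touched slice of the dataclass now reads as follows (fields outside the hunk are omitted):

```python
from dataclasses import dataclass
import numpy as np

@dataclass
class DualContouringData:
    # ... fields above this hunk omitted ...
    _gradients: np.ndarray = None
    tree_depth: int = -1
    # Water tight
    mask: np.ndarray = None
```

With the sequential loop in `_dual_contouring.py` no longer tracking `last_surface_edge_idx`, the field appears to have had no remaining readers, hence its removal here as well.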
gempy_engine/modules/dual_contouring/_parallel_triangulation.py
Lines changed: 0 additions & 25 deletions

@@ -102,43 +102,25 @@ def _process_single_surface(i, dc_data_per_stack, valid_edges_per_surface, left_
             tree_depth=dc_data_per_stack.tree_depth
         )
 
-        print(f"DEBUG: Processing surface {i}")
-
         if left_right_codes is None:
             # Legacy triangulation
             indices = triangulate_dual_contouring(dc_data_per_surface)
         else:
-            # Fancy triangulation
-            print(f"DEBUG: Creating edges_normals tensor")
-
-            # Check BackendTensor.dtype_obj
-            print(f"DEBUG: BackendTensor.dtype_obj = {BackendTensor.dtype_obj}")
-
             edges_normals = BackendTensor.t.zeros((valid_edges.shape[0], 12, 3), dtype=BackendTensor.dtype_obj)
-            print(f"DEBUG: edges_normals dtype: {edges_normals.dtype if hasattr(edges_normals, 'dtype') else 'No dtype attr'}")
-
-            # Set to NaN - this might be where the error occurs
-            print(f"DEBUG: Setting edges_normals to NaN")
             if BackendTensor.engine_backend == AvailableBackends.PYTORCH:
                 edges_normals[:] = float('nan') # Use Python float nan instead of np.nan
             else:
                 edges_normals[:] = np.nan
 
             # Get gradient data
-            print(f"DEBUG: Getting gradient data")
             gradient_data = dc_data_per_stack.gradients[slice_object]
-            print(f"DEBUG: gradient_data shape: {gradient_data.shape}, dtype: {gradient_data.dtype if hasattr(gradient_data, 'dtype') else 'No dtype attr'}")
 
             # Fix dtype mismatch by ensuring compatible dtypes
             if BackendTensor.engine_backend == AvailableBackends.PYTORCH:
                 if hasattr(gradient_data, 'dtype') and hasattr(edges_normals, 'dtype'):
-                    print(f"DEBUG: Comparing dtypes - edges_normals: {edges_normals.dtype}, gradient_data: {gradient_data.dtype}")
                     if gradient_data.dtype != edges_normals.dtype:
-                        print(f"DEBUG: Converting gradient_data from {gradient_data.dtype} to {edges_normals.dtype}")
                         gradient_data = gradient_data.to(edges_normals.dtype)
 
-            print(f"DEBUG: Assigning gradient data to edges_normals")
-            print(f"DEBUG: valid_edges shape: {valid_edges.shape}, sum: {valid_edges.sum()}")
             edges_normals[valid_edges] = gradient_data
 
             if BackendTensor.engine_backend != AvailableBackends.PYTORCH:
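The prints deleted in this hunk were tracing two genuine portability fixes that the commit keeps: filling the freshly allocated normals tensor with NaN in a form both backends accept, and coercing the gradient dtype before the masked assignment. A self-contained sketch of that surviving logic, using plain PyTorch in place of the `BackendTensor` wrapper (function name and shapes are illustrative):

```python
import numpy as np
import torch

def scatter_gradients(valid_edges: torch.Tensor,    # bool mask, shape (n_voxels, 12)
                      gradient_data: torch.Tensor,  # shape (n_valid_edges, 3)
                      use_pytorch: bool = True) -> torch.Tensor:
    # One normal slot per cube edge, initialised to NaN to mark "unused"
    edges_normals = torch.zeros((valid_edges.shape[0], 12, 3), dtype=torch.float64)
    if use_pytorch:
        edges_normals[:] = float('nan')  # the commit keeps Python's float nan here
    else:
        edges_normals[:] = np.nan
    # A float32/float64 mismatch makes the masked assignment below raise in
    # torch, hence the explicit conversion kept from the diff
    if gradient_data.dtype != edges_normals.dtype:
        gradient_data = gradient_data.to(edges_normals.dtype)
    edges_normals[valid_edges] = gradient_data  # fill only surface-cutting edges
    return edges_normals
```

Called with a `(10, 12)` boolean mask marking a single edge and one `(1, 3)` gradient row, the result is NaN everywhere except that slot.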
@@ -147,7 +129,6 @@ def _process_single_surface(i, dc_data_per_stack, valid_edges_per_surface, left_
                 voxel_normal = np.nanmean(edges_normals, axis=1)
                 voxel_normal = voxel_normal[(~np.isnan(voxel_normal).any(axis=1))]
             else:
-                print(f"DEBUG: Computing voxel normals with PyTorch")
                 # PyTorch tensor operations
                 nan_mask = BackendTensor.t.isnan(edges_normals)
                 valid_count = (~nan_mask).sum(dim=1)
@@ -156,28 +137,22 @@ def _process_single_surface(i, dc_data_per_stack, valid_edges_per_surface, left_
                 sum_normals = BackendTensor.t.sum(safe_normals, 1)
                 voxel_normal = sum_normals / valid_count.clamp(min=1)
                 voxel_normal = voxel_normal[valid_count > 0].reshape(-1, 3)
-                print(f"DEBUG: voxel_normal shape: {voxel_normal.shape}, dtype: {voxel_normal.dtype}")
 
             valid_voxels = dc_data_per_surface.valid_voxels
             left_right_per_surface = left_right_codes[valid_voxels]
             valid_voxels_per_surface = dc_data_per_surface.valid_edges[valid_voxels]
             tree_depth_per_surface = dc_data_per_surface.tree_depth
 
-            print(f"DEBUG: Calling triangulate function")
             indices = triangulate(
                 left_right_array=left_right_per_surface,
                 valid_edges=valid_voxels_per_surface,
                 tree_depth=tree_depth_per_surface,
                 voxel_normals=voxel_normal
             )
-            print(f"DEBUG: triangulate returned, concatenating indices")
             indices = BackendTensor.t.concatenate(indices, axis=0)
 
-            print(f"DEBUG: Converting to numpy")
             # vertices_numpy = BackendTensor.t.to_numpy(vertices)
             indices_numpy = BackendTensor.t.to_numpy(indices)
-
-            print(f"DEBUG: Successfully processed surface {i}")
         return indices_numpy
 
     except Exception as e:

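The PyTorch branch running through the last two hunks is a hand-rolled NaN-aware mean, equivalent to `np.nanmean(..., axis=1)` followed by dropping rows that were all-NaN. A self-contained sketch; `safe_normals` is reconstructed from context, since its defining line falls between the displayed hunks (presumably a `where` that zeroes the NaN entries):

```python
import torch

def voxel_normals_from_edges(edges_normals: torch.Tensor) -> torch.Tensor:
    """NaN-aware mean over the 12 edge slots of each voxel, mirroring the diff.
    edges_normals: shape (n_voxels, 12, 3); NaN marks an unused edge."""
    nan_mask = torch.isnan(edges_normals)                  # (n, 12, 3)
    valid_count = (~nan_mask).sum(dim=1)                   # (n, 3) valid edges per component
    safe_normals = torch.where(nan_mask,                   # zero the NaNs so the
                               torch.zeros_like(edges_normals),  # sum stays finite
                               edges_normals)
    sum_normals = torch.sum(safe_normals, 1)               # (n, 3)
    voxel_normal = sum_normals / valid_count.clamp(min=1)  # mean; clamp avoids 0/0
    # keep only components that saw at least one valid edge, as in the diff
    return voxel_normal[valid_count > 0].reshape(-1, 3)
```

Recent PyTorch also ships `torch.nanmean`, which would collapse most of this, but the masked form above is what the code does after this commit.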