@@ -140,7 +140,6 @@ def triangulate(left_right_array, valid_edges, tree_depth: int, voxel_normals, v
             n=n
         )
 
-
         indices.append(indices_patch)
         normals.append(normals_patch)
 
@@ -241,57 +240,60 @@ def _compress_binary_indices(left_right_array_active_edge, edge_vector_a, edge_v
 
     return compressed_binary_idx_0, compressed_binary_idx_1, compressed_binary_idx_2
 
-
-def _map_and_filter_voxels_(voxel_code, compressed_idx_0, compressed_idx_1, compressed_idx_2):
-    """Map compressed binary codes to all leaf codes and filter by extent."""
-    # Map remaining compressed binary code to all the binary codes at leaves
-    mapped_voxel_0 = (voxel_code - compressed_idx_0)
-    mapped_voxel_1 = (voxel_code - compressed_idx_1)
-    mapped_voxel_2 = (voxel_code - compressed_idx_2)
-
-    # Find and remove edges at the border of the extent
-    code__a_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_0, axis=0)
-    code__b_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_1, axis=0)
-    code__c_prod_edge = ~BackendTensor.tfnp.all(mapped_voxel_2, axis=0)
-
-    valid_edges_within_extent = code__a_prod_edge * code__b_prod_edge * code__c_prod_edge
-
-    code__a_p = BackendTensor.tfnp.array(mapped_voxel_0[:, valid_edges_within_extent] == 0)
-    code__b_p = BackendTensor.tfnp.array(mapped_voxel_1[:, valid_edges_within_extent] == 0)
-    code__c_p = BackendTensor.tfnp.array(mapped_voxel_2[:, valid_edges_within_extent] == 0)
-
-    return code__a_p, code__b_p, code__c_p
-
 def _map_and_filter_voxels(voxel_code, compressed_idx_0, compressed_idx_1, compressed_idx_2):
-    """Map compressed binary codes to all leaf codes and filter by extent (optimized)."""
244+ """Map compressed binary codes to all leaf codes and filter by extent (optimized v3 )."""
267245
268- # Instead of checking .all() on large arrays, we can use .any() on equality checks
269- # which is more efficient because:
270- # 1. We're looking for matches (== 0) rather than all non-matches (!= 0)
271- # 2. .any() can short-circuit on the first True
246+ # If voxel_code is sorted (or we can sort it once), we can use searchsorted
247+ # which is O(n log m) instead of O(n*m) for broadcasting
248+ voxel_code_flat = voxel_code .ravel ()
 
-    # Find which voxels match each compressed index (these ARE the valid edges)
-    code__a_prod_edge = BackendTensor.tfnp.any(voxel_code == compressed_idx_0, axis=0)
-    code__b_prod_edge = BackendTensor.tfnp.any(voxel_code == compressed_idx_1, axis=0)
-    code__c_prod_edge = BackendTensor.tfnp.any(voxel_code == compressed_idx_2, axis=0)
+    # Membership check: True where the compressed index appears among the leaf voxel codes
+    code__a_prod_edge = BackendTensor.tfnp.isin(compressed_idx_0, voxel_code_flat)
+    code__b_prod_edge = BackendTensor.tfnp.isin(compressed_idx_1, voxel_code_flat)
+    code__c_prod_edge = BackendTensor.tfnp.isin(compressed_idx_2, voxel_code_flat)
 
-    # Valid edges are those that have all three coordinates matching some voxel
     valid_edges_within_extent = code__a_prod_edge & code__b_prod_edge & code__c_prod_edge
 
-    # Now only compute the expensive equality checks for valid edges
-    code__a_p = (voxel_code == compressed_idx_0[valid_edges_within_extent])
-    code__b_p = (voxel_code == compressed_idx_1[valid_edges_within_extent])
-    code__c_p = (voxel_code == compressed_idx_2[valid_edges_within_extent])
+    # Early exit if no valid edges
+    if not BackendTensor.tfnp.any(valid_edges_within_extent):
+        empty = BackendTensor.tfnp.zeros((voxel_code.shape[0], 0), dtype=bool)
+        return empty, empty, empty
 
-    return code__a_p, code__b_p, code__c_p
+    # Filter to valid edges only
+    compressed_idx_0_valid = compressed_idx_0[valid_edges_within_extent]
+    compressed_idx_1_valid = compressed_idx_1[valid_edges_within_extent]
+    compressed_idx_2_valid = compressed_idx_2[valid_edges_within_extent]
 
+    # Final equality checks, now restricted to the edges that survived the filter
+    code__a_p = (voxel_code == compressed_idx_0_valid)
+    code__b_p = (voxel_code == compressed_idx_1_valid)
+    code__c_p = (voxel_code == compressed_idx_2_valid)
+
+    return code__a_p, code__b_p, code__c_p
 
 def _convert_masks_to_indices(code__a_p, code__b_p, code__c_p):
-    """Convert boolean masks to integer indices."""
-    indices_array = BackendTensor.tfnp.arange(code__a_p.shape[0]).reshape(-1, 1)
-    x = (code__a_p * indices_array).T[code__a_p.T]
-    y = (code__b_p * indices_array).T[code__b_p.T]
-    z = (code__c_p * indices_array).T[code__c_p.T]
275+ """Convert boolean masks to integer indices (optimized)."""
276+ # Use where/nonzero to find True indices
277+ # For each column, we expect exactly one True value
278+
279+ # Get the row indices where each column has True
280+ # This returns tuples of (row_indices, col_indices)
281+ x_rows , x_cols = BackendTensor .tfnp .where (code__a_p )
282+ y_rows , y_cols = BackendTensor .tfnp .where (code__b_p )
283+ z_rows , z_cols = BackendTensor .tfnp .where (code__c_p )
284+
285+ # Since each column should have exactly one True, the row_indices
286+ # are already in the right order corresponding to columns 0, 1, 2, ...
287+ # But to be safe, we can create an array and fill it
288+ n_edges = code__a_p .shape [1 ]
289+ x = BackendTensor .tfnp .zeros (n_edges , dtype = BackendTensor .tfnp .int64 )
290+ y = BackendTensor .tfnp .zeros (n_edges , dtype = BackendTensor .tfnp .int64 )
291+ z = BackendTensor .tfnp .zeros (n_edges , dtype = BackendTensor .tfnp .int64 )
292+
293+ x [x_cols ] = x_rows
294+ y [y_cols ] = y_rows
295+ z [z_cols ] = z_rows
296+
     return x, y, z
 
 
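The new `_map_and_filter_voxels` body replaces the broadcast subtract-and-reduce filter with per-coordinate membership tests. A minimal sketch of the equivalence, with plain NumPy standing in for `BackendTensor.tfnp` and made-up toy codes (in the real pipeline these arrays come from `_compress_binary_indices`):

```python
import numpy as np

# Toy data: 4 leaf voxel codes (column vector) and 3 candidate edges.
voxel_code = np.array([[3], [5], [9], [12]])        # shape (n_voxels, 1)
compressed_idx_0 = np.array([3, 7, 9])              # shape (n_edges,)
compressed_idx_1 = np.array([5, 5, 12])
compressed_idx_2 = np.array([9, 9, 3])

# Old filter: broadcast the subtraction to (n_voxels, n_edges) and reduce with all().
old_valid = (~np.all(voxel_code - compressed_idx_0, axis=0)
             & ~np.all(voxel_code - compressed_idx_1, axis=0)
             & ~np.all(voxel_code - compressed_idx_2, axis=0))

# New filter: one membership test per coordinate, no (n_voxels, n_edges) temporaries.
leaves = voxel_code.ravel()
new_valid = (np.isin(compressed_idx_0, leaves)
             & np.isin(compressed_idx_1, leaves)
             & np.isin(compressed_idx_2, leaves))

assert np.array_equal(old_valid, new_valid)         # edge 1 (code 7) is filtered out

# The per-voxel masks are then built only for the surviving edges.
code__a_p = (voxel_code == compressed_idx_0[new_valid])   # shape (n_voxels, n_valid)
```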
@@ -306,4 +308,4 @@ def _calculate_normals_and_order_triangles(x, y, z, voxel_normals, n):
         normal = voxel_normals[y, :, :].sum(1)
     else:
         normal = BackendTensor.tfnp.ones(x.shape[0], dtype=BackendTensor.tfnp.float32)
-    return normal
+    return normal
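Similarly, the reworked `_convert_masks_to_indices` swaps the multiply-and-transpose trick for `where` plus a scatter by column. A small self-check of the two paths, again with NumPy standing in for `BackendTensor.tfnp` and an invented mask (one True per column, as the function assumes):

```python
import numpy as np

# Toy mask as produced by _map_and_filter_voxels: rows index voxels,
# columns index valid edges, exactly one True per column.
code__a_p = np.array([[True,  False],
                      [False, False],
                      [False, True]])

# Old conversion: multiply by a column of row indices, transpose, boolean-index.
indices_array = np.arange(code__a_p.shape[0]).reshape(-1, 1)
x_old = (code__a_p * indices_array).T[code__a_p.T]

# New conversion: where() gives (row, col) pairs; scattering rows by column keeps
# the result ordered by edge even if the pairs come back in a different order.
rows, cols = np.where(code__a_p)
x_new = np.zeros(code__a_p.shape[1], dtype=np.int64)
x_new[cols] = rows

assert np.array_equal(x_old, x_new)                 # both give [0, 2]
```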