diff --git a/examples/10-mapping-grouping/mappings.py b/examples/10-mapping-grouping/mappings.py index 77b8fab..57dc990 100644 --- a/examples/10-mapping-grouping/mappings.py +++ b/examples/10-mapping-grouping/mappings.py @@ -1,3 +1,58 @@ +################################################################################################################################################## +################################################################################################################################################## +""" + FUNCTIONS SUMMARY + + Notation used: A group of cross-sections, when repeated, forms a pattern cross-section. The first pattern cross-section + is designated as the reference pattern cross-section, which serves as the basis for comparisons. + + This script contains the following functions: + + 1. validate_rank_size(total_elements, size, num_elements_per_section): + - Validates if the total number of elements and sections can be evenly distributed across MPI ranks based on the two criteria's. + + 2. valid_sizes(total_elements, num_elements_per_section, min_size=100, max_size=2000) + - If the mentioned ranks size is not valid, searches for alternative MPI sizes (size values) that meet both criteria above within a given range. + - Useful for finding configurations that avoid uneven data distribution. + + 3. compute_element_centers(data) + - Flattens the element's node coordinates and computes the geometric center of each element by averaging all its node coordinates. + + 4. compute_cross_section_centers(data, num_elements_per_section, prev_overlap=None) + - Computes the center point of each individual cross-section by averaging element centers. + - Handles leftover/overlap elements between ranks for continuity. + + 5. assign_section(rank, ranks_per_section, final_cross_section_centers) + - Maps an MPI rank to its section based on ranks_per_section and retrieves the center of that section. + + 6. 
calculate_vectors(cross_section_center_chosen, element_centers_local, flow_directions) + - Calculates cylindrical-coordinate basis vectors for each element; + - Radial: from section center to element center (normalized). Theta: perpendicular to radial and z-axis (via cross product). Z: aligned with the flow direction. + + 7. vector_mapping(A) + - Determines how each local element vector maps to the reference directions (radial, theta, z) by matching the largest absolute + dot product in a (3x3) matrix of comparisons. + - Rows: local vectors (r_diff, s_diff, t_diff). + - Columns: reference vectors (radial, theta, z). + + 8. compare_vector_mappings(mappings_cy, ref_vector_mapping, idx_ranges) + - Compares computed cylindrical vector mappings against a reference mapping (ex: taking mappings from element 0) and + prints match percentage for the current rank's data range. + + 9. mappings() + Main computation pipeline: + - Reads the files and generates the necessary data structures. + - Calculates face differences, averages, normals, and face mappings. + - Applies data re-ordering and sign corrections of axes. + - Recomputes final mappings and flow directions. + - Computes element centers and cross-section centers across ranks (with overlap handling). + - Assigns each rank a section center. + - Computes radial, theta, and z vectors. + - Builds transformation matrix A and computes final vector mappings between local and reference vectors. + - Compares mappings against a reference and prints statistics. 
+""" +################################################################################################################################################## +################################################################################################################################################## #!/usr/bin/env python import os @@ -43,16 +98,18 @@ leftover = total_elements % size # Elements that don’t fit evenly in the rank (if any) import numpy as np -from pysemtools.io.reorder_data import ( +from pysemtools.io.mesh_orientation import ( generate_data, calculate_face_differences, calculate_face_averages, calculate_face_normals, - face_mappings, compare_mappings, compare_methods, reduce_face_normals, directions_face_normals, + face_mappings, compare_mappings, compare_methods, reduce_face_normals, face_normals_mappings, reorder_assigned_data, correct_axis_signs ) def validate_rank_size(total_elements: int, size: int, num_elements_per_section: int) -> bool: """ Validates whether the total number of elements and sections can be evenly distributed among ranks. - + - Criterion 1: Total number of elements must be evenly divisible across all ranks. + - Criterion 2: Each cross-section must be assigned an integer number of ranks. + Args: total_elements (int): The total number of elements in the dataset. size (int): The number of ranks available for computation. @@ -61,13 +118,13 @@ def validate_rank_size(total_elements: int, size: int, num_elements_per_section: Returns: bool: True if the distribution is valid, False otherwise. """ - # Criterion 1: elements_per_rank should be exactly divisible + # Criterion 1: Total number of elements must be evenly divisible across all ranks. 
elements_per_rank = total_elements // size if total_elements % size != 0: print(f"Error: Criteria 1 Failed, {total_elements} % {size} != 0 (elements_per_rank = {elements_per_rank})") if rank == 0 else None return False - - # Criterion 2: ranks_per_section should be exactly divisible + + # Criterion 2: Each cross-section must be assigned an integer number of ranks. ranks_per_section = num_elements_per_section // elements_per_rank if num_elements_per_section % elements_per_rank != 0: print(f"Error: Criteria 2 Failed, {num_elements_per_section} % {elements_per_rank} != 0 (ranks_per_section = {ranks_per_section})") if rank == 0 else None @@ -78,7 +135,7 @@ def validate_rank_size(total_elements: int, size: int, num_elements_per_section: def valid_sizes(total_elements: int, num_elements_per_section: int, min_size: int=100, max_size: int=2000): """ - Finds valid rank sizes for distributing elements evenly across computational processes. + If the user input is invalid, then this function finds valid rank sizes for distributing elements evenly across computational processes. Args: total_elements (int): Total number of elements. @@ -123,8 +180,8 @@ def compute_element_centers(data): numpy.ndarray: Array containing computed element centers. 
""" num_elements = data.shape[0] - flattened = data.reshape(num_elements, -1, 3) # (num_elements, 512, 3) - element_centers = np.mean(flattened, axis=1) # (num_elements, 3) + flattened = data.reshape(num_elements, -1, 3) + element_centers = np.mean(flattened, axis=1) # Shape: (num_elements, 3) return element_centers def compute_cross_section_centers(data, num_elements_per_section, prev_overlap=None): @@ -142,6 +199,7 @@ def compute_cross_section_centers(data, num_elements_per_section, prev_overlap=N """ cross_section_centers = [] all_data = [] + # Combine previous leftover and new data if prev_overlap is not None and len(prev_overlap) > 0: prev_overlap = np.array(prev_overlap) @@ -150,11 +208,12 @@ def compute_cross_section_centers(data, num_elements_per_section, prev_overlap=N all_data = np.vstack((prev_overlap, data)) else: all_data = data - + num_total_elements = all_data.shape[0] full_sections = num_total_elements // num_elements_per_section leftover_elements = num_total_elements % num_elements_per_section - + + # For each full section, compute the center by taking mean position of its elements and appends that center to the cross_section_centers list for i in range(full_sections): start_idx = i * num_elements_per_section end_idx = start_idx + num_elements_per_section @@ -190,6 +249,9 @@ def assign_section(rank, ranks_per_section, final_cross_section_centers): def calculate_vectors(cross_section_center_choosen, element_centers_local, flow_directions): """ Computes the radial, theta, and z unit vectors for each element relative to its cross-section center. + - Radial: from section center to element center (normalized) + - Theta: perpendicular to radial and z-axis (via cross product) + - Z: aligned with the flow direction Args: cross_section_center_chosen (numpy.ndarray): Selected cross-section center. 
@@ -223,31 +285,53 @@ def calculate_vectors(cross_section_center_choosen, element_centers_local, flow_ def vector_mapping(A): """ - Maps computed vectors to reference directions by finding the best fit per element. + Determines how each of the three local element vectors (rows of A) should be labeled as 'radial', 'theta', or 'z', based on the strongest directional match + with the three reference vectors (columns of A). + + The input A is a (num_elements, 3, 3) transformation matrix: + - local vector index → r_diff, s_diff, t_diff + - reference vector index → radial, theta, z + - Each value of A is a dot product between a local vector and a reference vector. + + Algorithm for each element: + - Take the absolute value of the dot products for matching strength. + - Repeatedly select the largest remaining dot product in the matrix. + - Assign the corresponding local vector (row) to the matching + reference vector (column). + - Mark that row and column as "used" by setting them to -inf, ensuring + each local vector and each reference vector is assigned exactly once. + - Continue until all three local vectors are assigned a unique reference label. Args: - A (numpy.ndarray): Transformation matrix containing dot product values between computed vectors. + A (numpy.ndarray): dot products between local and reference vectors for each element. Returns: - list: A list of tuples representing mapped vector assignments (radial, theta, z). + list[tuple]: One tuple per element ('radial', 'theta', 'z'), + indicating how each local vector maps to reference directions. 
""" num_elements = A.shape[0] mappings = [] for elem in range(num_elements): + # Take absolute values to measure directional alignment strength A_copy = np.abs(A[elem]) mapping = [""] * 3 reference_vectors = ['radial', 'theta', 'z'] assigned_rows = set() assigned_cols = set() + + # Match strongest remaining dot product for _ in range(3): max_index = np.unravel_index(np.argmax(A_copy, axis=None), A_copy.shape) row, col = max_index mapping[row] = reference_vectors[col] + + # Mark row and column as used to avoid duplicate assignments assigned_rows.add(row) assigned_cols.add(col) A_copy[row, :] = -np.inf A_copy[:, col] = -np.inf - + + # Ensure all vectors were uniquely assigned if len(assigned_rows) < 3 or len(assigned_cols) < 3: print(f"Warning: Not all vectors were assigned properly at element {elem}.") mappings.append(tuple(mapping)) @@ -282,13 +366,13 @@ def mappings() -> None: Notation: A group of cross-sections, when repeated, forms a pattern cross-section. The first pattern cross-section is designated as the reference pattern cross-section, which serves as the basis for comparisons. - Does the following: - - Reads input files and computes face differences, averages, and normal vectors. - - Applies reordering and axis corrections. - - Computes final mappings in cartesian coordinates after corrections. - - Computes element centres and each cross-section center. - - Calculates radial, theta, and z vectors for each element relative to the cross-section center. - - Computes vector mappings in cylindrical coordinates using transformation matrices. + This function: + - Reads input files and computes face differences, averages, and normal vectors. + - Applies re-ordering and sign corrections of axes. + - Computes final mappings in cartesian coordinates after corrections. + - Computes element centres and each cross-section center. + - Calculates radial, theta, and z vectors for each element relative to the cross-section center. 
+ - Computes vector mappings in cylindrical coordinates using transformation matrices. Returns: None @@ -296,9 +380,7 @@ def mappings() -> None: fname_3d: str = "../../field0.f00000" - fname_out: str = "../../fieldout0.f00000" - - assigned_data, fld_3d_r, msh_3d = generate_data(fname_3d, fname_out) + assigned_data, fld_3d_r, msh_3d = generate_data(fname_3d) # Methods for cal. the difference and normal vectors assigned_r_diff, assigned_s_diff, assigned_t_diff = calculate_face_differences(assigned_data) @@ -348,7 +430,7 @@ def mappings() -> None: # Compute normal vectors and directions assigned_normals = calculate_face_normals(assigned_data_final) averaged_normals = reduce_face_normals(assigned_normals) - final_mappings_fn, flow_directions = directions_face_normals(averaged_normals) + final_mappings_fn, flow_directions = face_normals_mappings(averaged_normals) flow_directions = np. array(flow_directions) # Compare mappings after reordering @@ -443,7 +525,7 @@ def mappings() -> None: ] for i in range(num_elements) ]) - # Calculate vector mappings using the transformation matrix A + # Calculate cylindrical vector mappings using the transformation matrix A mappings_cy = vector_mapping(A) ref_vector_mapping = mappings_cy[0] if rank == 0 else None diff --git a/examples/10-mapping-grouping/elem_group.py b/examples/10-mapping-grouping/pattern_elem_group_averaging.py similarity index 75% rename from examples/10-mapping-grouping/elem_group.py rename to examples/10-mapping-grouping/pattern_elem_group_averaging.py index 10faf45..754904f 100644 --- a/examples/10-mapping-grouping/elem_group.py +++ b/examples/10-mapping-grouping/pattern_elem_group_averaging.py @@ -1,3 +1,47 @@ +################################################################################################################################################## +################################################################################################################################################## +""" + 
FUNCTIONS SUMMARY + + Notation used: A group of cross-sections, when repeated, forms a pattern cross-section. The first pattern cross-section + is designated as the reference pattern cross-section, which serves as the basis for comparisons. + + This script contains the following functions: + + 1. validate_rank_size(total_elements, size, num_elements_per_section): + - Validates if the total number of elements and sections can be evenly distributed across MPI ranks based on the two criteria's. + + 2. valid_sizes(total_elements, num_elements_per_section, min_size=100, max_size=2000) + - If the mentioned ranks size is not valid, searches for alternative MPI sizes (size values) that meet both criteria above within a given range. + - Useful for finding configurations that avoid uneven data distribution. + + 3. truncate_reduce(data, decimals): + - Applies controlled truncation of numerical precision to selected axes of data arrays. + - Helps mitigate floating-point precision errors when comparing distributed datasets. + + 4. extracted_fields(fld_3d_r): + - Dynamically extracts and maps key fields from the input FieldRegistry object. + - Handles standard fields like velocity (u, v, w), pressure, temperature, as well as additional scalar fields. + + 5. get_rank_for(z_max_elem_index): + - Determines the MPI rank responsible for a given global element index. + - Handles boundary conditions cases to ensure proper rank identification. + + 6. element_grouping(): + Main computation pipeline + - Reads input files and constructs required data structures. + - Computes face differences, averages, normals, and face mappings and applies data reordering and axis sign corrections. + - Identifies the reference pattern cross-section and groups elements with matching geometry across repeated patterns. + - Redistributes reference pattern cross-section data across MPI ranks for group elements together. 
+ - Dynamically extracts relevant fields (e.g., velocity, pressure, temperature) based on user input. + - Exchanges only necessary grouped element data between ranks to minimize communication overhead. + - Performs group-wise field averaging. + - Builds a partitioned mesh for subdomain-level outputs and updates the field registry. + - Writes final processed fields to output files. +""" +################################################################################################################################################## +################################################################################################################################################## + #!/usr/bin/env python import os @@ -40,14 +84,15 @@ field_name = comm.bcast(field_name, root=0) import numpy as np -from pysemtools.io.reorder_data import ( +from pysemtools.io.mesh_orientation import ( generate_data, calculate_face_differences, calculate_face_averages, calculate_face_normals, - face_mappings, compare_mappings, compare_methods, reduce_face_normals, directions_face_normals, + face_mappings, compare_mappings, compare_methods, reduce_face_normals, face_normals_mappings, reorder_assigned_data, correct_axis_signs ) from pysemtools.io.ppymech.neksuite import pynekwrite from pysemtools.datatypes.field import FieldRegistry from pysemtools.datatypes.msh_partitioning import MeshPartitioner +from pysemtools.datatypes.msh import Mesh elem_pattern_cross_sections = num_elements_per_section * pattern_factor; # No. of elements in each pattern cross-section total_elements = elem_pattern_cross_sections * num_pattern_cross_sections # Total elements in the file. @@ -61,6 +106,8 @@ def validate_rank_size(total_elements: int, size: int, num_elements_per_section: int) -> bool: """ Validates whether the total number of elements and sections can be evenly distributed among ranks. + - Criterion 1: Total number of elements must be evenly divisible across all ranks. 
+ - Criterion 2: Each cross-section must be assigned an integer number of ranks. Args: total_elements (int): The total number of elements in the dataset. @@ -70,13 +117,13 @@ def validate_rank_size(total_elements: int, size: int, num_elements_per_section: Returns: bool: True if the distribution is valid, False otherwise. """ - # Criterion 1: elements_per_rank should be exactly divisible + # Criterion 1: Total number of elements must be evenly divisible across all ranks. elements_per_rank = total_elements // size if total_elements % size != 0: print(f"Error: Criteria 1 Failed, {total_elements} % {size} != 0 (elements_per_rank = {elements_per_rank})") if rank == 0 else None return False - - # Criterion 2: ranks_per_section should be exactly divisible + + # Criterion 2: Each cross-section must be assigned an integer number of ranks. ranks_per_section = num_elements_per_section // elements_per_rank if num_elements_per_section % elements_per_rank != 0: print(f"Error: Criteria 2 Failed, {num_elements_per_section} % {elements_per_rank} != 0 (ranks_per_section = {ranks_per_section})") if rank == 0 else None @@ -169,9 +216,9 @@ def get_rank_for(z_max_elem_index): Identifies the rank responsible for storing a specific global element index. This function: - - Determines which rank holds the last element in `elem_pattern_cross_sections`. - - Computes the rank ID based on how elements are distributed across ranks. - - Ensures boundary conditions are handled correctly. + - Determines which rank holds the last element in `elem_pattern_cross_sections`. + - Computes the rank ID based on how elements are distributed across ranks. + - Ensures boundary conditions are handled correctly. Args: z_max_elem_index (int): The global index of the element to locate. @@ -179,10 +226,15 @@ def get_rank_for(z_max_elem_index): Returns: int: The rank ID that owns the specified element. 
""" + # Case where index is negative — assign to rank 0 if z_max_elem_index < 0: return 0 + + # Case where index exceeds total elements — assign to last rank elif z_max_elem_index >= total_elements: return size - 1 + + # Compute rank based on uniform distribution of elements else: return (z_max_elem_index // elements_per_rank)-1 @@ -190,33 +242,33 @@ def element_grouping() -> None: """ Groups elements, processes field data, and manages distributed computation across MPI ranks. - Notation: A group of cross-sections, when repeated, forms a pattern cross-section. The first pattern cross-section - is designated as the reference pattern cross-section, which serves as the basis for comparisons. - - 1. Data Loading & Processing: - - Reads input files and computes face differences, averages, and normal vectors. - - Applies reordering and axis corrections. + Notation: A group of cross-sections, when repeated, forms a pattern cross-section. The first pattern cross-section is designated as the reference pattern + cross-section, which serves as the basis for comparisons. - 2. Element Grouping: - - Determines a reference pattern cross-section and identifies repeating structures. - - Groups elements based on shape consistency, ensuring alignment across computational ranks. + - Data Loading & Processing: + a. Reads input files and computes face differences, averages, and normal vectors. + b. Applies reordering and axis corrections. - 3. Coordinate Collection & Redistribution: - - Retrieves coordinate data from the reference pattern cross-section that is distributed across multiple ranks. - - Ensures efficient redistribution of coordinates to all ranks for consistent processing and averaging. + - Element Grouping: + a. Elements with similar geometry are grouped together. + b. The reference pattern cross-section (i.e., the baseline structural unit) is identified, along with its repeating structures. + c. 
Since each pattern cross-section is split across an integer number of ranks (e.g., if the reference pattern cross-section spans ranks 0-5 and the next pattern + cross-section spans 6-11, and the following spans 12-17), the elements in the next pattern cross-sections (e.g., elements in ranks 6, 12, 18, and so on) will + generally have the same geometry as those in the reference pattern cross-section (e.g., elements in rank 0). + d. Ensures efficient redistribution of reference pattern cross-section to all ranks for grouping of elements together and further averaging. - 4. Field Extraction & Inter-Rank Data Exchange: - - Extracts relevant fields dynamically for structured storage and processing. - - Facilitates communication between ranks, sending and receiving required element data based on groups. - - Ensures each rank has access to necessary computational fields. + - Field Extraction & Inter-Rank Data Exchange: + a. Extracts relevant fields dynamically for structured storage and processing. + b. Facilitates communication between ranks, sending and receiving required element data based on groups. + c. Ensures that each MPI rank obtains the exact data needed to perform localized computations and field averaging. - 5. Group Averaging & Field Registry Management: - - Computes field averages across pattern cross-sections. - - Adds averaged fields to the registry. + - Group Averaging of fields & Field Registry Management: + a. Computes field averages across the elements that fall under the same group. + b. Constructs a subdomain mesh for processed data from the elements that satisfy certain conditions. + c. Adds averaged fields to the registry. - 6. Final Data Writing: - - Constructs a subdomain mesh for processed data. - - Writes computed fields to output files for continued analysis. + - Final Data Writing: + Writes computed fields to output files for continued analysis. 
Args: None @@ -225,11 +277,9 @@ def element_grouping() -> None: None (Processes, distributes, and saves field data) """ fname_3d = "../../field0.f00024" - - fname_out = "../../field_out_0.f00024" # Load data - assigned_data, fld_3d_r, msh_3d = generate_data(fname_3d, fname_out) + assigned_data, fld_3d_r, msh_3d = generate_data(fname_3d) # Methods for cal. the difference and normal vectors assigned_r_diff, assigned_s_diff, assigned_t_diff = calculate_face_differences(assigned_data) @@ -249,7 +299,7 @@ def element_grouping() -> None: compare_mappings(mappings_fd, ref_mapping_fd, "mappings_fd_before_reorder") compare_methods(mappings_fd, mappings_fa) - # Apply reordering of assigned data + # Apply reordering of "assigned data" assigned_data_new = reorder_assigned_data(assigned_data, mappings_fd, ref_mapping_fd) r_diff_new, s_diff_new, t_diff_new = calculate_face_differences(assigned_data_new) @@ -274,7 +324,7 @@ def element_grouping() -> None: # Compute normal vectors and directions assigned_normals = calculate_face_normals(assigned_data_final) averaged_normals = reduce_face_normals(assigned_normals) - final_mappings_fn, flow_directions = directions_face_normals(averaged_normals) + final_mappings_fn, flow_directions = face_normals_mappings(averaged_normals) flow_directions = np. 
array(flow_directions) # Compare mappings after reordering @@ -398,27 +448,36 @@ def element_grouping() -> None: fields = extracted_fields(fld_3d_r) fields_mentioned = [] + if field_name == 'vel': - fields_mentioned = ['u', 'v', 'w'] + fields_mentioned = ['u', 'v', 'w'] # Velocity components elif field_name == 'pres': - fields_mentioned = ['pres'] + fields_mentioned = ['pres'] # Pressure field elif field_name == 'temp': - fields_mentioned = ['temp'] + fields_mentioned = ['temp'] # Temperature field elif field_name == 'default': fields_mentioned = [] + + # Include all non-empty fields from the registry for key in fld_3d_r.fields.keys(): if len(fld_3d_r.fields[key]) != 0: fields_mentioned.append(key) + + # Expand 'vel' into its components if present if 'vel' in fields_mentioned: fields_mentioned.remove('vel') fields_mentioned = ['u', 'v', 'w'] + fields_mentioned + + # Expand 'scal' into individual scalar field names (s1, s2, ...) if 'scal' in fields_mentioned: fields_mentioned.remove('scal') length_scal = len(fld_3d_r.fields['scal']) scal_fields = [f's{i+1}' for i in range(length_scal)] fields_mentioned = fields_mentioned + scal_fields + print(f"Rank {rank} has default field_mentioned:", fields_mentioned) if rank == 0 else None - + + # Below we are performing the averaging of fields based on user input which are belonging to same group # Determine which groups, each rank is responsible for if rank == 0: owned_groups_split = [] @@ -428,9 +487,10 @@ def element_grouping() -> None: else: owned_groups_split = None + # Scatter the group ownership to all ranks owned_groups = comm.scatter(owned_groups_split, root=0) - # Determine which elements belonging to which rank + # Determine which elements belonging to which rank (Map each global element ID to its owning rank) if rank == 0: element_rank_mapping = [(gid, gid // elements_per_rank) for gid in range(total_elements)] else: @@ -530,11 +590,19 @@ def element_grouping() -> None: conidtion2 = msh_3d.z > 0.0 cond = condition1 
& conidtion2 + # update the mesh coordinates based on the assigned data_final + x_new = assigned_data_final[..., 0].astype(np.float64) + y_new = assigned_data_final[..., 1].astype(np.float64) + z_new = assigned_data_final[..., 2].astype(np.float64) + + msh_3d_final = Mesh(comm, create_connectivity=False) + msh_3d_final.init_from_coords(comm, x=x_new, y=y_new, z=z_new) + # Initialize the mesh partitioner with the given condition - mp = MeshPartitioner(comm, msh=msh_3d, conditions=[cond]) + mp = MeshPartitioner(comm, msh=msh_3d_final, conditions=[cond]) # Create the properly partitioned sub mesh and field - partitioned_mesh = mp.create_partitioned_mesh(msh_3d, partitioning_algorithm="load_balanced_linear", create_conectivity=False) + partitioned_mesh = mp.create_partitioned_mesh(msh_3d_final, partitioning_algorithm="load_balanced_linear", create_conectivity=False) partitioned_field = mp.create_partitioned_field(fld, partitioning_algorithm="load_balanced_linear") print(f"fld.fields are", partitioned_field.fields.keys()) if rank == 0 else None diff --git a/pysemtools/io/reorder_data.py b/pysemtools/io/mesh_orientation.py similarity index 65% rename from pysemtools/io/reorder_data.py rename to pysemtools/io/mesh_orientation.py index 62caedb..004ad0e 100644 --- a/pysemtools/io/reorder_data.py +++ b/pysemtools/io/mesh_orientation.py @@ -1,3 +1,58 @@ +################################################################################################################################################## +################################################################################################################################################## +""" + FUNCTIONS SUMMARY + + This script contains the following functions: + + 1. generate_data(fname_3d) + - Reads mesh & field data and extracts element coordinate arrays. + + 2. calculate_face_differences(assigned_data) + - Computes normalized average difference vectors between opposing element faces (r, s, t directions). 
+ - First calculates the difference between corresponding points on opposing faces, then takes the average, + and finally normalizes the result. + + 3. calculate_face_normals(assigned_data) + - Calculates raw face normal vectors for each of the six element faces. + + 4. calculate_face_averages(assigned_data) + - Computes normalized average difference vectors between opposing element faces (for re-verification). + - First calculates the average position of each face, then finds the difference between opposing faces, + and finally normalizes the result. + + 5. face_mappings(cal_r, cal_s, cal_t) + - Maps local element axes (r, s, t) to global axes (x, y, z) based on dominant difference vector directions. + - Ensures unique directional assignment per element. + - Identifies the principal flow direction for each element. + + 6. compare_mappings(mappings, ref_mapping, method) + - Compares a mapping set to a reference mapping and computes match percentage. + + 7. compare_methods(mappings_fd, mappings_fa) + - Compares face-difference-based mappings to face-average-based mappings and computes match percentage (ideally they should be equal. if not then there is some issue) + + 8. align_normals(normals, normals_pair, direction_type) + - Ensures that paired face normals are consistently oriented and aligned with a reference. Using rank 0's element 0 as the reference, it compares face normals across all elements + and adjusts their orientations as needed. + + 9. reduce_face_normals(normals) + - After alignment, averages the paired face normals to produce a reduced set of normals per element (reducing 6 face normals to 3 per element i.e., one in each axis). + + 10. face_normals_mappings(averaged_normals) + - Maps element axes using reduced normals and identifies principal flow directions. + + 11. reorder_assigned_data(assigned_data, mappings_fd, ref_mapping_fd) + - Reorders element coordinate data to match a given reference axis mapping. 
Initially, each element's coordinates are in local (r,s,t) space, which can vary in position within the element. + - This function maps them to the global (x,y,z) space using rank 0's element 0 as the reference. + + 12. correct_axis_signs(assigned_data_new, extract_reference_r, extract_reference_s, extract_reference_t, r_diff_new, s_diff_new, t_diff_new) + - Flips element axes if they are sign-inverted relative to a reference element (taken from rank 0's element 0). + +""" +################################################################################################################################################## +################################################################################################################################################## + from mpi4py import MPI import numpy as np from ..datatypes.msh import Mesh @@ -9,18 +64,17 @@ rank = comm.Get_rank() size = comm.Get_size() -def generate_data(fname_3d: str, fname_out: str): +def generate_data(fname_3d: str): """ Loads and processes 3D field data. This function: - - Reads mesh and field data from `fname_3d`. - - Extracts global coordinates of elements. - - Prepares structured mesh and field registry for further computations. + - Reads mesh and field data from `fname_3d`. + - Extracts global coordinates of elements. + - Prepares structured mesh and field registry for further computations. Args: fname_3d (str): Path to the input 3D field file. - fname_out (str): Path to the output processed field file. Returns: assigned_data (np.ndarray): Global coordinates of elements. @@ -34,12 +88,13 @@ def generate_data(fname_3d: str, fname_out: str): fld_3d_r = FieldRegistry(comm) pynekread(fname_3d, comm, data_dtype=np.single, fld=fld_3d_r) - pynekwrite(fname_out, comm, msh=msh_3d, fld=fld_3d, write_mesh=True, wdsz=4) - - data = read_data(comm, fname_out, ["x", "y", "z", "scal_0"], dtype = np.single) + # Based on your requirement one can also add "scal_0", "scal_1", ... 
for the data to read. For now we have added "x", "y", "z" only.
+ data = read_data(comm, fname_3d, ["x", "y", "z"], dtype = np.single)

 # Extract global coordinates of elements
 x_global, y_global, z_global = data["x"], data["y"], data["z"]
+
+ # Determine the size of the data. For now we are using all available data
 size_data = x_global.shape[0]
 subset_x, subset_y, subset_z = (arr[:size_data] for arr in (x_global, y_global, z_global))
@@ -54,9 +109,9 @@ def calculate_face_differences(assigned_data):
 Computes face difference vectors for each element.

 The function calculates:
- - r_diff: Difference between front and back faces.
- - s_diff: Difference between top and bottom faces.
- - t_diff: Difference between left and right faces.
+ - r_diff: Difference between front and back faces.
+ - s_diff: Difference between top and bottom faces.
+ - t_diff: Difference between left and right faces.

 Args:
 assigned_data (np.ndarray): Array containing element coordinates.
@@ -77,9 +132,9 @@ def calculate_face_normals(assigned_data):
 Computes normal vectors for each face of an element.

 Extracts:
- - Front and back face normals.
- - Left and right face normals.
- - Top and bottom face normals.
+ - Front and back face normals.
+ - Left and right face normals.
+ - Top and bottom face normals.

 Args:
 assigned_data (np.ndarray): Array containing element coordinates.
@@ -129,9 +184,9 @@ def calculate_face_averages(assigned_data):
 Similar to calculate_face_differences, just for re-verification. It computes the average difference vectors for each face.

 The function calculates:
- - r_avg_diff: Averaged difference between front and back faces.
- - s_avg_diff: Averaged difference between top and bottom faces.
- - t_avg_diff: Averaged difference between left and right faces.
+ - r_avg_diff: Averaged difference between front and back faces.
+ - s_avg_diff: Averaged difference between top and bottom faces.
+ - t_avg_diff: Averaged difference between left and right faces.
Args: assigned_data (np.ndarray): Array containing element coordinates. @@ -147,23 +202,36 @@ def calculate_face_averages(assigned_data): t_avg_diff = t_avg_diff/np.linalg.norm(t_avg_diff, axis=1, keepdims=True) return r_avg_diff, s_avg_diff, t_avg_diff -# Method: 1 +# Methods for calculating the face mappings: +# Method-1 def face_mappings(cal_r, cal_s, cal_t): """ - Determines face mappings based on directional differences. - - This function: - - Assigns maps local coordinate axes (r,s,t) to global coordinate axes (X,Y,Z). - - Determines mappings based on the dominant direction per face. - - Ensures unique assignments to avoid duplication. + Determines how each of the three local element axes (r, s, t) should be mapped to the global coordinate axes (x, y, z), based on the dominant + component in their face-to-face difference vectors. + + The inputs are (num_elements, 3) arrays: + - cal_r: difference vectors between opposing r-faces + - cal_s: difference vectors between opposing s-faces + - cal_t: difference vectors between opposing t-faces + - Each vector contains the x, y, and z components of the difference. + + Algorithm for each element: + - Take the absolute value of each component (x, y, z) in r, s, and t + to measure the magnitude in each direction. + - For each local axis (r, s, t): + a. Find the direction (x, y, or z) with the largest magnitude. + b. If that global direction is already assigned to another local axis, + invalidate it (-inf) and pick the next-largest direction. + - Assign each local axis to exactly one unique global axis. Args: - cal_r (np.ndarray): Difference vectors for r faces. - cal_s (np.ndarray): Difference vectors for s faces. - cal_t (np.ndarray): Difference vectors for t faces. + cal_r (np.ndarray): (num_elements, 3) array of r-face difference vectors. + cal_s (np.ndarray): (num_elements, 3) array of s-face difference vectors. + cal_t (np.ndarray): (num_elements, 3) array of t-face difference vectors. 
Returns: - List[Tuple[str, str, str]]: Face mappings for each element. + list[tuple]: One tuple per element ('x', 'y', 'z'), + indicating how each local axis (r, s, t) maps to global axes. """ num_elements = cal_r.shape[0] mappings = [] @@ -197,7 +265,7 @@ def face_mappings(cal_r, cal_s, cal_t): mappings.append(tuple(mapping)) return mappings -# Compare all methods: +# Compare each method w.r.t reference mapping: def compare_mappings(mappings, ref_mapping, method: str): """ Compares computed face mappings against a reference mapping. @@ -224,6 +292,7 @@ def compare_mappings(mappings, ref_mapping, method: str): matching_percentage = (match_count / num_elements) * 100 # print(f"Rank {rank} has matching percentage of {method} w.r.t ref_mapping_fd {ref_mapping}: {matching_percentage:.2f}%") +# Compare all methods: def compare_methods(mappings_fd, mappings_fa): """ Compares face difference-based mappings and face average-based mappings. @@ -253,9 +322,8 @@ def compare_methods(mappings_fd, mappings_fa): def align_normals(normals, normals_pair, direction_type): """ Aligns normal vectors for paired faces. - - - Ensures consistency in direction for paired faces. - - Uses a reference normal based on `direction_type` for alignment. + - Ensures consistency in direction for paired faces. + - Uses a reference normal based on `direction_type` for alignment. Args: normals (np.ndarray): Array of normal vectors (shape: [num_elements, 6, 3]). @@ -294,10 +362,10 @@ def reduce_face_normals(normals): Computes reduced normals for face pairs by ensuring consistency and averaging. This function: - - Separates face normals into paired groups (front/back, left/right, top/bottom). - - Aligns normals to maintain consistent orientation. - - Normalizes the face normal vectors. - - Computes averaged normals for each face pair. + - Separates face normals into paired groups (front/back, left/right, top/bottom). + - Aligns normals to maintain consistent orientation. 
+ - Normalizes the face normal vectors.
+ - Computes averaged normals for each face pair.

 Args:
 normals (np.ndarray): Array of normal vectors (shape: [num_elements, 6, 3]).
@@ -329,15 +397,23 @@
 return averaged_normals


-def directions_face_normals(averaged_normals):
+def face_normals_mappings(averaged_normals):
 """
 Determines face normal mappings and flow directions.
-
- This function:
- - Assigns maps local coordinate axes (r,s,t) to global coordinate axes (X,Y,Z) based on dominant normal directions.
- - Ensures unique directional assignment per element.
- - Identifies the principal flow direction for each element.
-
+ Determines how each of the three local element axes (r, s, t) should be mapped to the global coordinate axes (x, y, z), based on the dominant component of their averaged face normals.
+ Also identifies the principal flow direction for each element.
+
+ Algorithm for each element:
+ - Take the absolute value of each normal component to measure
+ magnitude.
+ - For each local axis (r, s, t):
+ a. Find the global axis (x, y, or z) with the largest magnitude.
+ b. If that global axis is already assigned to another local axis,
+ invalidate it (-inf) and pick the next-largest direction.
+ c. Assign the chosen global axis to the current local axis.
+ - While assigning, once the global 'z' axis is assigned, mark that
+ local axis index as the element's principal flow direction.
+
 Args:
 averaged_normals (np.ndarray): Array of averaged face normals (shape: [num_elements, 3, 3]).
@@ -383,17 +459,19 @@
 # Reordering of subset
 def reorder_assigned_data(assigned_data, mappings_fd, ref_mapping_fd):
 """
- Reorders assigned data to match a reference mapping.
-
- This function:
- - Identifies mismatches between computed mappings and the reference mapping.
- - Applies permutation swaps to correct element order.
- - Ensures consistency in face assignments across all elements.
+ Reorders each element’s assigned data to match a given reference axis mapping. + The assigned data for each element is initially in local (r, s, t) order, which can differ between elements. This function ensures all elements use + the same axis order as a reference mapping (taken from rank 0's element 0). + For each element: + - Compare its current mapping (from mappings_fd) with the reference mapping. + - If they differ, determine the permutation needed to match the reference. + - Apply the corresponding swap or cycle of axes to reorder the data array. + - Leave the element unchanged if it already matches the reference order. Args: - assigned_data (np.ndarray): Array of element data. - mappings_fd (List[Tuple[str, str, str]]): Computed face difference mappings. - ref_mapping_fd (Tuple[str, str, str]): Reference mapping from rank 0. + assigned_data (np.ndarray): Array containing element data, shape: (num_elements, dim, dim, dim, ...). + mappings_fd (list[tuple[str, str, str]]): Axis mappings for each element, derived from face differences. + ref_mapping_fd (tuple[str, str, str]): Reference mapping tuple from rank 0's element 0. Returns: np.ndarray: Reordered element data matching the reference mapping. @@ -448,24 +526,25 @@ def reorder_assigned_data(assigned_data, mappings_fd, ref_mapping_fd): def correct_axis_signs(assigned_data_new, extract_reference_r, extract_reference_s, extract_reference_t, r_diff_new, s_diff_new, t_diff_new): """ - Adjusts the axis signs based on a reference element. + Aligns the sign (orientation) of each element's local axes with a reference element. - This function: - - Computes sign consistency using dot products. - - Flips axes if mismatched with the reference element. - - Ensures all elements conform to the correct axis orientation. + After reordering the axes to match the reference mapping, some elements may still have their local axes pointing in the opposite direction. 
+ This function corrects that by:
+ - Comparing each element's "r", "s", "t" difference vectors with the reference vectors using dot products to determine sign alignment.
+ - Flipping the data along the corresponding array axis if the sign is opposite.
+ - Ensuring all elements match the reference element's axis orientations.

 Args:
- assigned_data_new (np.ndarray): Reordered element data.
- extract_reference_r (np.ndarray): Reference r-axis direction.
- extract_reference_s (np.ndarray): Reference s-axis direction.
- extract_reference_t (np.ndarray): Reference t-axis direction.
- r_diff_new (np.ndarray): Updated r-axis difference vectors.
- s_diff_new (np.ndarray): Updated s-axis difference vectors.
- t_diff_new (np.ndarray): Updated t-axis difference vectors.
+ assigned_data_new (np.ndarray): Element data after axis reordering.
+ extract_reference_r (np.ndarray): Reference r-axis direction vector.
+ extract_reference_s (np.ndarray): Reference s-axis direction vector.
+ extract_reference_t (np.ndarray): Reference t-axis direction vector.
+ r_diff_new (np.ndarray): Updated r-axis difference vectors for all elements.
+ s_diff_new (np.ndarray): Updated s-axis difference vectors for all elements.
+ t_diff_new (np.ndarray): Updated t-axis difference vectors for all elements.

 Returns:
- np.ndarray: Corrected element data with aligned axes.
+ np.ndarray: A copy of "assigned_data_new" with all axis orientations corrected to match the reference element.
 """
 assigned_data_final = np.copy(assigned_data_new)