# examples/unindexed/scalar_budgets/process_scalar_stats.py
#
# Example driver: convert merged 2D channel statistics (fluid + passive
# scalar) to 3D, compute the additional budget fields, interpolate them onto
# a structured point cloud, and finally average and assemble the budgets.

# Import required modules
from mpi4py import MPI  # equivalent to the use of MPI_init() in C
import numpy as np
import os

# Get mpi info
comm = MPI.COMM_WORLD

# Fetch the reference SEM dataset if it is not present locally.
sem_data_path = "../../data/sem_data/"
if not os.path.exists(sem_data_path):
    print("Sem data not found, cloning repository...")
    os.system(f"git clone https://github.com/adperezm/sem_data.git {sem_data_path}")
else:
    print("Sem data found.")

stats_dir = "./"

## % Convert 2D stats to 3D stats

fluid_stats2D_fname = os.path.join(sem_data_path, "statistics/channel_nelv_300_scalars/", "merged_fluid_stats2D0.f00000")
scalar_stats2D_fname = os.path.join(sem_data_path, "statistics/channel_nelv_300_scalars/", "merged_scalar_stats2D0.f00000")
fluid_stats3D_fname = "merged_fluid_stats3D.f00000"
scalar_stats3D_fname = "merged_scalar_stats3D.f00000"

from pysemtools.postprocessing.statistics.RS_budgets import convert_2Dstats_to_3D
from pysemtools.postprocessing.statistics.passive_scalar_budgets import convert_scalar_2Dstats_to_3D

convert_2Dstats_to_3D(fluid_stats2D_fname, fluid_stats3D_fname, datatype='single')
convert_scalar_2Dstats_to_3D(scalar_stats2D_fname, scalar_stats3D_fname, datatype='single')


## % Process the statistics: additional fields

from pysemtools.postprocessing.statistics.RS_budgets import compute_and_write_additional_pstat_fields
from pysemtools.postprocessing.statistics.passive_scalar_budgets import compute_and_write_additional_sstat_fields


compute_and_write_additional_pstat_fields(
    stats_dir,
    fluid_stats3D_fname,
    fluid_stats3D_fname,
    fluid_stats3D_fname,
    if_write_mesh=True,
    which_code='neko',
    nek5000_stat_type='',
    if_do_dssum_on_derivatives=False)


compute_and_write_additional_sstat_fields(
    stats_dir,
    scalar_stats3D_fname,
    scalar_stats3D_fname,
    scalar_stats3D_fname,
    if_write_mesh=True,
    if_do_dssum_on_derivatives=False)


## % Interpolate

def geometric_stretching(dx0, stretching, dxmax, xmax):
    """Return nodes in [0, 1] with geometrically stretched spacing.

    Starts from spacing *dx0* and grows each interval by the factor
    *stretching*; cumulative points beyond *xmax* are dropped and the
    remaining distribution is normalized so the last node is 1, with a
    leading 0 prepended.

    NOTE(review): *dxmax* is currently unused — presumably it was meant to
    cap the spacing; confirm the intent before relying on it.
    """
    dx_base = dx0 * (stretching ** np.linspace(0, 1000, 1001))
    x_base = np.cumsum(dx_base)
    N_inrange = np.sum(x_base <= xmax)
    x_base = x_base[0:N_inrange]
    x_base = x_base / np.max(x_base)

    x = np.zeros(N_inrange + 1)
    x[0] = 0.0
    x[1:N_inrange + 1] = x_base

    return x


def user_defined_interpolating_points():
    """Generate the structured interpolation point cloud.

    Returns
    -------
    xyz : point list produced by ``transform_from_array_to_list``
    Nstruct : list [nx, ny, nz] with the structured dimensions
    """
    import pysemtools.interpolation.utils as interp_utils

    print("generate interpolation points")

    # Create the coordinates of the plane you want
    x_bbox = [0.0, 2. * np.pi]

    nz = 1
    nx = 201

    # add one point to exclude since periodic
    x = np.linspace(x_bbox[0], x_bbox[1], nx + 1)

    z = .001
    # Midpoints of consecutive nodes; the last (periodic) point is dropped.
    # BUGFIX: this was x[0:nz] (nz == 1), which broadcast x[0] against every
    # node instead of forming cell midpoints.
    x = .5 * (x[0:nx] + x[1:])

    re_tau = 180.
    dy0 = .3 / re_tau
    stretching = 1.05
    dymax = 15. / re_tau
    ymax = 1.0

    y_base = geometric_stretching(dy0, stretching, dymax, ymax)
    y_base[0] = 0.1 / re_tau  # to avoid potential issues when mapping to the exact walls
    y_base = y_base - 1
    ny = y_base.size
    Nstruct = [nx, ny, nz]
    print('Nstruct=', Nstruct)

    X, Y, Z = np.meshgrid(x, y_base, z, indexing="ij")

    xyz = interp_utils.transform_from_array_to_list(nx, ny, nz, [X, Y, Z])

    return xyz, Nstruct


# Call the functions
# NOTE: this should either be called from rank 0 only and then properly distributed into mpi ranks
# or each rank should generate its own points
if comm.Get_rank() == 0:
    xyz, Nstruct = user_defined_interpolating_points()
else:
    xyz = 0


from pysemtools.postprocessing.statistics.RS_budgets import interpolate_all_stat_and_pstat_fields_onto_points
from pysemtools.postprocessing.statistics.passive_scalar_budgets import interpolate_all_stat_and_sstat_fields_onto_points

interpolate_all_stat_and_pstat_fields_onto_points(
    stats_dir,
    fluid_stats3D_fname,
    fluid_stats3D_fname,
    fluid_stats3D_fname,
    xyz,
    which_code='neko',
    nek5000_stat_type='',
    if_do_dssum_before_interp=False,
    if_create_boundingBox_for_interp=False,
    if_pass_points_to_rank0_only=True
)

interpolate_all_stat_and_sstat_fields_onto_points(
    stats_dir,
    scalar_stats3D_fname,
    scalar_stats3D_fname,
    scalar_stats3D_fname,
    xyz,
    if_do_dssum_before_interp=False,
    if_create_boundingBox_for_interp=False,
    if_pass_points_to_rank0_only=True
)


## % Finish processing profiles and average

def av_func(x):
    """Average over the first (homogeneous) axis, keeping it as size 1."""
    return np.mean(x, axis=0, keepdims=True)


from pysemtools.postprocessing.statistics.RS_budgets_interpolatedPoints_notMPI import read_interpolated_stat_hdf5_fields
from pysemtools.postprocessing.statistics.passive_scalar_budgets_interpolatedPoints_notMPI import read_interpolated_scalar_stat_hdf5_fields

from pysemtools.postprocessing.statistics.RS_budgets_interpolatedPoints_notMPI import calculate_budgets_in_Cartesian
from pysemtools.postprocessing.statistics.passive_scalar_budgets_interpolatedPoints_notMPI import calculate_scalar_budgets_in_Cartesian

Reynolds_number = 180
Prandtl_number = 0.71
If_average = True
If_convert_to_single = True
fname_averaged_fluid = 'averaged_and_renamed_interpolated_fluid_fields.hdf5'
fname_averaged_scalar = 'averaged_and_renamed_interpolated_scalar_fields.hdf5'
fname_budget_fluid = 'fluid_budgets.hdf5'
fname_budget_scalar = 'scalar_budgets.hdf5'


# Final averaging/budget assembly is serial (not-MPI helpers): rank 0 only.
if comm.Get_rank() == 0:
    read_interpolated_stat_hdf5_fields(
        stats_dir,
        Reynolds_number,
        If_average,
        If_convert_to_single,
        Nstruct,
        av_func,
        output_fname=fname_averaged_fluid
    )

    read_interpolated_scalar_stat_hdf5_fields(
        stats_dir,
        Reynolds_number,
        Prandtl_number,
        If_average,
        If_convert_to_single,
        Nstruct,
        av_func,
        output_fname=fname_averaged_scalar
    )

    calculate_budgets_in_Cartesian(
        path_to_files=stats_dir,
        input_filename=fname_averaged_fluid,
        output_filename=fname_budget_fluid)

    calculate_scalar_budgets_in_Cartesian(
        path_to_files=stats_dir,
        input_scalar_filename=fname_averaged_scalar,
        input_fluid_filename=fname_averaged_fluid,
        output_filename=fname_budget_scalar)

# ---------------------------------------------------------------------------
# (patch boundary) Next file in this diff:
# pysemtools/postprocessing/statistics/passive_scalar_budgets.py
# ---------------------------------------------------------------------------
###########################################################################################
## Preliminary functions to make life easier.
###########################################################################################
###########################################################################################
# %% generic function to compute the gradient of a scalar field
def compute_scalar_first_derivative(comm, msh, coef, scalar, scalar_deriv):
    """Store d(scalar)/dx, d(scalar)/dy, d(scalar)/dz in scalar_deriv.c1/.c2/.c3.

    For 2D meshes (msh.gdim == 2) the out-of-plane component c3 is set to a
    zero field of the same shape as c2.  Exits for any other dimensionality.
    """
    if msh.gdim == 3:
        scalar_deriv.c1 = coef.dudxyz(scalar, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv.c2 = coef.dudxyz(scalar, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv.c3 = coef.dudxyz(scalar, coef.drdz, coef.dsdz, coef.dtdz)
    elif msh.gdim == 2:
        scalar_deriv.c1 = coef.dudxyz(scalar, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv.c2 = coef.dudxyz(scalar, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv.c3 = 0.0 * scalar_deriv.c2
    else:
        import sys

        sys.exit("supports either 2D or 3D data")


###########################################################################################
###########################################################################################


# %% generic function to compute the diagonal second derivatives of a scalar field from its gradient
def compute_scalar_second_derivative(comm, msh, coef, scalar_deriv, scalar_deriv2):
    """Store the diagonal second derivatives d2/dx2, d2/dy2, d2/dz2 in
    scalar_deriv2.c1/.c2/.c3, given the first derivatives in scalar_deriv.

    For 2D meshes c3 is a zero field; exits for any other dimensionality.
    """
    if msh.gdim == 3:
        scalar_deriv2.c1 = coef.dudxyz(scalar_deriv.c1, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv2.c2 = coef.dudxyz(scalar_deriv.c2, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv2.c3 = coef.dudxyz(scalar_deriv.c3, coef.drdz, coef.dsdz, coef.dtdz)
    elif msh.gdim == 2:
        scalar_deriv2.c1 = coef.dudxyz(scalar_deriv.c1, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv2.c2 = coef.dudxyz(scalar_deriv.c2, coef.drdy, coef.dsdy, coef.dtdy)
        # BUGFIX: previously read scalar_deriv2.c3 on the right-hand side
        # before it was ever assigned (AttributeError in the 2D path).
        # Zero the component from c2 instead, mirroring
        # compute_scalar_first_derivative.
        scalar_deriv2.c3 = 0.0 * scalar_deriv2.c2
    else:
        import sys

        sys.exit("supports either 2D or 3D data")
###########################################################################################
###########################################################################################


# %% generic function to write a 9 component field with input as 3 vectors of 3 components each
def write_file_9c(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh):
    """Write one fld file holding the nine components of three 3-vectors,
    registered as "c1".."c9" in single precision (wdsz=4)."""
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    out_fld = FieldRegistry(comm)

    components = (
        dU_dxi.c1, dU_dxi.c2, dU_dxi.c3,
        dV_dxi.c1, dV_dxi.c2, dV_dxi.c3,
        dW_dxi.c1, dW_dxi.c2, dW_dxi.c3,
    )
    for k, comp in enumerate(components, start=1):
        out_fld.add_field(comm, field_name=f"c{k}", field=comp, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=out_fld, wdsz=4, write_mesh=if_write_mesh)

    out_fld.clear()


###########################################################################################
###########################################################################################


# %% generic function to write a 6-component field with input as 2 vectors of 3 components each
def write_file_6c(comm, msh, dU_dxi, dV_dxi, fname_gradU, if_write_mesh):
    """Write one fld file holding the six components of two 3-vectors,
    registered as "c1".."c6" in single precision (wdsz=4)."""
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    out_fld = FieldRegistry(comm)

    components = (
        dU_dxi.c1, dU_dxi.c2, dU_dxi.c3,
        dV_dxi.c1, dV_dxi.c2, dV_dxi.c3,
    )
    for k, comp in enumerate(components, start=1):
        out_fld.add_field(comm, field_name=f"c{k}", field=comp, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=out_fld, wdsz=4, write_mesh=if_write_mesh)

    out_fld.clear()


###########################################################################################
###########################################################################################


# %% generic function to write a 3-component (vector) field
def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh):
    """Write one fld file holding the three components of a vector,
    registered as "c1".."c3" in single precision (wdsz=4)."""
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    out_fld = FieldRegistry(comm)

    for k, comp in enumerate((dU_dxi.c1, dU_dxi.c2, dU_dxi.c3), start=1):
        out_fld.add_field(comm, field_name=f"c{k}", field=comp, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=out_fld, wdsz=4, write_mesh=if_write_mesh)

    out_fld.clear()

###########################################################################################
###########################################################################################


# %% generic function to write the trace of three 3-component (vector) fields (as a scalar)
def write_file_trace3(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh):
    """Write one fld file holding the single scalar dU.c1 + dV.c2 + dW.c3
    (the trace of the tensor formed by the three vectors), registered as
    "sum" in single precision (wdsz=4)."""
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    out_fld = FieldRegistry(comm)

    trace = dU_dxi.c1 + dV_dxi.c2 + dW_dxi.c3
    out_fld.add_field(comm, field_name="sum", field=trace, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=out_fld, wdsz=4, write_mesh=if_write_mesh)

    out_fld.clear()

###########################################################################################
###########################################################################################


# %% function to generate the list of fields from the file header
def return_list_of_vars_from_filename(fname):
    """Build the list of field names stored in a Nek-style fld file.

    Reads the header of *fname* and returns names in file order:
    "vel_0".."vel_N", then "pres" (if present), then "temp" (if present),
    then "scal_0".."scal_M".  The scalar count is capped at 37 to stay
    consistent with converted 2D statistics files.
    """
    from pymech.neksuite.field import read_header

    header = read_header(fname)
    vars_ = header.nb_vars

    vel_fields = vars_[1]
    pres_fields = vars_[2]
    temp_fields = vars_[3]
    scal_fields = vars_[4]
    if scal_fields > 37:
        import warnings

        print("Number of scalar fields: " + (f"{(scal_fields):.0f}"))
        # BUGFIX: the warning previously said "above 39" although the check
        # and the cap are both 37.
        warnings.warn(
            "The number of scalar fields above 37 is not supported. "
            + "This was done to make converted 2D statistics files consistent! "
            + "Limiting the number to 37..."
        )
        scal_fields = 37

    field_names = [f"vel_{i}" for i in range(vel_fields)]

    if pres_fields == 1:
        field_names.append("pres")

    if temp_fields == 1:
        field_names.append("temp")

    field_names += [f"scal_{i}" for i in range(scal_fields)]

    return field_names


###########################################################################################
###########################################################################################


# %% do dssum on a vector with components c1, c2, c3
def do_dssum_on_3comp_vector(dU_dxi, msh_conn, msh):
    """Direct-stiffness-sum each component of a 3-component vector field,
    averaging by multiplicity at shared element points."""
    msh_conn.dssum(field=dU_dxi.c1, msh=msh, average="multiplicity")
    msh_conn.dssum(field=dU_dxi.c2, msh=msh, average="multiplicity")
    msh_conn.dssum(field=dU_dxi.c3, msh=msh, average="multiplicity")


###########################################################################################
###########################################################################################

#%% convert 2D statistics to 3D
def convert_scalar_2Dstats_to_3D(stats2D_filename, stats3D_filename, datatype='single'):
    """Extrude a merged 2D scalar-statistics file into a 3D one.

    The 2D mesh/fields in *stats2D_filename* are extruded in z by the size of
    the smallest element (GLL point distribution) and written to
    *stats3D_filename*.  The missing z velocity is recovered from the 's37'
    slot of the extruded file and registered as "w".

    datatype : 'single' (default) writes 4-byte words; anything else writes
    double precision (8-byte words).
    """
    from mpi4py import MPI
    import numpy as np
    import warnings
    from pysemtools.io.ppymech.neksuite import pynekread, pynekwrite
    from pysemtools.datatypes.msh import Mesh
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.datatypes.utils import extrude_2d_sem_mesh
    from pysemtools.interpolation.interpolator import get_bbox_from_coordinates
    from pysemtools.interpolation.point_interpolator.single_point_helper_functions import GLL_pwts

    # Get mpi info
    comm = MPI.COMM_WORLD

    # initialize fields
    msh = Mesh(comm, create_connectivity=False)
    fld = FieldRegistry(comm)

    if datatype == 'single':
        data_type = np.single
        wdsz = 4
    else:
        data_type = np.double
        wdsz = 8

    # read mesh and fields
    pynekread(stats2D_filename, comm, data_dtype=data_type, msh=msh, fld=fld)

    # get the list of fields stored in the file
    field_names = return_list_of_vars_from_filename(stats2D_filename)
    print('fields in the given file: ', field_names)

    # extruding mesh and fields
    ## Find how much to extrude - Extrude the size of the smallest element
    bbox = get_bbox_from_coordinates(msh.x, msh.y, msh.z)
    bbox_dist = np.zeros((bbox.shape[0], 3))
    bbox_dist[:, 0] = bbox[:, 1] - bbox[:, 0]
    bbox_dist[:, 1] = bbox[:, 3] - bbox[:, 2]
    bbox_dist[:, 2] = bbox[:, 5] - bbox[:, 4]
    local_bbox_min_dist = np.min(
        np.sqrt(bbox_dist[:, 0] ** 2 + bbox_dist[:, 1] ** 2 + bbox_dist[:, 2] ** 2) / 2
    )
    bbox_min_dist = comm.allreduce(local_bbox_min_dist, op=MPI.MIN)
    ## Generate the point distribution
    x_, _ = GLL_pwts(msh.lx)
    extrusion_size = bbox_min_dist
    point_dist = np.flip(x_ * extrusion_size)
    msh3d, fld3d = extrude_2d_sem_mesh(comm, lz=msh.lx, msh=msh, fld=fld, point_dist=point_dist)

    # filling in the missing z velocity
    z_vel = fld3d.registry['s37']
    # BUGFIX: honor the requested datatype (this was hard-coded to np.single,
    # silently truncating double-precision output).
    fld3d.add_field(comm, field_name="w", field=z_vel, dtype=data_type)
    warnings.warn('The s37 field was not removed from the file. ' +
                  'Be careful with potential inconsistencies!')

    # writing the extruded stats file
    pynekwrite(stats3D_filename, comm, msh=msh3d, fld=fld3d, write_mesh=True, wdsz=wdsz)



###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
## generic function to compute the additional fields required for budget terms, etc.
## only implemented for Neko
###########################################################################################
###########################################################################################
def compute_and_write_additional_sstat_fields(
    which_dir,
    fname_mesh,
    fname_mean,
    fname_stat,
    if_write_mesh=False,
    if_do_dssum_on_derivatives=False,
):

    ###########################################################################################
    # do some initial checks
    import sys

    # check if file names are the same
    if fname_mean != fname_stat:
        sys.exit("fname_mean must be the same as fname_stat")


    ###########################################################################################
    import warnings
    from mpi4py import MPI  # equivalent to the use of MPI_init() in C
    import numpy as np

    from pysemtools.datatypes.msh import Mesh
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.datatypes.coef import Coef
    from pysemtools.io.ppymech.neksuite import pynekread

    if if_do_dssum_on_derivatives:
        from pysemtools.datatypes.msh_connectivity import MeshConnectivity

########################################################################################### + # Get mpi info + comm = MPI.COMM_WORLD + + ########################################################################################### + # intialize the mesh and some fields + msh = Mesh(comm, create_connectivity=False) + # print('mesh dimension: ', msh.gdim ) + # else: + # msh = Mesh(comm, create_connectivity=False) + + stat_fields = FieldRegistry(comm) + + # generic scalar and vector derivatives + dQ1_dxi = FieldRegistry(comm) + dQ2_dxi = FieldRegistry(comm) + dQ3_dxi = FieldRegistry(comm) + d2Q1_dxi2 = FieldRegistry(comm) + + ########################################################################################### + # using the same .fXXXXXX extenstion as the mean fields + this_ext = fname_mean[-8:] + this_ext_check = fname_stat[-8:] + + # check the two files match. can be replaced by an error, but that seems too harsh and limiting + if this_ext != this_ext_check: + warnings.warn( + "File index of fname_stat and fname_mean differ! Hope you know what you are doing!" + ) + + full_fname_mesh = which_dir + "/" + fname_mesh + full_fname_stat = which_dir + "/" + fname_stat + + ########################################################################################### + # read mesh and compute coefs + pynekread(full_fname_mesh, comm, msh=msh, data_dtype=np.single) + if if_do_dssum_on_derivatives: + msh_conn = MeshConnectivity(comm, msh, rel_tol=1e-5) + + if msh.gdim < 3: + sys.exit("only 3D data is supported at the moment! 
") + + coef = Coef(msh, comm, get_area=False) + + ########################################################################################### + # define file_keys of the fields based on the codes + + # Neko key names taken from: https://neko.cfd/docs/develop/df/d8f/statistics-guide.html + file_keys_S = "pres" + # "US" "VS" "WS" + file_keys_UiS = ["vel_0", "vel_1", "vel_2"] + file_keys_SS = "temp" + # "USS" "VSS" "WSS" + file_keys_UiSS = ["scal_2", "scal_3", "scal_4"] + # "UUS" "VVS" "WWS" "UVS" "UWS" "VWS" + file_keys_UiUjS = ["scal_5", "scal_6", "scal_7", "scal_8", "scal_9", "scal_10"] + file_keys_PS = "scal_11" + + file_keys_UidSdxj = [ + "scal_15", + "scal_16", + "scal_17", + "scal_18", + "scal_19", + "scal_20", + "scal_21", + "scal_22", + "scal_23", + ] + file_keys_SdUidxj = [ + "scal_24", + "scal_25", + "scal_26", + "scal_27", + "scal_28", + "scal_29", + "scal_30", + "scal_31", + "scal_32", + ] + + ########################################################################################### + # S first and second derivatives + ########################################################################################### + + stat_fields.add_field( + comm, + field_name="S", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_S, + dtype=np.single, + ) + + compute_scalar_first_derivative(comm, msh, coef, stat_fields.registry["S"], dQ1_dxi) + compute_scalar_second_derivative(comm, msh, coef, dQ1_dxi, d2Q1_dxi2) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(d2Q1_dxi2, msh_conn, msh) + + this_file_name = which_dir + "/dnSdxn" + this_ext + write_file_6c( + comm, msh, dQ1_dxi, d2Q1_dxi2, this_file_name, if_write_mesh=if_write_mesh + ) + + stat_fields.clear() + + ########################################################################################### + # SS first and second derivatives + ########################################################################################### + + 
stat_fields.add_field( + comm, + field_name="SS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SS, + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SS"], dQ1_dxi + ) + compute_scalar_second_derivative(comm, msh, coef, dQ1_dxi, d2Q1_dxi2) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(d2Q1_dxi2, msh_conn, msh) + + this_file_name = which_dir + "/dnSSdxn" + this_ext + write_file_6c( + comm, msh, dQ1_dxi, d2Q1_dxi2, this_file_name, if_write_mesh=if_write_mesh + ) + + stat_fields.clear() + del d2Q1_dxi2 + + ########################################################################################### + # UiS first derivative + ########################################################################################### + + stat_fields.add_field( + comm, + field_name="US", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiS[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiS[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiS[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["US"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dUiSdxj" + this_ext + write_file_9c( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + 
########################################################################################### + # UiSS divergence + ########################################################################################### + + stat_fields.add_field( + comm, + field_name="USS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiSS[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VSS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiSS[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WSS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiSS[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["USS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VSS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WSS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dUjSSdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + ########################################################################################### + # UiUjS gradients: dUiUjS/dxj + ########################################################################################### + + # dUUjS/dxj + + stat_fields.add_field( + comm, + field_name="UUS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UWS", + file_type="fld", + file_name=full_fname_stat, + 
file_key=file_keys_UiUjS[4], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UUS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UVS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UWS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dUUjSdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # dVUjS/dxj + + stat_fields.add_field( + comm, + field_name="UVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UVS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VVS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VWS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dVUjSdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # dWUjS/dxj + + stat_fields.add_field( + comm, + field_name="UWS", + file_type="fld", + 
file_name=full_fname_stat, + file_key=file_keys_UiUjS[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[5], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UWS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VWS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WWS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dWUjSdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + ########################################################################################### + # PS first derivative + ########################################################################################### + + stat_fields.add_field( + comm, + field_name="PS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_PS, + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["PS"], dQ1_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + this_file_name = which_dir + "/dPSdxj" + this_ext + write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) + + stat_fields.clear() + + ########################################################################################### + # UidSdxj gradients: d(UidS/dxj)/dxj + ########################################################################################### 
+ + # d(UdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="UdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dUdSdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(VdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="VdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdz"], dQ3_dxi + ) + + if 
if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dVdSdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(WdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="WdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[6], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[7], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[8], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dWdSdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + ########################################################################################### + # SdUidxj gradients: d(SdUi/dxj)/dxj + ########################################################################################### + + # d(SdU/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="SdUdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[0], + dtype=np.single, + ) + 
stat_fields.add_field( + comm, + field_name="SdUdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdUdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dSdUdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(SdV/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="SdVdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdVdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdVdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + 
this_file_name = which_dir + "/dSdVdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(SdW/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="SdWdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[6], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdWdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[7], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdWdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[8], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dSdWdxjdxj" + this_ext + write_file_trace3( + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + del dQ1_dxi, dQ2_dxi, dQ3_dxi + if comm.Get_rank() == 0: + print("-------As a great man once said: run successful: dying ...") + + +########################################################################################### +########################################################################################### +########################################################################################### +########################################################################################### +# interpolate the 42+N fields onto the user specified set of points +# only implemented for Neko 
+########################################################################################### +########################################################################################### +def interpolate_all_stat_and_sstat_fields_onto_points( + which_dir, + fname_mesh, + fname_mean, + fname_stat, + xyz, + if_do_dssum_before_interp=True, + if_create_boundingBox_for_interp=False, + if_pass_points_to_rank0_only=True, + interpolation_output_fname="interpolated_scalar_fields.hdf5", + find_points_tol=None, +): + + from mpi4py import MPI # equivalent to the use of MPI_init() in C + import numpy as np + from scipy.io import savemat + + from pysemtools.datatypes.msh import Mesh + from pysemtools.datatypes.field import FieldRegistry + from pysemtools.datatypes.coef import Coef + from pysemtools.io.ppymech.neksuite import pynekread + from pysemtools.interpolation.probes import Probes + + if if_do_dssum_before_interp: + from pysemtools.datatypes.msh_connectivity import MeshConnectivity + + if if_create_boundingBox_for_interp: + from pysemtools.datatypes.msh_partitioning import MeshPartitioner + + ########################################################################################### + # do some initial checks + import sys + + # check if file names are the same + if fname_mean != fname_stat: + sys.exit("fname_mean must be the same as fname_stat") + + + ########################################################################################### + # Get mpi info + comm = MPI.COMM_WORLD + + ########################################################################################### + # intialize the mesh and some fields + msh = Mesh(comm, create_connectivity=False) + mean_fields = FieldRegistry(comm) + + ########################################################################################### + # using the same .fXXXXXX extenstion as the mean fields + this_ext = fname_mean[-8:] + this_ext_check = fname_stat[-8:] + + # check the two files match. 
can be replaced by an error, but that seems too harsh and limiting + if this_ext != this_ext_check: + warnings.warn( + "File index of fname_stat and fname_mean differ! Hope you know what you are doing!" + ) + + full_fname_mesh = which_dir + "/" + fname_mesh + + # get the file name for the 42 fileds collected in run time + these_names = [ which_dir + "/" + fname_stat ] + + # add the name of the additional fields + these_names.extend( + [ + which_dir + "/dnSdxn" + this_ext, + which_dir + "/dnSSdxn" + this_ext, + which_dir + "/dUiSdxj" + this_ext, + which_dir + "/dUjSSdxj" + this_ext, + which_dir + "/dUUjSdxj" + this_ext, + which_dir + "/dVUjSdxj" + this_ext, + which_dir + "/dWUjSdxj" + this_ext, + which_dir + "/dPSdxj" + this_ext, + which_dir + "/dUdSdxjdxj" + this_ext, + which_dir + "/dVdSdxjdxj" + this_ext, + which_dir + "/dWdSdxjdxj" + this_ext, + which_dir + "/dSdUdxjdxj" + this_ext, + which_dir + "/dSdVdxjdxj" + this_ext, + which_dir + "/dSdWdxjdxj" + this_ext, + ] + ) + + # if comm.Get_rank() == 0: + # print(these_names) + + ########################################################################################### + # read mesh and redefine it based on the boundaring box if said + pynekread(full_fname_mesh, comm, msh=msh, data_dtype=np.single) + + if msh.gdim < 3: + sys.exit( + "only 3D data is supported!", + "you can convert your data to 3D using 'convert_2Dstats_to_3D'!", + ) + + if if_do_dssum_before_interp: + msh_conn = MeshConnectivity(comm, msh, rel_tol=1e-5) + + if if_create_boundingBox_for_interp: + xyz_max = np.max(xyz, axis=0) + xyz_min = np.min(xyz, axis=0) + + if comm.Get_rank() == 0: + print("xyz_min: ", xyz_min) + print("xyz_max: ", xyz_max) + + cond = ( + (msh.x >= xyz_min[0]) + & (msh.x <= xyz_max[0]) + & (msh.y >= xyz_min[1]) + & (msh.y <= xyz_max[1]) + & (msh.z >= xyz_min[2]) + & (msh.z <= xyz_max[2]) + ) + + mp = MeshPartitioner(comm, msh=msh, conditions=[cond]) + msh = mp.create_partitioned_mesh( + msh, 
partitioning_algorithm="load_balanced_linear", create_conectivity=True + ) + + ########################################################################################### + # compute coef, for interpolation + coef = Coef(msh, comm, get_area=False) + + ########################################################################################### + # initiate probes + # probes = Probes(comm, probes=xyz, msh=msh, \ + # point_interpolator_type="multiple_point_legendre_numpy", \ + # global_tree_type="domain_binning" , \ + # max_pts = 256 ) + + probe_kwargs = { + "comm": comm, + "msh": msh, + "point_interpolator_type": "multiple_point_legendre_numpy", + "max_pts": 128, + "output_fname": interpolation_output_fname, + } + if find_points_tol is not None: + probe_kwargs["find_points_tol"] = find_points_tol + + if not if_pass_points_to_rank0_only: + probes = Probes(probes=xyz, **probe_kwargs) + else: + if comm.Get_rank() == 0: + probes = Probes(probes=xyz, **probe_kwargs) + else: + probes = Probes(probes=None, **probe_kwargs) + + ########################################################################################### + for fname in these_names: + + if comm.Get_rank() == 0: + print("----------- working on file: ", fname) + + ######################### + field_names = return_list_of_vars_from_filename(fname) + # if comm.Get_rank() == 0: + # print(field_names) + ######################### + + for icomp in range(0, len(field_names)): + if comm.Get_rank() == 0: + print( + "---working on field ", icomp, "from a total of ", len(field_names) + ) + + # load the field + mean_fields.add_field( + comm, + field_name="tmpF", + file_type="fld", + file_name=fname, + file_key=field_names[icomp], + dtype=np.single, + ) + + if if_create_boundingBox_for_interp: + mean_fields = mp.create_partitioned_field( + mean_fields, partitioning_algorithm="load_balanced_linear" + ) + + # do dssum to make it continuous + if if_do_dssum_before_interp: + msh_conn.dssum( + field=mean_fields.registry["tmpF"], 
msh=msh, average="multiplicity" + ) + # coef.dssum(mean_fields.registry["tmpF"], msh) + + # interpolate the fields + probes.interpolate_from_field_list( + 0, [mean_fields.registry["tmpF"]], comm, write_data=True + ) + + mean_fields.clear() + + +########################################################################################### +########################################################################################### diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py new file mode 100644 index 0000000..4bbf153 --- /dev/null +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -0,0 +1,794 @@ +# %% +##################################################################################### +##################################################################################### +##################################################################################### +# function to read and store the interpolated fields as structured and averaged fields +##################################################################################### +import time + + +def read_interpolated_scalar_stat_hdf5_fields( + path_to_files, + Reynolds_number, + Prandtl_number, + If_average, + If_convert_to_single, + Nstruct, + av_func, + output_fname="averaged_and_renamed_interpolated_scalar_fields.hdf5", +): + + # %% + import h5py + import numpy as np + import time + + # %% + # Generic function to load field by number + def load_field(num, prefix="interpolated_scalar_fields"): + """Load HDF5 field from file by number. 
+ + Parameters: + - num: field number (int) + - prefix: filename prefix (default: 'interpolated_scalar_fields') + + Returns: + - Flattened numpy array of field data + """ + filename = path_to_files + "/" + f"{prefix}{num:05d}.hdf5" + with h5py.File(filename, "r") as f: + return np.array(f["/field_0"]).flatten() + + # %% + print("reading xyz coordinates...") + start_time = time.time() + with h5py.File( + path_to_files + "/" + "coordinates_interpolated_scalar_fields.hdf5", "r" + ) as f: + XYZ_vec = np.array(f["/xyz"]).T # Transpose to match MATLAB's permute([2,1]) + XYZ_vec = XYZ_vec.T + + Npts = XYZ_vec.shape[0] + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% + print("reading the 42 statistics fields...") + start_time = time.time() + + S_vec = load_field(4) + UiS_vec = np.column_stack([load_field(i) for i in range(1, 4)]) + S2_vec = load_field(5) + S3_vec = load_field(6) + S4_vec = load_field(7) + UiS2_vec = np.column_stack([load_field(i) for i in range(8, 11)]) + UiUjS_vec = np.column_stack([load_field(i) for i in range(11, 17)]) + PS_vec = load_field(17) + PdSdxi_vec = np.column_stack([load_field(i) for i in range(18, 21)]) + UidSdxj_vec = np.column_stack([load_field(i) for i in range(21, 30)]) + SdUidxj_vec = np.column_stack([load_field(i) for i in range(30, 39)]) + SDiss_vec = np.column_stack([load_field(i) for i in range(39, 43)]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% make 1D arrays 2D + S_vec = S_vec[:, np.newaxis] + S2_vec = S2_vec[:, np.newaxis] + S3_vec = S3_vec[:, np.newaxis] + S4_vec = S4_vec[:, np.newaxis] + PS_vec = PS_vec[:, np.newaxis] + + # %% + print("reading the additional fields...") + start_time = time.time() + + dSdxj_vec = np.column_stack([load_field(i) for i in range(43, 46)]) + d2Sdxj2_vec = np.column_stack([load_field(i) for i in range(46, 49)]) + dS2dxj_vec = np.column_stack([load_field(i) for i in range(49, 52)]) + d2S2dxj2_vec = np.column_stack([load_field(i) for i in range(52, 55)]) + 
dUiSdxj_vec = np.column_stack([load_field(i) for i in range(55, 64)]) + dUjS2dxj_vec = np.column_stack([load_field(64)]) + dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(65, 68)]) + dPSdxj_vec = np.column_stack([load_field(i) for i in range(68, 71)]) + dUidSdxjdxj_vec = np.column_stack([load_field(i) for i in range(71, 74)]) + dSdUidxjdxj_vec = np.column_stack([load_field(i) for i in range(74, 77)]) + + print("Finished reading all fields.") + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # Save the variables in a directory + vars = {} + for name in list(locals().keys()): + if name.endswith("_vec") and isinstance(locals()[name], np.ndarray): + vars[name] = locals()[name] + + # %% Convert arrays to single precision if required + if If_convert_to_single: + print("Converting arrays into single precision...") + start_time = time.time() + for name in list(vars.keys()): + if name.endswith("_vec") and isinstance(vars[name], np.ndarray): + # print(np.shape(locals()[name])) + vars[name] = vars[name].astype(np.float32) + print("Conversion complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reshape arrays based on Nstruct + print("Reshaping into arrays...") + start_time = time.time() + for name in list(vars.keys()): + if name.endswith("_vec") and isinstance(vars[name], np.ndarray): + reshaped_name = name[:-4] + "_struct" + vars[reshaped_name] = vars[name].reshape( + (Nstruct[1], Nstruct[0], Nstruct[2], vars[name].shape[1]), order="F" + ) + del vars[name] + print("Reshaping complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Permute arrays to original shape + print("Permuting arrays into the original shape...") + start_time = time.time() + for name in list(vars.keys()): + if name.endswith("_struct") and isinstance(vars[name], np.ndarray): + vars[name] = np.transpose(vars[name], (1, 0, 2, 3)) + print("Permutation complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # 
%% Apply user-specified averaging function if required + if If_average: + print("Taking the user-specified average using function av_func...") + start_time = time.time() + for name in list(vars.keys()): + if name.endswith("_struct") and isinstance(vars[name], np.ndarray): + vars[name] = av_func(vars[name]) + print("Averaging complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reynolds and Prandtl numbers needed to calculate viscous related terms later + Rer_here = Reynolds_number + Prt_here = Prandtl_number + vars["Rer_here"] = Rer_here + vars["Prt_here"] = Prt_here + + # %% Save the data in HDF5 format + print("Saving the data in HDF5 format...") + start_time = time.time() + with h5py.File(path_to_files + "/" + output_fname, "w") as hf: + global_vars = dict(vars) # Create a copy to avoid modification issues + for name, data in global_vars.items(): + if (name.endswith("_struct") or name.endswith("_here")) and isinstance( + data, (np.ndarray, int, float) + ): + hf.create_dataset(name, data=data) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% + print("Data saved successfully in HDF5 format.") + + +# %% +##################################################################################### +##################################################################################### +##################################################################################### +# function to read the raw but averaged fields and calcuate the scalar budgets in Cartesian coordinates +# the fluid statistics are also needed to calculate some terms in the budgets +##################################################################################### +def calculate_scalar_budgets_in_Cartesian( + path_to_files="./", + input_scalar_filename="averaged_and_renamed_interpolated_scalar_fields.hdf5", + input_fluid_filename="averaged_and_renamed_interpolated_fields.hdf5", + output_filename="sstat3d_format.hdf5", +): + + # %% + import h5py + import 
numpy as np + import time + import sys + + # %% + with ( + h5py.File( + path_to_files + "/" + input_scalar_filename, "r" + ) as input_scalar_file, + h5py.File(path_to_files + "/" + input_fluid_filename, "r") as input_fluid_file, + h5py.File(path_to_files + "/" + output_filename, "w") as output_file, + ): + + # %% + Rer_here = np.float64(input_scalar_file["Rer_here"]) + print("Reynolds number = ", Rer_here) + + output_file.create_dataset("Rer_here", data=Rer_here, compression=None) + + # %% + Prt_here = np.float64(input_scalar_file["Prt_here"]) + print("Prandtl number = ", Prt_here) + + output_file.create_dataset("Prt_here", data=Prt_here, compression=None) + + # %% XYZ coordinates + print("--------------working on XYZ coordinates...") + start_time = time.time() + + XYZ = np.array(input_scalar_file["XYZ_struct"]) + output_file.create_dataset("XYZ", data=XYZ, compression=None) + Nxyz = XYZ.shape[:3] + print("Number of points: ", Nxyz) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Check if scalar XYZ coordinates match fluid XYZ coordinates + print( + "--------------checking if scalar XYZ coordinates match fluid XYZ coordinates..." + ) + start_time = time.time() + + XYZ_fluid = np.array(input_fluid_file["XYZ_struct"]) + Nxyz_fluid = XYZ_fluid.shape[:3] + + # Check if number of points match + if Nxyz != Nxyz_fluid: + sys.exit( + "Number of points in scalar and fluid files do not match! 
" + f"Scalar: {Nxyz}, Fluid: {Nxyz_fluid}" + ) + + # Check if coordinates match + if not np.allclose(XYZ, XYZ_fluid): + sys.exit("EXYZ coordinates in scalar and fluid files do not match!") + else: + print("XYZ coordinates match between scalar and fluid files.") + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Load required fluid statistics for calculating scalar budgets + # %% Mean velocities + print("--------------loading mean velocities...") + start_time = time.time() + + Ui = np.array(input_fluid_file["UVW_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean velocity gradients + print("--------------loading mean velocity gradients...") + start_time = time.time() + + dUidXj = np.array(input_fluid_file["dUidxj_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean velocity second derivatives + print("--------------loading mean velocity second derivatives...") + start_time = time.time() + + d2UidXj2 = np.array(input_fluid_file["d2Uidx2_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Pressure + print("--------------loading pressure...") + start_time = time.time() + + P = np.array(input_fluid_file["P_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Pressure gradients + print("--------------loading pressure gradients...") + start_time = time.time() + + dPdXj = np.array(input_fluid_file["dPdx_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reynolds stresses + print("--------------loading Reynolds stresses...") + start_time = time.time() + + UiUj = np.array(input_fluid_file["UiUj_struct"]) + UiUj = UiUj - (Ui[..., [0, 1, 2, 0, 0, 1]] * Ui[..., [0, 1, 2, 1, 2, 2]]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reynolds stress gradients + print("--------------loading Reynolds stress gradients...") + start_time = time.time() + + dUiUjdXk = 
np.array(input_fluid_file["dUiUjdx_struct"]) + dUiUjdXk = ( + dUiUjdXk + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Start processing scalar statistics + + # %% Scalar and Its Moments + print("--------------working on scalar and its moments...") + start_time = time.time() + + S = np.array(input_scalar_file["S_struct"]) + S2 = np.array(input_scalar_file["S2_struct"]) + S3 = np.array(input_scalar_file["S3_struct"]) + S4 = np.array(input_scalar_file["S4_struct"]) + + S2 = S2 - S**2 + S3 = S3 - S**3 - 3 * S * S2 + S4 = S4 - S**4 - 6 * S**2 * S2 - 4 * S * S3 + + output_file.create_dataset("S", data=S, compression=None) + output_file.create_dataset("S2", data=S2, compression=None) + output_file.create_dataset("S3", data=S3, compression=None) + output_file.create_dataset("S4", data=S4, compression=None) + + del S3, S4 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent Scalar Fluxes + print("--------------working on scalar fluxes...") + start_time = time.time() + + UiS = np.array(input_scalar_file["UiS_struct"]) + + UiS = UiS - Ui * S + + output_file.create_dataset("UiS", data=UiS, compression=None) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar variance fluxes + print("--------------working on scalar variance fluxes...") + start_time = time.time() + + UiS2 = np.array(input_scalar_file["UiS2_struct"]) + + UiS2 = UiS2 - Ui * S2 - 2 * S * UiS - Ui * S**2 + + output_file.create_dataset("UiS2", data=UiS2, compression=None) + del UiS2 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux transport + print("--------------working on scalar flux transport...") + start_time 
= time.time() + + UiUjS = np.array(input_scalar_file["UiUjS_struct"]) + + UiUjS = ( + UiUjS + - Ui[..., [0, 1, 2, 0, 0, 1]] * UiS[..., [0, 1, 2, 1, 2, 2]] + - Ui[..., [0, 1, 2, 1, 2, 2]] * UiS[..., [0, 1, 2, 0, 0, 1]] + - S * UiUj[..., [0, 1, 2, 3, 4, 5]] + - S * Ui[..., [0, 1, 2, 0, 0, 1]] * Ui[..., [0, 1, 2, 1, 2, 2]] + ) + + output_file.create_dataset("UiUjS", data=UiUjS, compression=None) + del UiUjS + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Pressure-scalar correlation + print("--------------working on pressure-scalar correlation...") + start_time = time.time() + + PS = np.array(input_scalar_file["PS_struct"]) + + PS = PS - P * S + + output_file.create_dataset("PS", data=PS, compression=None) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar gradients + print("--------------working on mean scalar gradients...") + start_time = time.time() + + dSdXj = np.array(input_scalar_file["dSdxj_struct"]) + + output_file.create_dataset("dSdXj", data=dSdXj, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: convection + print("--------------working on mean scalar eqn: convection...") + + start_time = time.time() + + S_convection = np.sum(Ui * dSdXj, axis=-1) + + S_convection = -S_convection # Put on RHS + + output_file.create_dataset("S_convection", data=S_convection, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: turbulent diffusion + print("--------------working on mean scalar eqn: turbulent diffusion...") + start_time = time.time() + + dUiSdXj = np.array(input_scalar_file["dUiSdxj_struct"]) + dUiSdXj = ( + dUiSdXj + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + - S * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + ) + + S_turb_diffusion = -np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + + output_file.create_dataset( + "S_turb_diffusion", data=S_turb_diffusion, compression=None 
+ ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: molecular diffusion + print("--------------working on mean scalar eqn: molecular diffusion...") + start_time = time.time() + + d2SdXj2 = np.array(input_scalar_file["d2Sdxj2_struct"]) + + S_molecular_diffusion = (1 / (Rer_here * Prt_here)) * np.sum(d2SdXj2, axis=-1) + + output_file.create_dataset( + "S_molecular_diffusion", data=S_molecular_diffusion, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: residual + print("--------------working on mean scalar eqn: residual...") + start_time = time.time() + + S_residual = S_turb_diffusion + S_molecular_diffusion + S_convection + + output_file.create_dataset("S_residual", data=S_residual, compression=None) + del ( + S_convection, + S_turb_diffusion, + S_molecular_diffusion, + S_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: convection + print("--------------working on scalar variance eqn: convection...") + start_time = time.time() + + dS2dXj = np.array(input_scalar_file["dS2dxj_struct"]) + dS2dXj = dS2dXj - 2 * S * dSdXj + + S2_convection = np.sum(Ui * dS2dXj, axis=-1) + + S2_convection = -S2_convection # Put on RHS + + output_file.create_dataset( + "S2_convection", data=S2_convection, compression=None + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: production + print("--------------working on scalar variance eqn: production...") + start_time = time.time() + + S2_production = -2 * np.sum(UiS * dSdXj, axis=-1) + + output_file.create_dataset( + "S2_production", data=S2_production, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: turbulent diffusion + print("--------------working on scalar variance eqn: turbulent diffusion...") + start_time = time.time() + + dUjS2dXj = 
np.array(input_scalar_file["dUjS2dxj_struct"]) + + dUjS2dXj = ( + dUjS2dXj[..., 0] + - 2 * S[..., 0] * np.sum(Ui * dSdXj, axis=-1) + - S[..., 0] ** 2 * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - 2 * np.sum(UiS * dSdXj, axis=-1) + - 2 * S[..., 0] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + - np.sum(Ui * dS2dXj, axis=-1) + - S2[..., 0] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + ) + + S2_turb_diffusion = -dUjS2dXj + + output_file.create_dataset( + "S2_turb_diffusion", data=S2_turb_diffusion, compression=None + ) + del dUjS2dXj, dS2dXj + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: molecular diffusion + print("--------------working on scalar variance eqn: molecular diffusion...") + start_time = time.time() + + d2S2dXi2 = np.array(input_scalar_file["d2S2dxj2_struct"]) + d2S2dXi2 = d2S2dXi2 - 2 * dSdXj**2 - 2 * S * d2SdXj2 + + S2_molecular_diffusion = 1 / (Rer_here * Prt_here) * np.sum(d2S2dXi2, axis=-1) + + output_file.create_dataset( + "S2_molecular_diffusion", data=S2_molecular_diffusion, compression=None + ) + del d2S2dXi2 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: dissipation + print("--------------working on scalar variance eqn: dissipation...") + start_time = time.time() + + S2Diss = np.array(input_scalar_file["SDiss_struct"])[..., 0] + S2Diss = S2Diss - np.sum(dSdXj**2, axis=-1) + + S2_dissipation = -2 * (1 / (Rer_here * Prt_here)) * S2Diss + + output_file.create_dataset( + "S2_dissipation", data=S2_dissipation, compression=None + ) + del S2Diss + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: residual + print("--------------working on scalar variance eqn: residual...") + start_time = time.time() + + S2_residual = ( + S2_production + + S2_turb_diffusion + + S2_molecular_diffusion + + S2_dissipation + + S2_convection + ) + + output_file.create_dataset("S2_residual", data=S2_residual, compression=None) + del ( + S2_production, + 
S2_turb_diffusion, + S2_molecular_diffusion, + S2_dissipation, + S2_convection, + S2_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: convection + print("--------------working on turbulent scalar flux eqn: convection...") + start_time = time.time() + + UiS_convection = np.zeros((*Nxyz, 3)) + UiS_convection[..., 0] = np.sum(Ui * dUiSdXj[..., :3], axis=-1) + UiS_convection[..., 1] = np.sum(Ui * dUiSdXj[..., 3:6], axis=-1) + UiS_convection[..., 2] = np.sum(Ui * dUiSdXj[..., 6:9], axis=-1) + + UiS_convection = -UiS_convection # Put on RHS + + output_file.create_dataset( + "UiS_convection", data=UiS_convection, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: velocity gradient production + print( + "--------------working on turbulent scalar flux eqn: velocity gradient production..." + ) + start_time = time.time() + + UiS_velocity_gradient_production = np.zeros((*Nxyz, 3)) + UiS_velocity_gradient_production[..., 0] = -np.sum( + UiS * dUidXj[..., :3], axis=-1 + ) + UiS_velocity_gradient_production[..., 1] = -np.sum( + UiS * dUidXj[..., 3:6], axis=-1 + ) + UiS_velocity_gradient_production[..., 2] = -np.sum( + UiS * dUidXj[..., 6:9], axis=-1 + ) + + output_file.create_dataset( + "UiS_velocity_gradient_production", + data=UiS_velocity_gradient_production, + compression=None, + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: scalar gradient production + print( + "--------------working on turbulent scalar flux eqn: scalar gradient production..." 
+ ) + start_time = time.time() + + UiS_scalar_gradient_production = np.zeros((*Nxyz, 3)) + UiS_scalar_gradient_production[..., 0] = -np.sum( + UiUj[..., [0, 3, 4]] * dSdXj, axis=-1 + ) + UiS_scalar_gradient_production[..., 1] = -np.sum( + UiUj[..., [3, 1, 5]] * dSdXj, axis=-1 + ) + UiS_scalar_gradient_production[..., 2] = -np.sum( + UiUj[..., [4, 5, 2]] * dSdXj, axis=-1 + ) + + output_file.create_dataset( + "UiS_scalar_gradient_production", data=UiS_scalar_gradient_production + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: turbulent diffusion + print( + "--------------working on turbulent scalar flux eqn: turbulent diffusion..." + ) + start_time = time.time() + + dUiUjSdXj = np.array(input_scalar_file["dUiUjSdxj_struct"]) + + for i in range(3): + dUiUjSdXj[..., i] = ( + dUiUjSdXj[..., i] + - Ui[..., i] * np.sum(Ui * dSdXj, axis=-1) + - S[..., 0] * np.sum(Ui * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - Ui[..., i] * S[..., 0] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - np.sum(Ui * dUiSdXj[..., 3 * i : 3 * i + 3], axis=-1) + - UiS[..., i] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - Ui[..., i] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + - np.sum(UiS * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + ) + + dUiUjSdXj[..., 0] = ( + dUiUjSdXj[..., 0] + - S[..., 0] * np.sum(dUiUjdXk[..., [0, 10, 14]], axis=-1) + - np.sum(UiUj[..., [0, 3, 4]] * dSdXj, axis=-1) + ) + dUiUjSdXj[..., 1] = ( + dUiUjSdXj[..., 1] + - S[..., 0] * np.sum(dUiUjdXk[..., [9, 4, 17]], axis=-1) + - np.sum(UiUj[..., [3, 1, 5]] * dSdXj, axis=-1) + ) + dUiUjSdXj[..., 2] = ( + dUiUjSdXj[..., 2] + - S[..., 0] * np.sum(dUiUjdXk[..., [12, 16, 8]], axis=-1) + - np.sum(UiUj[..., [4, 5, 2]] * dSdXj, axis=-1) + ) + + UiS_turb_diffusion = -dUiUjSdXj + + output_file.create_dataset( + "UiS_turb_diffusion", data=UiS_turb_diffusion, compression=None + ) + del dUiUjSdXj + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: 
molecular diffusion + print( + "--------------working on turbulent scalar flux eqn: molecular diffusion..." + ) + start_time = time.time() + + dSdUidXjdXj = np.array(input_scalar_file["dSdUidxjdxj_struct"]) + + for i in range(3): + dSdUidXjdXj[..., i] = ( + dSdUidXjdXj[..., i] + - np.sum(dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - S[..., 0] * np.sum(d2UidXj2[..., 3 * i : 3 * i + 3], axis=-1) + ) + + dUidSdXjdXj = np.array(input_scalar_file["dUidSdxjdxj_struct"]) + + for i in range(3): + dUidSdXjdXj[..., i] = ( + dUidSdXjdXj[..., i] + - np.sum(dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - Ui[..., i] * np.sum(d2SdXj2, axis=-1) + ) + + UiS_molecular_diffusion = (1 / (Rer_here * Prt_here)) * dUidSdXjdXj + ( + 1 / Rer_here + ) * dSdUidXjdXj + + output_file.create_dataset( + "UiS_molecular_diffusion", data=UiS_molecular_diffusion, compression=None + ) + del dSdUidXjdXj, dUidSdXjdXj, d2SdXj2 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: scalar-pressure correlation gradient + print( + "--------------working on turbulent scalar flux eqn: scalar-pressure correlation gradient..." + ) + start_time = time.time() + + dPSdXj = np.array(input_scalar_file["dPSdxj_struct"]) + + dPSdXj = dPSdXj - P * dSdXj - S * dPdXj + + UiS_scalar_pressure_correlation_gradient = -dPSdXj + + output_file.create_dataset( + "UiS_scalar_pressure_correlation_gradient", + data=UiS_scalar_pressure_correlation_gradient, + compression=None, + ) + del dPSdXj + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: pressure-scalar gradient correlation + print( + "--------------working on turbulent scalar flux eqn: pressure-scalar gradient correlation..." 
+ ) + start_time = time.time() + + PdSdXi = np.array(input_scalar_file["PdSdxi_struct"]) + + PdSdXi = PdSdXi - P * dSdXj + + UiS_pressure_scalar_gradient_correlation = PdSdXi + + output_file.create_dataset( + "UiS_pressure_scalar_gradient_correlation", + data=UiS_pressure_scalar_gradient_correlation, + compression=None, + ) + del PdSdXi + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: dissipation + print("--------------working on turbulent scalar flux eqn: dissipation...") + start_time = time.time() + + UiSDiss = np.array(input_scalar_file["SDiss_struct"])[..., [1, 2, 3]] + + for i in range(3): + UiSDiss[..., i] = UiSDiss[..., i] - np.sum( + dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1 + ) + + UiS_dissipation = -((1 / Rer_here) + (1 / (Rer_here * Prt_here))) * UiSDiss + + output_file.create_dataset( + "UiS_dissipation", data=UiS_dissipation, compression=None + ) + del UiSDiss + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: residual + print("--------------working on turbulent scalar flux eqn: residual...") + start_time = time.time() + + UiS_residual = ( + UiS_velocity_gradient_production + + UiS_scalar_gradient_production + + UiS_turb_diffusion + + UiS_molecular_diffusion + + UiS_scalar_pressure_correlation_gradient + + UiS_pressure_scalar_gradient_correlation + + UiS_dissipation + + UiS_convection + ) + + output_file.create_dataset("UiS_residual", data=UiS_residual, compression=None) + del ( + UiS_velocity_gradient_production, + UiS_scalar_gradient_production, + UiS_turb_diffusion, + UiS_molecular_diffusion, + UiS_scalar_pressure_correlation_gradient, + UiS_pressure_scalar_gradient_correlation, + UiS_dissipation, + UiS_convection, + UiS_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% + print("All computations completed and data saved successfully.")