From fb22ca27cd391b82438af22735491305e9f65b58 Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Fri, 6 Feb 2026 14:59:32 +0100 Subject: [PATCH 01/13] Initial attempt at compute_and_write_additional_sstat_fields --- .../statistics/passive_scalar_budgets.py | 775 ++++++++++++++++++ 1 file changed, 775 insertions(+) create mode 100644 pysemtools/postprocessing/statistics/passive_scalar_budgets.py diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py new file mode 100644 index 0000000..b308202 --- /dev/null +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -0,0 +1,775 @@ +########################################################################################### +########################################################################################### +########################################################################################### +########################################################################################### +## Preliminary functions to make life easier. 
###########################################################################################
###########################################################################################
#%% generic function to compute the gradient of a scalar field
def compute_scalar_first_derivative(comm, msh, coef, scalar, scalar_deriv):
    """Compute the first derivatives of ``scalar`` into ``scalar_deriv``.

    Components ``c1``, ``c2``, ``c3`` of ``scalar_deriv`` receive the x, y and z
    derivatives computed via ``coef.dudxyz``. For 2D meshes (``msh.gdim == 2``)
    the z component is set to a zero field shaped like ``c2``. Any other
    dimension aborts via ``sys.exit``.
    """
    if msh.gdim == 3:
        scalar_deriv.c1 = coef.dudxyz(scalar, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv.c2 = coef.dudxyz(scalar, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv.c3 = coef.dudxyz(scalar, coef.drdz, coef.dsdz, coef.dtdz)
    elif msh.gdim == 2:
        scalar_deriv.c1 = coef.dudxyz(scalar, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv.c2 = coef.dudxyz(scalar, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv.c3 = 0.0 * scalar_deriv.c2
    else:
        import sys
        sys.exit("supports either 2D or 3D data")
###########################################################################################
###########################################################################################


#%% generic function to compute the diagonal second derivatives of a scalar field from its gradient
def compute_scalar_second_derivative(comm, msh, coef, scalar_deriv, scalar_deriv2):
    """Compute the diagonal second derivatives from the gradient ``scalar_deriv``.

    ``scalar_deriv2.c1/c2/c3`` receive d(c1)/dx, d(c2)/dy and d(c3)/dz. For 2D
    meshes the z component is set to a zero field shaped like the computed
    ``c2`` (mirrors compute_scalar_first_derivative). Any other dimension
    aborts via ``sys.exit``.
    """
    if msh.gdim == 3:
        scalar_deriv2.c1 = coef.dudxyz(scalar_deriv.c1, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv2.c2 = coef.dudxyz(scalar_deriv.c2, coef.drdy, coef.dsdy, coef.dtdy)
        scalar_deriv2.c3 = coef.dudxyz(scalar_deriv.c3, coef.drdz, coef.dsdz, coef.dtdz)
    elif msh.gdim == 2:
        scalar_deriv2.c1 = coef.dudxyz(scalar_deriv.c1, coef.drdx, coef.dsdx, coef.dtdx)
        scalar_deriv2.c2 = coef.dudxyz(scalar_deriv.c2, coef.drdy, coef.dsdy, coef.dtdy)
        # BUG FIX: previously zeroed via the output's own (possibly unset) c3
        # attribute; derive the zero field from the freshly computed c2 instead,
        # consistent with compute_scalar_first_derivative.
        scalar_deriv2.c3 = 0.0 * scalar_deriv2.c2
    else:
        import sys
        sys.exit("supports either 2D or 3D data")
###########################################################################################
###########################################################################################


#%% generic function to 
# write a 9 component field with input as 3 vectors of 3 components each
def write_file_9c(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh):
    """Write nine components (three 3-component vectors) to a single-precision fld file.

    Components are named c1..c9 in the order dU_dxi.(c1,c2,c3), dV_dxi.(c1,c2,c3),
    dW_dxi.(c1,c2,c3).
    """
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    gradU = FieldRegistry(comm)

    components = (
        dU_dxi.c1, dU_dxi.c2, dU_dxi.c3,
        dV_dxi.c1, dV_dxi.c2, dV_dxi.c3,
        dW_dxi.c1, dW_dxi.c2, dW_dxi.c3,
    )
    for idx, component in enumerate(components, start=1):
        gradU.add_field(comm, field_name="c" + str(idx), field=component, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh)

    gradU.clear()
###########################################################################################
###########################################################################################


#%% generic function to write a 6-component field with input as 2 vectors of 3 components each
def write_file_6c(comm, msh, dU_dxi, dV_dxi, fname_gradU, if_write_mesh):
    """Write six components (two 3-component vectors) to a single-precision fld file.

    Components are named c1..c6 in the order dU_dxi.(c1,c2,c3), dV_dxi.(c1,c2,c3).
    """
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    gradU = FieldRegistry(comm)

    components = (
        dU_dxi.c1, dU_dxi.c2, dU_dxi.c3,
        dV_dxi.c1, dV_dxi.c2, dV_dxi.c3,
    )
    for idx, component in enumerate(components, start=1):
        gradU.add_field(comm, field_name="c" + str(idx), field=component, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh)

    gradU.clear()
###########################################################################################
###########################################################################################


#%% generic function to write a 3-component (vector) field
def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh):
    """Write the three components of one vector to a single-precision fld file.

    Components are named c1..c3 in the order dU_dxi.(c1,c2,c3).
    """
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.io.ppymech.neksuite import pynekwrite
    import numpy as np

    gradU = FieldRegistry(comm)

    components = (dU_dxi.c1, dU_dxi.c2, dU_dxi.c3)
    for idx, component in enumerate(components, start=1):
        gradU.add_field(comm, field_name="c" + str(idx), field=component, dtype=np.single)

    pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh)

    gradU.clear()
###########################################################################################
###########################################################################################



#%% function to generate the list of fields from the file header
def return_list_of_vars_from_filename(fname):
    """Return the list of field names present in ``fname`` based on its header.

    Names follow the pysemtools key convention: ``vel_i`` for each velocity
    field, ``pres``/``temp`` when present, and ``scal_i`` for each scalar.
    The number of scalar fields is capped at 37 (a warning is emitted above
    that count).
    """
    from pymech.neksuite.field import read_header

    header = read_header(fname)
    vars_ = header.nb_vars

    vel_fields = vars_[1]
    pres_fields = vars_[2]
    temp_fields = vars_[3]
    scal_fields = vars_[4]
    if scal_fields > 37:
        import warnings
        print("Number of scalar fields: " + (f'{(scal_fields):.0f}'))
        # BUG FIX: the message previously said "above 39" although the check
        # and the cap below are both 37.
        warnings.warn("The number of scalar fields above 37 is not supported. " +
                      "This was done to make converted 2D statistics files consistent! " +
                      "Limiting the number to 37...")
        scal_fields = 37

    field_names = []
    for i in range(vel_fields):
        field_names = field_names + [("vel_" + str(i))]

    if pres_fields == 1:
        field_names = field_names + ["pres"]

    if temp_fields == 1:
        field_names = field_names + ["temp"]

    for i in range(scal_fields):
        field_names = field_names + [("scal_" + str(i))]

    return field_names
###########################################################################################
###########################################################################################

#%% do dssum on a vector with components c1, c2, c3
def do_dssum_on_3comp_vector(dU_dxi, msh_conn, msh):
    """Apply multiplicity-averaged dssum to the c1, c2, c3 components of a vector."""
    msh_conn.dssum(field=dU_dxi.c1, msh=msh, average="multiplicity")
    msh_conn.dssum(field=dU_dxi.c2, msh=msh, average="multiplicity")
    msh_conn.dssum(field=dU_dxi.c3, msh=msh, average="multiplicity")
###########################################################################################
###########################################################################################



###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
## generic function to compute the additional fields required for budget terms, etc. 
###########################################################################################
###########################################################################################
def compute_and_write_additional_sstat_fields(
    which_dir,
    fname_mesh,
    fname_stat,
    fname_mean=None,
    if_write_mesh=False,
    which_code="NEKO",
    if_do_dssum_on_derivatives=False,
):
    """Compute and write the additional derivative fields for passive scalar budgets.

    Reads the scalar statistics fields from ``fname_stat`` (Neko key layout),
    computes first derivatives of S, SS, UiS, UiSS, UiUjS and PS (and second
    derivatives of S and SS), and writes them next to the input file using the
    same ``.fXXXXX`` extension (e.g. ``dnSdxn``, ``dUSdx``, ``dPSdx``, ...).

    Parameters
    ----------
    which_dir : directory containing the mesh and statistics files.
    fname_mesh : file to read the mesh from.
    fname_mean, fname_stat : statistics file name; the two must be identical
        (kept as two parameters for interface compatibility).
    if_write_mesh : write the mesh into the output files.
    which_code : only "NEKO" (case-insensitive) is supported.
    if_do_dssum_on_derivatives : apply multiplicity-averaged dssum to each
        derivative field before writing.
    """

    ###########################################################################################
    # do some initial checks
    import sys

    # keep backward compatibility: fname_mean defaults to fname_stat
    if fname_mean is None:
        fname_mean = fname_stat

    # check if file names are the same
    if fname_mean != fname_stat:
        sys.exit(
            "fname_mean must be the same as fname_stat"
        )

    # Scalar statistics only implemented for Neko.
    # BUG FIX: casefold() returns lower case, so the comparison must be against
    # "neko"; comparing against "NEKO" made this check abort for every input.
    if which_code.casefold() != "neko":
        sys.exit("only NEKO is supported for passive scalar statistics")

    ###########################################################################################
    import warnings
    from mpi4py import MPI  # equivalent to the use of MPI_init() in C
    import numpy as np

    from pysemtools.datatypes.msh import Mesh
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.datatypes.coef import Coef
    from pysemtools.io.ppymech.neksuite import pynekread

    if if_do_dssum_on_derivatives:
        from pysemtools.datatypes.msh_connectivity import MeshConnectivity

    ###########################################################################################
    # Get mpi info
    comm = MPI.COMM_WORLD

    ###########################################################################################
    # intialize the mesh and some fields
    msh = Mesh(comm, create_connectivity=False)

    stat_fields = FieldRegistry(comm)

    # generic scalar and vector derivatives
    dQ1_dxi = FieldRegistry(comm)
    dQ2_dxi = FieldRegistry(comm)
    dQ3_dxi = FieldRegistry(comm)
    d2Q1_dxi2 = FieldRegistry(comm)

    ###########################################################################################
    # using the same .fXXXXXX extenstion as the mean fields
    this_ext = fname_mean[-8:]
    this_ext_check = fname_stat[-8:]

    # check the two files match. can be replaced by an error, but that seems too harsh and limiting
    if this_ext != this_ext_check:
        warnings.warn(
            "File index of fname_stat and fname_mean differ! Hope you know what you are doing!"
        )

    full_fname_mesh = which_dir + "/" + fname_mesh
    full_fname_stat = which_dir + "/" + fname_stat

    ###########################################################################################
    # read mesh and compute coefs
    pynekread(full_fname_mesh, comm, msh=msh, data_dtype=np.single)
    if if_do_dssum_on_derivatives:
        msh_conn = MeshConnectivity(comm, msh, rel_tol=1e-5)

    if msh.gdim < 3:
        sys.exit("only 3D data is supported at the moment! ")

    coef = Coef(msh, comm, get_area=False)

    ###########################################################################################
    # define file_keys of the fields based on the codes

    # Neko key names taken from: https://neko.cfd/docs/develop/df/d8f/statistics-guide.html
    # NOTE(review): the mean scalar is read from the "pres" slot and SS from the
    # "temp" slot per the converted-statistics layout -- confirm against the guide.
    file_keys_S = ["pres"]
    # "US" "VS" "WS"
    file_keys_UiS = ["vel_0", "vel_1", "vel_2"]
    file_keys_SS = ["temp"]
    # "USS" "VSS" "WSS"
    # BUG FIX: renamed from file_keys_UjSS -- every use below reads file_keys_UiSS,
    # which previously raised a NameError.
    file_keys_UiSS = ["scal_2", "scal_3", "scal_4"]
    # "UUS" "VVS" "WWS" "UVS" "UWS" "VWS"
    file_keys_UiUjS = ["scal_5", "scal_6", "scal_7", "scal_8", "scal_9", "scal_10"]
    file_keys_PS = ["scal_11"]

    ###########################################################################################
    # S first and second derivatives
    ###########################################################################################

    stat_fields.add_field(
        comm,
        field_name="S",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_S,
        dtype=np.single,
    )

    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["S"], dQ1_dxi
    )
    compute_scalar_second_derivative(comm, msh, coef, dQ1_dxi, d2Q1_dxi2)

    if if_do_dssum_on_derivatives:
        do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(d2Q1_dxi2, msh_conn, msh)

    this_file_name = which_dir + "/dnSdxn" + this_ext
    write_file_6c(
        comm, msh, dQ1_dxi, d2Q1_dxi2, this_file_name, if_write_mesh=if_write_mesh
    )

    stat_fields.clear()

    ###########################################################################################
    # SS first and second derivatives
    ###########################################################################################

    stat_fields.add_field(
        comm,
        field_name="SS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_SS,
        dtype=np.single,
    )

    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["SS"], dQ1_dxi
    )
    compute_scalar_second_derivative(comm, msh, coef, dQ1_dxi, d2Q1_dxi2)

    if if_do_dssum_on_derivatives:
        do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(d2Q1_dxi2, msh_conn, msh)

    this_file_name = which_dir + "/dnSSdxn" + this_ext
    write_file_6c(
        comm, msh, dQ1_dxi, d2Q1_dxi2, this_file_name, if_write_mesh=if_write_mesh
    )

    stat_fields.clear()
    del d2Q1_dxi2

    ###########################################################################################
    # UiS first derivative
    ###########################################################################################

    stat_fields.add_field(
        comm,
        field_name="US",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_UiS[0],
        dtype=np.single,
    )
    stat_fields.add_field(
        comm,
        field_name="VS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_UiS[1],
        dtype=np.single,
    )
    stat_fields.add_field(
        comm,
        field_name="WS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_UiS[2],
        dtype=np.single,
    )

    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["US"], dQ1_dxi
    )
    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["VS"], dQ2_dxi
    )
    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["WS"], dQ3_dxi
    )

    if if_do_dssum_on_derivatives:
        do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh)

    this_file_name = which_dir + "/dUSdx" + this_ext
    write_file_9c(
        comm, msh, dQ1_dxi, dQ2_dxi, dQ3_dxi,
        this_file_name, if_write_mesh=if_write_mesh
    )

    stat_fields.clear()

    ###########################################################################################
    # UiSS first derivative
    ###########################################################################################

    stat_fields.add_field(
        comm,
        field_name="USS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_UiSS[0],
        dtype=np.single,
    )
    stat_fields.add_field(
        comm,
        field_name="VSS",
        file_type="fld",
        file_name=full_fname_stat,
        # BUG FIX: previously read file_keys_UiS[1] ("vel_1"), i.e. the VS field,
        # instead of the VSS key.
        file_key=file_keys_UiSS[1],
        dtype=np.single,
    )
    stat_fields.add_field(
        comm,
        field_name="WSS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_UiSS[2],
        dtype=np.single,
    )

    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["USS"], dQ1_dxi
    )
    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["VSS"], dQ2_dxi
    )
    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["WSS"], dQ3_dxi
    )

    if if_do_dssum_on_derivatives:
        do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh)
        do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh)

    this_file_name = which_dir + "/dUSSdx" + this_ext
    write_file_9c(
        comm, msh, dQ1_dxi, dQ2_dxi, dQ3_dxi,
        this_file_name, if_write_mesh=if_write_mesh
    )

    stat_fields.clear()
    del dQ2_dxi, dQ3_dxi

    ###########################################################################################
    # UiUjS first derivative
    ###########################################################################################

    actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"]

    for icomp in range(0, 6):
        if comm.Get_rank() == 0:
            print("working on: " + actual_field_names[icomp])

        stat_fields.add_field(
            comm,
            field_name=actual_field_names[icomp],
            file_type="fld",
            file_name=full_fname_stat,
            file_key=file_keys_UiUjS[icomp],
            dtype=np.single,
        )

        compute_scalar_first_derivative(
            comm, msh, coef, stat_fields.registry[actual_field_names[icomp]], dQ1_dxi
        )

        if if_do_dssum_on_derivatives:
            do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)

        this_file_name = (
            which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext
        )
        write_file_3c(
            comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh
        )

        stat_fields.clear()

    ###########################################################################################
    # PS first derivative
    ###########################################################################################

    stat_fields.add_field(
        comm,
        field_name="PS",
        file_type="fld",
        file_name=full_fname_stat,
        file_key=file_keys_PS,
        dtype=np.single,
    )

    compute_scalar_first_derivative(
        comm, msh, coef, stat_fields.registry["PS"], dQ1_dxi
    )

    if if_do_dssum_on_derivatives:
        do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh)
    this_file_name = which_dir + "/dPSdx" + this_ext
    write_file_3c(
        comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh
    )

    stat_fields.clear()
    del dQ1_dxi

    if comm.Get_rank() == 0:
        print("-------As a great man once said: run successful: dying ...")


###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
# interpolate the 42+N fields onto the user specified set of points
###########################################################################################
###########################################################################################
def interpolate_all_stat_and_pstat_fields_onto_points(
    which_dir,
    fname_mesh,
    fname_mean,
    fname_stat,
    xyz,
    which_code="NEKO",
    nek5000_stat_type="s",
    if_do_dssum_before_interp=True,
    if_create_boundingBox_for_interp=False,
    if_pass_points_to_rank0_only=True,
    interpolation_output_fname="interpolated_fields.hdf5",
    find_points_tol=None
):
    """Interpolate the run-time statistics plus the derived derivative fields onto ``xyz``.

    Builds the list of statistics files (one file for Neko, eleven for Nek5000 via
    ``give_me_the_stat_file_name``) plus the derivative files written earlier
    (``dUdx``, ``d2Udx2``, ``dnPdxn``, ``dPUdx``, ``dn<UiUj>dxn``, ``d<UiUjUk>dx``),
    then interpolates every field of every file onto the points ``xyz`` with
    ``Probes``, writing the results to ``interpolation_output_fname``.

    Parameters
    ----------
    which_dir : directory containing mesh, statistics and derivative files.
    fname_mesh : file to read the mesh from.
    fname_mean, fname_stat : statistics file name; the two must be identical.
    xyz : array of interpolation points.
    which_code : "NEKO" or "NEK5000" (case-insensitive).
    nek5000_stat_type : "s" or "t" (Nek5000 only).
    if_do_dssum_before_interp : make each field continuous before interpolating.
    if_create_boundingBox_for_interp : restrict the mesh to the bounding box of ``xyz``.
    if_pass_points_to_rank0_only : give the full point set only to rank 0.
    interpolation_output_fname : output file for the interpolated fields.
    find_points_tol : tolerance forwarded to ``Probes``.
    """

    # BUG FIX: warnings is used below but was never imported (NameError when the
    # file extensions differ).
    import warnings
    from mpi4py import MPI  # equivalent to the use of MPI_init() in C
    import numpy as np

    from pysemtools.datatypes.msh import Mesh
    from pysemtools.datatypes.field import FieldRegistry
    from pysemtools.datatypes.coef import Coef
    from pysemtools.io.ppymech.neksuite import pynekread
    from pysemtools.interpolation.probes import Probes

    if if_do_dssum_before_interp:
        from pysemtools.datatypes.msh_connectivity import MeshConnectivity

    if if_create_boundingBox_for_interp:
        from pysemtools.datatypes.msh_partitioning import MeshPartitioner

    ###########################################################################################
    # do some initial checks
    import sys

    # check if file names are the same
    if fname_mean != fname_stat:
        sys.exit(
            "fname_mean must be the same as fname_stat"
        )

    # see if nek5000 file names, etc. are correct
    if which_code.casefold() == "nek5000":
        if nek5000_stat_type != "s" and nek5000_stat_type != "t":
            sys.exit(
                'for NEK5000 statistics nek5000_stat_type can be either "s" or "t"'
            )

    ###########################################################################################
    # Get mpi info
    comm = MPI.COMM_WORLD

    ###########################################################################################
    # intialize the mesh and some fields
    msh = Mesh(comm, create_connectivity=False)
    mean_fields = FieldRegistry(comm)

    ###########################################################################################
    # using the same .fXXXXXX extenstion as the mean fields
    this_ext = fname_mean[-8:]
    this_ext_check = fname_stat[-8:]

    # check the two files match. can be replaced by an error, but that seems too harsh and limiting
    if this_ext != this_ext_check:
        warnings.warn(
            "File index of fname_stat and fname_mean differ! Hope you know what you are doing!"
        )

    fname_gradU = which_dir + "/dUdx" + this_ext
    fname_hessU = which_dir + "/d2Udx2" + this_ext
    fname_derivP = which_dir + "/dnPdxn" + this_ext
    fname_gradPU = which_dir + "/dPUdx" + this_ext
    full_fname_mesh = which_dir + "/" + fname_mesh

    ###########################################################################################
    # get the file name for the 44 fields collected in run time
    if which_code.casefold() == "neko":
        these_names = [
            give_me_the_stat_file_name(
                which_dir, fname_stat, "00", which_code, nek5000_stat_type
            )
        ]
    elif which_code.casefold() == "nek5000":
        # files "01" .. "11"
        these_names = [
            give_me_the_stat_file_name(
                which_dir, fname_mean, f"{i:02d}", which_code, nek5000_stat_type
            )
            for i in range(1, 12)
        ]

    # add the name of the additional fields, these are common between neko and nek5000
    these_names.extend([fname_gradU, fname_hessU, fname_derivP, fname_gradPU])

    actual_field_names = ["UU", "VV", "WW", "UV", "UW", "VW"]
    for icomp in range(0, 6):
        this_file_name = (
            which_dir + "/dn" + actual_field_names[icomp] + "dxn" + this_ext
        )
        these_names.append(this_file_name)

    actual_field_names = [
        "UUU",
        "VVV",
        "WWW",
        "UUV",
        "UUW",
        "UVV",
        "UVW",
        "VVW",
        "UWW",
        "VWW",
    ]
    for icomp in range(0, 10):
        this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext
        these_names.append(this_file_name)

    ###########################################################################################
    # read mesh and redefine it based on the boundaring box if said
    pynekread(full_fname_mesh, comm, msh=msh, data_dtype=np.single)

    if msh.gdim < 3:
        # BUG FIX: sys.exit takes a single argument; the original call passed two
        # strings, which raises a TypeError instead of exiting cleanly.
        sys.exit("only 3D data is supported! "
                 "you can convert your data to 3D using 'convert_2Dstats_to_3D'!")

    if if_do_dssum_before_interp:
        msh_conn = MeshConnectivity(comm, msh, rel_tol=1e-5)

    if if_create_boundingBox_for_interp:
        xyz_max = np.max(xyz, axis=0)
        xyz_min = np.min(xyz, axis=0)

        if comm.Get_rank() == 0:
            print("xyz_min: ", xyz_min)
            print("xyz_max: ", xyz_max)

        cond = (
            (msh.x >= xyz_min[0])
            & (msh.x <= xyz_max[0])
            & (msh.y >= xyz_min[1])
            & (msh.y <= xyz_max[1])
            & (msh.z >= xyz_min[2])
            & (msh.z <= xyz_max[2])
        )

        mp = MeshPartitioner(comm, msh=msh, conditions=[cond])
        # NOTE(review): kwarg spelling "create_conectivity" kept as-is -- confirm
        # it matches the MeshPartitioner API before renaming.
        msh = mp.create_partitioned_mesh(
            msh, partitioning_algorithm="load_balanced_linear", create_conectivity=True
        )

    ###########################################################################################
    # compute coef, for interpolation
    coef = Coef(msh, comm, get_area=False)

    ###########################################################################################
    # initiate probes
    if not if_pass_points_to_rank0_only:
        probes = Probes(
            comm,
            probes=xyz,
            msh=msh,
            point_interpolator_type="multiple_point_legendre_numpy",
            max_pts=128,
            output_fname = interpolation_output_fname,
            find_points_tol=find_points_tol
        )
    else:
        # only rank 0 owns the full point set; other ranks pass None
        if comm.Get_rank() == 0:
            probes = Probes(
                comm,
                probes=xyz,
                msh=msh,
                point_interpolator_type="multiple_point_legendre_numpy",
                max_pts=128,
                output_fname = interpolation_output_fname,
                find_points_tol=find_points_tol
            )
        else:
            probes = Probes(
                comm,
                probes=None,
                msh=msh,
                point_interpolator_type="multiple_point_legendre_numpy",
                max_pts=128,
                output_fname = interpolation_output_fname,
                find_points_tol=find_points_tol
            )

    ###########################################################################################
    for fname in these_names:

        if comm.Get_rank() == 0:
            print("----------- working on file: ", fname)

        #########################
        field_names = return_list_of_vars_from_filename(fname)
        #########################

        for icomp in range(0, len(field_names)):
            if comm.Get_rank() == 0:
                print(
                    "---working on field ", icomp, "from a total of ", len(field_names)
                )

            # load the field
            mean_fields.add_field(
                comm,
                field_name="tmpF",
                file_type="fld",
                file_name=fname,
                file_key=field_names[icomp],
                dtype=np.single,
            )

            if if_create_boundingBox_for_interp:
                mean_fields = mp.create_partitioned_field(
                    mean_fields, partitioning_algorithm="load_balanced_linear"
                )

            # do dssum to make it continuous
            if if_do_dssum_before_interp:
                msh_conn.dssum(field=mean_fields.registry["tmpF"], msh=msh, average="multiplicity")

            # interpolate the fields
            probes.interpolate_from_field_list(
                0, [mean_fields.registry["tmpF"]], comm, write_data=True
            )

            mean_fields.clear()
###########################################################################################
+########################################################################################### From c5af29b7c49248996d2eec7537f90dc4a986e384 Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Fri, 6 Feb 2026 18:03:05 +0100 Subject: [PATCH 02/13] Add interpolate_all_stat_and_sstat_fields_onto_points --- .../statistics/passive_scalar_budgets.py | 94 +++---------------- 1 file changed, 15 insertions(+), 79 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index b308202..a3f3c02 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -501,18 +501,17 @@ def compute_and_write_additional_sstat_fields( # interpolate the 42+N fields onto the user specified set of points ########################################################################################### ########################################################################################### -def interpolate_all_stat_and_pstat_fields_onto_points( +def interpolate_all_stat_and_sstat_fields_onto_points( which_dir, fname_mesh, fname_mean, fname_stat, xyz, which_code="NEKO", - nek5000_stat_type="s", if_do_dssum_before_interp=True, if_create_boundingBox_for_interp=False, if_pass_points_to_rank0_only=True, - interpolation_output_fname="interpolated_fields.hdf5", + interpolation_output_fname="interpolated_scalar_fields.hdf5", find_points_tol=None ): @@ -542,12 +541,9 @@ def interpolate_all_stat_and_pstat_fields_onto_points( "fname_mean must be the same as fname_stat" ) - # see if nek5000 file names, etc. 
are correct - if which_code.casefold() == "nek5000": - if nek5000_stat_type != "s" and nek5000_stat_type != "t": - sys.exit( - 'for NEK5000 statistics nek5000_stat_type can be either "s" or "t"' - ) + # Scalar statistics only implemented for Neko + if which_code.casefold() != "neko": + sys.exit("only NEKO is supported for passive scalar statistics") ########################################################################################### # Get mpi info @@ -569,85 +565,25 @@ def interpolate_all_stat_and_pstat_fields_onto_points( "File index of fname_stat and fname_mean differ! Hope you know what you are doing!" ) - fname_gradU = which_dir + "/dUdx" + this_ext - fname_hessU = which_dir + "/d2Udx2" + this_ext - fname_derivP = which_dir + "/dnPdxn" + this_ext - fname_gradPU = which_dir + "/dPUdx" + this_ext - # full_fname_mean = which_dir+"/"+fname_mean - # full_fname_stat = which_dir+"/"+fname_stat full_fname_mesh = which_dir + "/" + fname_mesh - ########################################################################################### - # get the file name for the 44 fileds collected in run time - if which_code.casefold() == "neko": - these_names = [ - give_me_the_stat_file_name( - which_dir, fname_stat, "00", which_code, nek5000_stat_type - ) - ] - # these_field_names = [ ] - elif which_code.casefold() == "nek5000": - these_names = [ - give_me_the_stat_file_name( - which_dir, fname_mean, "01", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "02", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "03", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "04", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "05", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "06", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "07", 
which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "08", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "09", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "10", which_code, nek5000_stat_type - ), - give_me_the_stat_file_name( - which_dir, fname_mean, "11", which_code, nek5000_stat_type - ) - ] + # get the file name for the 42 fields collected in run time + these_names = [which_dir + "/" + fname_stat] - # add the name of the additional fields, these are common between neko and nek5000 - these_names.extend([fname_gradU, fname_hessU, fname_derivP, fname_gradPU]) + # add the name of the additional fields + these_names.extend([which_dir + "/dnSdxn" + this_ext, + which_dir + "/dnSSdxn" + this_ext, + which_dir + "/dUSdx" + this_ext, + which_dir + "/dUSSdx" + this_ext]) - actual_field_names = ["UU", "VV", "WW", "UV", "UW", "VW"] + actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"] for icomp in range(0, 6): this_file_name = ( - which_dir + "/dn" + actual_field_names[icomp] + "dxn" + this_ext + which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext ) these_names.append(this_file_name) - actual_field_names = [ - "UUU", - "VVV", - "WWW", - "UUV", - "UUW", - "UVV", - "UVW", - "VVW", - "UWW", - "VWW", - ] - for icomp in range(0, 10): - this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - these_names.append(this_file_name) + these_names.append(which_dir + "/dPSdx" + this_ext) # if comm.Get_rank() == 0: # print(these_names) From 4c29bbfeb90481ef91dfa5c14ec759845855c26c Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Fri, 6 Feb 2026 18:06:17 +0100 Subject: [PATCH 03/13] copy over RS_budgets_interpolatedPoints_notMPI.py --- ...calar_budgets_interpolatedPoints_notMPI.py | 652 ++++++++++++++++++ 1 file changed, 652 insertions(+) create mode 100644 
pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py new file mode 100644 index 0000000..c5c881a --- /dev/null +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -0,0 +1,652 @@ + +#%% +##################################################################################### +##################################################################################### +##################################################################################### +# function to read and store the interpolated fields as structured and averaged fields +##################################################################################### +def read_interpolated_stat_hdf5_fields( + path_to_files , + Reynolds_number , + If_average , If_convert_to_single , + Nstruct , av_func , + output_fname='averaged_and_renamed_interpolated_fields.hdf5' + ): + + #%% + import h5py + import numpy as np + import time + + #%% + print('reading xyz coordinates...') + start_time = time.time() + with h5py.File(path_to_files+'/'+'coordinates_interpolated_fields.hdf5', 'r') as f: + XYZ_vec = np.array(f['/xyz']).T # Transpose to match MATLAB's permute([2,1]) + XYZ_vec = XYZ_vec.T + + Npts = XYZ_vec.shape[0] + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('reading the 44 statistics fields...') + start_time = time.time() + UVW_vec = np.zeros((Npts, 3)) + for i in range(1, 4): + filename = path_to_files+'/'+f'interpolated_fields0000{i}.hdf5' + with h5py.File(filename, 'r') as f: + UVW_vec[:, i-1] = np.array(f['/field_0']).flatten() + + with h5py.File(path_to_files+'/'+'interpolated_fields00004.hdf5', 'r') as f: + P_vec = np.array(f['/field_0']).flatten() + + with h5py.File(path_to_files+'/'+'interpolated_fields00005.hdf5', 
'r') as f: + P2_vec = np.array(f['/field_0']).flatten() + + UiUj_vec = np.zeros((Npts, 6)) + for i in range(1, 7): + tmp = str(5 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + UiUj_vec[:, i-1] = np.array(f['/field_0']).flatten() + + UiUjUk_vec = np.zeros((Npts, 10)) + for i in range(1, 11): + tmp = str(11 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + UiUjUk_vec[:, i-1] = np.array(f['/field_0']).flatten() + + # Reordering columns based on MATLAB's conversion + UiUjUk_vec = UiUjUk_vec[:, [0, 1, 2, 3, 4, 5, 7, 8, 9, 6]] + + Ui4_vec = np.zeros((Npts, 3)) + for i in range(1, 4): + tmp = str(21 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + Ui4_vec[:, i-1] = np.array(f['/field_0']).flatten() + + with h5py.File(path_to_files+'/'+'interpolated_fields00025.hdf5', 'r') as f: + P3_vec = np.array(f['/field_0']).flatten() + + with h5py.File(path_to_files+'/'+'interpolated_fields00026.hdf5', 'r') as f: + P4_vec = np.array(f['/field_0']).flatten() + + PUi_vec = np.zeros((Npts, 3)) + for i in range(1, 4): + tmp = str(26 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + PUi_vec[:, i-1] = np.array(f['/field_0']).flatten() + + PGij_vec = np.zeros((Npts, 9)) + for i in range(1, 10): + tmp = str(29 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + PGij_vec[:, i-1] = np.array(f['/field_0']).flatten() + + pseudoDiss_vec = np.zeros((Npts, 6)) + for i in range(1, 7): + tmp = str(38 + i) + fnum = ('000000'[:5 - len(tmp)] + tmp) + filename = 
path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' + with h5py.File(filename, 'r') as f: + pseudoDiss_vec[:, i-1] = np.array(f['/field_0']).flatten() + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% make 1D arrays 2D + P_vec = P_vec[:, np.newaxis] + P2_vec = P2_vec[:, np.newaxis] + P3_vec = P3_vec[:, np.newaxis] + P4_vec = P4_vec[:, np.newaxis] + + #%% + print('reading the additional 99 fields...') + start_time = time.time() + + def read_hdf5_field(filename): + with h5py.File(filename, 'r') as f: + return np.array(f['/field_0']).flatten() + + def generate_filenames(start, count): + return [path_to_files+'/'+f'interpolated_fields{str(start + i).zfill(5)}.hdf5' for i in range(1, count + 1)] + + dUidxj_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(44, 9)]) + d2Uidx2_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(53, 9)]) + dPdx_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(62, 3)]) + d2Pdx2_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(65, 3)]) + dPUidxj_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(68, 9)]) + dUiUjdx_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(77, 3)] + + [read_hdf5_field(f) for f in generate_filenames(83, 3)] + + [read_hdf5_field(f) for f in generate_filenames(89, 3)] + + [read_hdf5_field(f) for f in generate_filenames(95, 3)] + + [read_hdf5_field(f) for f in generate_filenames(101, 3)] + + [read_hdf5_field(f) for f in generate_filenames(107, 3)]) + d2UiUjdx2_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(80, 3)] + + [read_hdf5_field(f) for f in generate_filenames(86, 3)] + + [read_hdf5_field(f) for f in generate_filenames(92, 3)] + + [read_hdf5_field(f) for f in generate_filenames(98, 3)] + + [read_hdf5_field(f) for f in generate_filenames(104, 3)] + + [read_hdf5_field(f) for f in generate_filenames(110, 3)]) + dUiUjUkdx_vec = np.column_stack([read_hdf5_field(f) 
for f in generate_filenames(113, 30)]) + + # Reordering columns based on MATLAB's conversion + dUiUjUkdx_vec = dUiUjUkdx_vec[:, list(range(18)) + list(range(21, 30)) + list(range(18, 21))] + + print('Finished reading all fields.') + print(f'Done in {time.time() - start_time:.2f} seconds.') + + # Save the variables in a directory + vars = {} + for name in list(locals().keys()): + if name.endswith('_vec') and isinstance(locals()[name], np.ndarray): + vars[name] = locals()[name] + + #%% Convert arrays to single precision if required + if If_convert_to_single: + print('Converting arrays into single precision...') + start_time = time.time() + for name in list(vars.keys()): + if name.endswith('_vec') and isinstance(vars[name], np.ndarray): + # print(np.shape(locals()[name])) + vars[name] = vars[name].astype(np.float32) + print('Conversion complete.') + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Reshape arrays based on Nstruct + print('Reshaping into arrays...') + start_time = time.time() + for name in list(vars.keys()): + if name.endswith('_vec') and isinstance(vars[name], np.ndarray): + reshaped_name = name[:-4] + '_struct' + vars[reshaped_name] = vars[name].reshape((Nstruct[1], Nstruct[0], Nstruct[2], vars[name].shape[1]),order="F") + del vars[name] + print('Reshaping complete.') + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Permute arrays to original shape + print('Permuting arrays into the original shape...') + start_time = time.time() + for name in list(vars.keys()): + if name.endswith('_struct') and isinstance(vars[name], np.ndarray): + vars[name] = np.transpose(vars[name], (1, 0, 2, 3)) + print('Permutation complete.') + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Apply user-specified averaging function if required + if If_average: + print('Taking the user-specified average using function av_func...') + start_time = time.time() + for name in list(vars.keys()): + if name.endswith('_struct') and 
isinstance(vars[name], np.ndarray): + vars[name] = av_func(vars[name]) + print('Averaging complete.') + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Reynolds number needed to calculate viscous related terms later + Rer_here = Reynolds_number + vars['Rer_here'] = Rer_here + + #%% Save the data in HDF5 format + print('Saving the data in HDF5 format...') + start_time = time.time() + with h5py.File(path_to_files+'/'+output_fname, 'w') as hf: + global_vars = dict(vars) # Create a copy to avoid modification issues + for name, data in global_vars.items(): + if (name.endswith('_struct') or name.endswith('_here')) and isinstance(data, (np.ndarray, int, float)): + hf.create_dataset(name, data=data) + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('Data saved successfully in HDF5 format.') + + + +#%% +##################################################################################### +##################################################################################### +##################################################################################### +# function to read the raw but averaged fields and calcuate the budgets in Cartesian coordinates +##################################################################################### +def calculate_budgets_in_Cartesian( + path_to_files = './' , + input_filename = "averaged_and_renamed_interpolated_fields.hdf5", + output_filename = "pstat3d_format.hdf5" ): + + #%% + import h5py + import numpy as np + import time + + #%% + with h5py.File(path_to_files+'/'+input_filename, "r") as input_file, \ + h5py.File(path_to_files+'/'+output_filename, "w") as output_file: + + #%% + Rer_here = np.float64(input_file['Rer_here']) + print('Reynolds number = ', Rer_here) + + output_file.create_dataset('Rer_here', data=Rer_here, compression=None) + + #%% XYZ coordinates + print('--------------working on XYZ coordinates...') + start_time = time.time() + + XYZ = np.array(input_file['XYZ_struct']) 
+ output_file.create_dataset('XYZ', data=XYZ, compression=None) + Nxyz = XYZ.shape[:3] + print('Number of points: ' , Nxyz) + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + # Mean Velocities + print('--------------working on mean velocities...') + start_time = time.time() + + UVW = np.array(input_file['UVW_struct']) + output_file.create_dataset('UVW', data=UVW, compression=None) + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Reynolds Stresses + print('--------------working on Reynolds stresses...') + start_time = time.time() + + Rij = np.array(input_file['UiUj_struct']) + Rij = Rij - (UVW[..., [0, 1, 2, 0, 0, 1]] * UVW[..., [0, 1, 2, 1, 2, 2]]) + + output_file.create_dataset('Rij', data=Rij, compression=None) + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Pressure and Its Moments + print('--------------working on pressure and its moments...') + start_time = time.time() + + P = np.array(input_file['P_struct']) + P2 = np.array(input_file['P2_struct']) + P3 = np.array(input_file['P3_struct']) + P4 = np.array(input_file['P4_struct']) + + P2 = P2 - P**2 + P3 = P3 - P**3 - 3 * P * P2 + P4 = P4 - P**4 - 6 * P**2 * P2 - 4 * P * P3 + + output_file.create_dataset('P', data=P, compression=None) + output_file.create_dataset('P2', data=P2, compression=None) + output_file.create_dataset('P3', data=P3, compression=None) + output_file.create_dataset('P4', data=P4, compression=None) + + del P2, P3, P4 + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('--------------working on tripple products...') + start_time = time.time() + + # Load relevant datasets from the HDF5 file + UiUjUk = np.array(input_file['UiUjUk_struct'][:]) + + # Perform calculations similar to Matlab + UiUjUk = UiUjUk - ( + UVW[:, :, :, [0, 1, 2, 0, 0, 0, 1, 0, 1, 0]] * + UVW[:, :, :, [0, 1, 2, 0, 0, 1, 1, 2, 2, 1]] * + UVW[:, :, :, [0, 1, 2, 1, 2, 1, 2, 2, 2, 2]] + ) - ( + UVW[:, :, :, [0, 1, 2, 0, 0, 0, 1, 0, 1, 0]] * + 
Rij[:, :, :, [0, 1, 2, 3, 4, 1, 5, 2, 2, 5]] + ) - ( + UVW[:, :, :, [0, 1, 2, 0, 0, 1, 1, 2, 2, 1]] * + Rij[:, :, :, [0, 1, 2, 3, 4, 3, 5, 4, 5, 4]] + ) - ( + UVW[:, :, :, [0, 1, 2, 1, 2, 1, 2, 2, 2, 2]] * + Rij[:, :, :, [0, 1, 2, 0, 0, 3, 1, 4, 5, 3]] + ) + + # Save result into HDF5 file + output_file.create_dataset('UiUjUk', data=UiUjUk) + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('--------------working on velocity kurtosis...') + start_time = time.time() + + # Load Ui4 from the input file + Ui4 = np.array(input_file['Ui4_struct'][:]) + + # Perform kurtosis calculation + Ui4 = Ui4 - UVW**4 - 6 * UVW**2 * Rij[:, :, :, 0:3] - 4 * UVW * UiUjUk[:, :, :, 0:3] + + # Save result into HDF5 file + output_file.create_dataset('Ui4', data=Ui4) + del UiUjUk, Ui4 + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print("--------------working on velocity gradients...") + start_time = time.time() + + # Read velocity gradient tensor from input file + dUidXj = np.array(input_file["dUidxj_struct"][()]) + + # Save to output file + output_file.create_dataset("dUidXj", data=dUidXj) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print("--------------working on momentum convection terms...") + start_time = time.time() + + + # Reshape dUidXj + dUidXj_reshaped = dUidXj.reshape(Nxyz[0], Nxyz[1], Nxyz[2], 3, 3, order="F") + + # Compute Momentum convection + Momentum_convection = np.sum(UVW[..., np.newaxis] * dUidXj_reshaped, axis=3) + + # Save to output file + output_file.create_dataset("Momentum_convection", data=Momentum_convection) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print("--------------working on the residual momentum convection terms...") + start_time = time.time() + + # Compute Residual Momentum convection terms + Momentum_convectionRes = np.zeros((*Nxyz, 3)) # Shape: (N1, N2, N3, 3) + Momentum_convectionRes[..., 0] = np.sum(UVW[..., [1, 2]] * dUidXj[..., [1, 2]], 
axis=-1) + Momentum_convectionRes[..., 1] = np.sum(UVW[..., [0, 2]] * dUidXj[..., [3, 5]], axis=-1) + Momentum_convectionRes[..., 2] = np.sum(UVW[..., [0, 1]] * dUidXj[..., [6, 7]], axis=-1) + + # Save to output file + output_file.create_dataset("Momentum_convectionRes", data=Momentum_convectionRes) + del Momentum_convectionRes + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print("--------------working on momentum pressure terms...") + print("Warning: assuming rho=1 everywhere!") + start_time = time.time() + + # Compute Momentum pressure + Momentum_pressure = -np.array(input_file["dPdx_struct"][()]) + + # Save to output file + output_file.create_dataset("Momentum_pressure", data=Momentum_pressure) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print('--------------working on production terms...') + start_time = time.time() + + Prod_ij = np.zeros((*dUidXj.shape[:3], 6)) + + Prod_ij[..., 0] = -2 * np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., :3], axis=3) + Prod_ij[..., 1] = -2 * np.sum(Rij[..., [3, 1, 5]] * dUidXj[..., 3:6], axis=3) + Prod_ij[..., 2] = -2 * np.sum(Rij[..., [4, 5, 2]] * dUidXj[..., 6:9], axis=3) + Prod_ij[..., 3] = -np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., 3:6] + + Rij[..., [3, 1, 5]] * dUidXj[..., :3], axis=3) + Prod_ij[..., 4] = -np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., 6:9] + + Rij[..., [4, 5, 2]] * dUidXj[..., :3], axis=3) + Prod_ij[..., 5] = -np.sum(Rij[..., [3, 1, 5]] * dUidXj[..., 6:9] + + Rij[..., [4, 5, 2]] * dUidXj[..., 3:6], axis=3) + + TKE_prod = np.sum(Prod_ij[..., :3], axis=3) / 2 + + output_file.create_dataset('Prod_ij', data=Prod_ij) + output_file.create_dataset('TKE_prod', data=TKE_prod) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print('--------------working on convection terms...') + start_time = time.time() + + dRij_dxk = np.array(input_file['dUiUjdx_struct'][:]) + + dRij_dxk = dRij_dxk - \ + UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] 
* \ + dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] - \ + UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] * \ + dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + + Conv_ij = np.zeros((*UVW.shape[:3], 6)) + + Conv_ij[..., 0] = np.sum(UVW[..., :3] * dRij_dxk[..., :3], axis=3) + Conv_ij[..., 1] = np.sum(UVW[..., :3] * dRij_dxk[..., 3:6], axis=3) + Conv_ij[..., 2] = np.sum(UVW[..., :3] * dRij_dxk[..., 6:9], axis=3) + Conv_ij[..., 3] = np.sum(UVW[..., :3] * dRij_dxk[..., 9:12], axis=3) + Conv_ij[..., 4] = np.sum(UVW[..., :3] * dRij_dxk[..., 12:15], axis=3) + Conv_ij[..., 5] = np.sum(UVW[..., :3] * dRij_dxk[..., 15:18], axis=3) + + Conv_ij = -Conv_ij + TKE_conv = np.sum(Conv_ij[..., :3], axis=3) / 2 + + output_file.create_dataset('Conv_ij', data=Conv_ij) + output_file.create_dataset('TKE_conv', data=TKE_conv) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print('--------------working on momentum turbulent diffusion terms...') + start_time = time.time() + + Momentum_turb_diffusion = np.zeros((*dRij_dxk.shape[:3], 3)) + + Momentum_turb_diffusion[..., 0] = -np.sum(dRij_dxk[..., [0, 10, 14]], axis=3) + Momentum_turb_diffusion[..., 1] = -np.sum(dRij_dxk[..., [9, 4, 17]], axis=3) + Momentum_turb_diffusion[..., 2] = -np.sum(dRij_dxk[..., [12, 16, 8]], axis=3) + + output_file.create_dataset('Momentum_turb_diffusion', data=Momentum_turb_diffusion) + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print('--------------working on dissipation terms...') + start_time = time.time() + Diss_ij = np.array(input_file['pseudoDiss_struct'][()]) + + # Compute dissipation terms + Diss_ij[..., 0] -= np.sum(dUidXj[..., 0:3] ** 2, axis=-1) + Diss_ij[..., 1] -= np.sum(dUidXj[..., 3:6] ** 2, axis=-1) + Diss_ij[..., 2] -= np.sum(dUidXj[..., 6:9] ** 2, axis=-1) + Diss_ij[..., 3] -= np.sum(dUidXj[..., 0:3] * dUidXj[..., 3:6], axis=-1) + Diss_ij[..., 4] -= np.sum(dUidXj[..., 0:3] * dUidXj[..., 
6:9], axis=-1) + Diss_ij[..., 5] -= np.sum(dUidXj[..., 3:6] * dUidXj[..., 6:9], axis=-1) + + Diss_ij = -2.0 / Rer_here * Diss_ij + TKE_diss = np.sum(Diss_ij[..., 0:3], axis=-1) / 2 + + # Save results to output HDF5 file + output_file.create_dataset('Diss_ij', data=Diss_ij, compression=None) + output_file.create_dataset('TKE_diss', data=TKE_diss, compression=None) + del Diss_ij + print(f"Time taken: {time.time() - start_time:.2f} seconds") + + #%% + print('--------------working on turbulent transport...') + start_time = time.time() + + # Define index mappings + ind_dRjkdxk = np.array([ + (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3])-1, + (np.array([4, 2, 6]) - 1) * 3 + np.array([1, 2, 3])-1, + (np.array([5, 6, 3]) - 1) * 3 + np.array([1, 2, 3])-1 + ]) + + ind_tripple = np.zeros((3, 3, 3), dtype=int) + ind_tripple[:, 0, 0] = (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 0, 1] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 0, 2] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 1, 0] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 1, 1] = (np.array([6, 2, 7]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 1, 2] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 2, 0] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 2, 1] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 2, 2] = (np.array([8, 9, 3]) - 1) * 3 + np.array([1, 2, 3])-1 + + # Allocate memory + TurbTrans_ij = np.zeros((*Nxyz, 3, 3), dtype=np.float32) + + # Compute turbulent transport + for i in range(3): + for j in range(3): + TurbTrans_ij[..., i, j] = ( + - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[0, i, j]]) + - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[1, i, j]]) + - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[2, i, j]]) + ) + + TurbTrans_ij[..., i, j] += ( + UVW[..., i] * np.sum(UVW 
* dUidXj[..., (np.arange(3) + j * 3)], axis=-1) + + UVW[..., j] * np.sum(UVW * dUidXj[..., (np.arange(3) + i * 3)], axis=-1) + + UVW[..., i] * np.sum(dRij_dxk[..., ind_dRjkdxk[j, :]], axis=-1) + + UVW[..., j] * np.sum(dRij_dxk[..., ind_dRjkdxk[i, :]], axis=-1) + ) + + # Reorder indices + TurbTrans_ij = TurbTrans_ij.reshape(TurbTrans_ij.shape[:-2] + (-1,) , order="F") + TurbTrans_ij = TurbTrans_ij[..., [0, 4, 8, 1, 2, 5]] + + # Final computation + TurbTrans_ij -= Conv_ij + Prod_ij + TKE_turbTrans = np.sum(TurbTrans_ij[..., 0:3], axis=-1) / 2 + + # Save results + output_file.create_dataset('TurbTrans_ij', data=TurbTrans_ij, compression=None) + output_file.create_dataset('TKE_turbTrans', data=TKE_turbTrans, compression=None) + + # Cleanup + del TurbTrans_ij, Conv_ij, Prod_ij, dRij_dxk + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('--------------working on viscous diffusion...') + start_time = time.time() + + # Read datasets + d2Ui_dx2 = np.array(input_file['d2Uidx2_struct']) + d2Rij_dx2 = np.array(input_file['d2UiUjdx2_struct']) + + # Compute d2Rij_dx2 + d2Rij_dx2 = ( + d2Rij_dx2 + - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] + * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] + * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + - 2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + ) + + # Compute viscous diffusion + ViscDiff_ij = (1 / Rer_here) * np.sum(d2Rij_dx2.reshape((*Nxyz, 3, -1), order="F"), axis=3) + + # Compute TKE viscous diffusion + TKE_ViscDiff = np.sum(ViscDiff_ij[..., 0:3], axis=-1) / 2 + + # Save results + output_file.create_dataset('ViscDiff_ij', data=ViscDiff_ij, compression=None) + output_file.create_dataset('TKE_ViscDiff', data=TKE_ViscDiff, compression=None) + + # Cleanup + del d2Rij_dx2, 
ViscDiff_ij + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Momentum viscous diffusion terms + print('--------------working on momentum viscous diffusion terms...') + start_time = time.time() + + Momentum_viscous_diffusion = (1 / Rer_here) * \ + np.sum(d2Ui_dx2.reshape(*Nxyz, 3, 3, order="F"), axis=3) + + output_file.create_dataset('Momentum_viscous_diffusion', data=Momentum_viscous_diffusion, compression=None) + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Momentum residual terms + print('--------------working on momentum residual terms...') + start_time = time.time() + + Momentum_residual = ( + Momentum_viscous_diffusion + + Momentum_turb_diffusion + + Momentum_pressure + - Momentum_convection + ) + + output_file.create_dataset('Momentum_residual', data=Momentum_residual, compression=None) + + # Cleanup + del Momentum_viscous_diffusion, Momentum_turb_diffusion, \ + Momentum_pressure, Momentum_convection, Momentum_residual + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Pressure-rate-of-strain terms + print('--------------working on pressure-rate-of-strain terms...') + start_time = time.time() + + PRS_ij = np.array(input_file['PGij_struct']) + PRS_ij = PRS_ij - P * dUidXj + PRS_ij = PRS_ij[..., [0, 4, 8, 1, 2, 5]] + PRS_ij[..., [0, 4, 8, 3, 6, 7]] + + output_file.create_dataset('PRS_ij', data=PRS_ij, compression=None) + + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print('--------------working on pressure transport terms...') + start_time = time.time() + + dpdx = np.array(input_file['dPdx_struct']) + dpudx = np.array(input_file['dPUidxj_struct']) + + dpudx = dpudx - (P * dUidXj) \ + - (UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] * dpdx[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + + PressTrans_ij = -(dpudx[..., [0, 4, 8, 1, 2, 5]] + dpudx[..., [0, 4, 8, 3, 6, 7]]) + + output_file.create_dataset('dpdx', data=dpdx, compression=None) + output_file.create_dataset('PressTrans_ij', 
data=PressTrans_ij, compression=None) + + del dpudx + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% Velocity-Pressure Gradient Terms + print('--------------working on velocity-pressure-gradient terms...') + start_time = time.time() + + VelPressGrad_ij = PressTrans_ij - PRS_ij + TKE_VelPressGrad = np.sum(VelPressGrad_ij[..., 0:3], axis=-1) / 2 + + output_file.create_dataset('VelPressGrad_ij', data=VelPressGrad_ij, compression=None) + output_file.create_dataset('TKE_VelPressGrad', data=TKE_VelPressGrad, compression=None) + + del PRS_ij, PressTrans_ij, VelPressGrad_ij + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% TKE budget residual + print('--------------working on TKE budgets...') + start_time = time.time() + + TKE_residual = ( + TKE_prod + TKE_diss + TKE_turbTrans + TKE_ViscDiff + TKE_VelPressGrad + TKE_conv + ) + + output_file.create_dataset('TKE_residual', data=TKE_residual, compression=None) + + del TKE_prod, TKE_diss, TKE_turbTrans, TKE_ViscDiff, \ + TKE_VelPressGrad, TKE_conv, TKE_residual + print(f'Done in {time.time() - start_time:.2f} seconds.') + + #%% + print("All computations completed and data saved successfully.") From 6bc67bff49a3b716769e219f4c82a190015a5a84 Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Thu, 12 Feb 2026 09:04:54 +0100 Subject: [PATCH 04/13] working on passive scalar budgets --- ...calar_budgets_interpolatedPoints_notMPI.py | 171 +++++++----------- 1 file changed, 61 insertions(+), 110 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index c5c881a..34a2a23 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -5,12 +5,13 @@ 
##################################################################################### # function to read and store the interpolated fields as structured and averaged fields ##################################################################################### -def read_interpolated_stat_hdf5_fields( +def read_interpolated_scalar_stat_hdf5_fields( path_to_files , Reynolds_number , + Prandtl_number , If_average , If_convert_to_single , Nstruct , av_func , - output_fname='averaged_and_renamed_interpolated_fields.hdf5' + output_fname='averaged_and_renamed_interpolated_scalar_fields.hdf5' ): #%% @@ -18,10 +19,26 @@ def read_interpolated_stat_hdf5_fields( import numpy as np import time + #%% + # Generic function to load field by number + def load_field(num, prefix='interpolated_scalar_fields'): + """Load HDF5 field from file by number. + + Parameters: + - num: field number (int) + - prefix: filename prefix (default: 'interpolated_scalar_fields') + + Returns: + - Flattened numpy array of field data + """ + filename = path_to_files + '/' + f'{prefix}{num:05d}.hdf5' + with h5py.File(filename, 'r') as f: + return np.array(f['/field_0']).flatten() + #%% print('reading xyz coordinates...') start_time = time.time() - with h5py.File(path_to_files+'/'+'coordinates_interpolated_fields.hdf5', 'r') as f: + with h5py.File(path_to_files+'/'+'coordinates_interpolated_scalar_fields.hdf5', 'r') as f: XYZ_vec = np.array(f['/xyz']).T # Transpose to match MATLAB's permute([2,1]) XYZ_vec = XYZ_vec.T @@ -29,117 +46,43 @@ def read_interpolated_stat_hdf5_fields( print(f'Done in {time.time() - start_time:.2f} seconds.') #%% - print('reading the 44 statistics fields...') + print('reading the 42 statistics fields...') start_time = time.time() - UVW_vec = np.zeros((Npts, 3)) - for i in range(1, 4): - filename = path_to_files+'/'+f'interpolated_fields0000{i}.hdf5' - with h5py.File(filename, 'r') as f: - UVW_vec[:, i-1] = np.array(f['/field_0']).flatten() - - with 
h5py.File(path_to_files+'/'+'interpolated_fields00004.hdf5', 'r') as f: - P_vec = np.array(f['/field_0']).flatten() - - with h5py.File(path_to_files+'/'+'interpolated_fields00005.hdf5', 'r') as f: - P2_vec = np.array(f['/field_0']).flatten() - - UiUj_vec = np.zeros((Npts, 6)) - for i in range(1, 7): - tmp = str(5 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - UiUj_vec[:, i-1] = np.array(f['/field_0']).flatten() - - UiUjUk_vec = np.zeros((Npts, 10)) - for i in range(1, 11): - tmp = str(11 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - UiUjUk_vec[:, i-1] = np.array(f['/field_0']).flatten() - - # Reordering columns based on MATLAB's conversion - UiUjUk_vec = UiUjUk_vec[:, [0, 1, 2, 3, 4, 5, 7, 8, 9, 6]] - - Ui4_vec = np.zeros((Npts, 3)) - for i in range(1, 4): - tmp = str(21 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - Ui4_vec[:, i-1] = np.array(f['/field_0']).flatten() - - with h5py.File(path_to_files+'/'+'interpolated_fields00025.hdf5', 'r') as f: - P3_vec = np.array(f['/field_0']).flatten() - - with h5py.File(path_to_files+'/'+'interpolated_fields00026.hdf5', 'r') as f: - P4_vec = np.array(f['/field_0']).flatten() - - PUi_vec = np.zeros((Npts, 3)) - for i in range(1, 4): - tmp = str(26 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - PUi_vec[:, i-1] = np.array(f['/field_0']).flatten() - - PGij_vec = np.zeros((Npts, 9)) - for i in range(1, 10): - tmp = str(29 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - PGij_vec[:, i-1] = 
np.array(f['/field_0']).flatten() - - pseudoDiss_vec = np.zeros((Npts, 6)) - for i in range(1, 7): - tmp = str(38 + i) - fnum = ('000000'[:5 - len(tmp)] + tmp) - filename = path_to_files+'/'+f'interpolated_fields{fnum}.hdf5' - with h5py.File(filename, 'r') as f: - pseudoDiss_vec[:, i-1] = np.array(f['/field_0']).flatten() + + S_vec = load_field(0) + UiS_vec = np.column_stack([load_field(i) for i in range(1, 4)]) + S2_vec = load_field(4) + S3_vec = load_field(5) + S4_vec = load_field(6) + UiS2_vec = np.column_stack([load_field(i) for i in range(7, 10)]) + UiUjS_vec = np.column_stack([load_field(i) for i in range(10, 16)]) + PS_vec = load_field(16) + PdSdxi_vec = np.column_stack([load_field(i) for i in range(17, 20)]) + UidSdxj_vec = np.column_stack([load_field(i) for i in range(20, 29)]) + SdUidxj_vec = np.column_stack([load_field(i) for i in range(29, 38)]) + SDiss_vec = np.column_stack([load_field(i) for i in range(38, 42)]) print(f'Done in {time.time() - start_time:.2f} seconds.') #%% make 1D arrays 2D - P_vec = P_vec[:, np.newaxis] - P2_vec = P2_vec[:, np.newaxis] - P3_vec = P3_vec[:, np.newaxis] - P4_vec = P4_vec[:, np.newaxis] + S_vec = S_vec[:, np.newaxis] + S2_vec = S2_vec[:, np.newaxis] + S3_vec = S3_vec[:, np.newaxis] + S4_vec = S4_vec[:, np.newaxis] + PS_vec = PS_vec[:, np.newaxis] #%% - print('reading the additional 99 fields...') + print('reading the additional fields...') start_time = time.time() - def read_hdf5_field(filename): - with h5py.File(filename, 'r') as f: - return np.array(f['/field_0']).flatten() - - def generate_filenames(start, count): - return [path_to_files+'/'+f'interpolated_fields{str(start + i).zfill(5)}.hdf5' for i in range(1, count + 1)] - - dUidxj_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(44, 9)]) - d2Uidx2_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(53, 9)]) - dPdx_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(62, 3)]) - d2Pdx2_vec = 
np.column_stack([read_hdf5_field(f) for f in generate_filenames(65, 3)]) - dPUidxj_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(68, 9)]) - dUiUjdx_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(77, 3)] + - [read_hdf5_field(f) for f in generate_filenames(83, 3)] + - [read_hdf5_field(f) for f in generate_filenames(89, 3)] + - [read_hdf5_field(f) for f in generate_filenames(95, 3)] + - [read_hdf5_field(f) for f in generate_filenames(101, 3)] + - [read_hdf5_field(f) for f in generate_filenames(107, 3)]) - d2UiUjdx2_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(80, 3)] + - [read_hdf5_field(f) for f in generate_filenames(86, 3)] + - [read_hdf5_field(f) for f in generate_filenames(92, 3)] + - [read_hdf5_field(f) for f in generate_filenames(98, 3)] + - [read_hdf5_field(f) for f in generate_filenames(104, 3)] + - [read_hdf5_field(f) for f in generate_filenames(110, 3)]) - dUiUjUkdx_vec = np.column_stack([read_hdf5_field(f) for f in generate_filenames(113, 30)]) - - # Reordering columns based on MATLAB's conversion - dUiUjUkdx_vec = dUiUjUkdx_vec[:, list(range(18)) + list(range(21, 30)) + list(range(18, 21))] + dSdxj_vec = np.column_stack([load_field(i) for i in range(42, 45)]) + d2Sdx2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) + dS2dxj_vec = np.column_stack([load_field(i) for i in range(48, 51)]) + d2S2dx2_vec = np.column_stack([load_field(i) for i in range(51, 54)]) + dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 57)]) + dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(57, 60)]) + dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(60, 66)]) + dPSdxj_vec = np.column_stack([load_field(i) for i in range(66, 69)]) print('Finished reading all fields.') print(f'Done in {time.time() - start_time:.2f} seconds.') @@ -191,9 +134,11 @@ def generate_filenames(start, count): print('Averaging complete.') print(f'Done in {time.time() - start_time:.2f} 
seconds.') - #%% Reynolds number needed to calculate viscous related terms later + #%% Reynolds and Prandtl numbers needed to calculate viscous related terms later Rer_here = Reynolds_number + Prt_here = Prandtl_number vars['Rer_here'] = Rer_here + vars['Prt_here'] = Prt_here #%% Save the data in HDF5 format print('Saving the data in HDF5 format...') @@ -214,12 +159,12 @@ def generate_filenames(start, count): ##################################################################################### ##################################################################################### ##################################################################################### -# function to read the raw but averaged fields and calcuate the budgets in Cartesian coordinates +# function to read the raw but averaged fields and calcuate the scalar budgets in Cartesian coordinates ##################################################################################### -def calculate_budgets_in_Cartesian( +def calculate_scalar_budgets_in_Cartesian( path_to_files = './' , - input_filename = "averaged_and_renamed_interpolated_fields.hdf5", - output_filename = "pstat3d_format.hdf5" ): + input_filename = "averaged_and_renamed_interpolated_scalar_fields.hdf5", + output_filename = "sstat3d_format.hdf5" ): #%% import h5py @@ -236,6 +181,12 @@ def calculate_budgets_in_Cartesian( output_file.create_dataset('Rer_here', data=Rer_here, compression=None) + #%% + Prt_here = np.float64(input_file['Prt_here']) + print('Prandtl number = ', Prt_here) + + output_file.create_dataset('Prt_here', data=Prt_here, compression=None) + #%% XYZ coordinates print('--------------working on XYZ coordinates...') start_time = time.time() From 898cb6ee5b1cd2582609a50153b9b6069c18f7ae Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Fri, 13 Feb 2026 10:26:42 +0100 Subject: [PATCH 05/13] working on budgets --- .../statistics/passive_scalar_budgets.py | 207 ++-- ...calar_budgets_interpolatedPoints_notMPI.py | 1040 
++++++++++++----- 2 files changed, 889 insertions(+), 358 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index a3f3c02..a5a4a15 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -5,7 +5,7 @@ ## Preliminary functions to make life easier. ########################################################################################### ########################################################################################### -#%% generic function to compute the gradient of a scalar field +# %% generic function to compute the gradient of a scalar field def compute_scalar_first_derivative(comm, msh, coef, scalar, scalar_deriv): if msh.gdim == 3: scalar_deriv.c1 = coef.dudxyz(scalar, coef.drdx, coef.dsdx, coef.dtdx) @@ -17,12 +17,15 @@ def compute_scalar_first_derivative(comm, msh, coef, scalar, scalar_deriv): scalar_deriv.c3 = 0.0 * scalar_deriv.c2 else: import sys + sys.exit("supports either 2D or 3D data") + + ########################################################################################### ########################################################################################### -#%% generic function to compute the diagonal second derivatives of a scalar field from its gradient +# %% generic function to compute the diagonal second derivatives of a scalar field from its gradient def compute_scalar_second_derivative(comm, msh, coef, scalar_deriv, scalar_deriv2): if msh.gdim == 3: scalar_deriv2.c1 = coef.dudxyz(scalar_deriv.c1, coef.drdx, coef.dsdx, coef.dtdx) @@ -34,12 +37,15 @@ def compute_scalar_second_derivative(comm, msh, coef, scalar_deriv, scalar_deriv scalar_deriv2.c3 = 0.0 * scalar_deriv2.c3 else: import sys + sys.exit("supports either 2D or 3D data") + + ########################################################################################### 
########################################################################################### -#%% generic function to write a 9 component field with input as 3 vectors of 3 components each +# %% generic function to write a 9 component field with input as 3 vectors of 3 components each def write_file_9c(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh): from pysemtools.datatypes.field import FieldRegistry from pysemtools.io.ppymech.neksuite import pynekwrite @@ -60,11 +66,13 @@ def write_file_9c(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh) pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh) gradU.clear() + + ########################################################################################### ########################################################################################### -#%% generic function to write a 6-component field with input as 2 vectors of 3 components each +# %% generic function to write a 6-component field with input as 2 vectors of 3 components each def write_file_6c(comm, msh, dU_dxi, dV_dxi, fname_gradU, if_write_mesh): from pysemtools.datatypes.field import FieldRegistry from pysemtools.io.ppymech.neksuite import pynekwrite @@ -82,11 +90,13 @@ def write_file_6c(comm, msh, dU_dxi, dV_dxi, fname_gradU, if_write_mesh): pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh) gradU.clear() + + ########################################################################################### ########################################################################################### -#%% generic function to write a 3-component (vector) field +# %% generic function to write a 3-component (vector) field def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): from pysemtools.datatypes.field import FieldRegistry from pysemtools.io.ppymech.neksuite import pynekwrite @@ -101,55 +111,63 @@ def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): 
pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh) gradU.clear() + + ########################################################################################### ########################################################################################### - -#%% function to generate the list of fields from the file header +# %% function to generate the list of fields from the file header def return_list_of_vars_from_filename(fname): from pymech.neksuite.field import read_header header = read_header(fname) - vars_= header.nb_vars + vars_ = header.nb_vars vel_fields = vars_[1] pres_fields = vars_[2] temp_fields = vars_[3] scal_fields = vars_[4] - if scal_fields>37: + if scal_fields > 37: import warnings - print("Number of scalar fields: "+(f'{(scal_fields):.0f}')) - warnings.warn("The number of scalar fields above 39 is not supported. "+ - "This was done to make converted 2D statistics files consistent! "+ - "Limiting the number to 37...") + + print("Number of scalar fields: " + (f"{(scal_fields):.0f}")) + warnings.warn( + "The number of scalar fields above 39 is not supported. " + + "This was done to make converted 2D statistics files consistent! " + + "Limiting the number to 37..." 
+ ) scal_fields = 37 - field_names = [] for i in range(vel_fields): - tmp = [("vel_"+str(i))] + tmp = ["vel_" + str(i)] field_names = field_names + tmp - if pres_fields==1: + if pres_fields == 1: field_names = field_names + ["pres"] - if temp_fields==1: + if temp_fields == 1: field_names = field_names + ["temp"] for i in range(scal_fields): - tmp = [("scal_"+str(i))] + tmp = ["scal_" + str(i)] field_names = field_names + tmp return field_names + + ########################################################################################### ########################################################################################### -#%% do dssum on a vector with components c1, c2, c3 + +# %% do dssum on a vector with components c1, c2, c3 def do_dssum_on_3comp_vector(dU_dxi, msh_conn, msh): msh_conn.dssum(field=dU_dxi.c1, msh=msh, average="multiplicity") msh_conn.dssum(field=dU_dxi.c2, msh=msh, average="multiplicity") msh_conn.dssum(field=dU_dxi.c3, msh=msh, average="multiplicity") + + # def do_dssum_on_3comp_vector(dU_dxi, coef, msh): # coef.dssum(dU_dxi.c1, msh) # coef.dssum(dU_dxi.c2, msh) @@ -158,7 +176,6 @@ def do_dssum_on_3comp_vector(dU_dxi, msh_conn, msh): ########################################################################################### - ########################################################################################### ########################################################################################### ########################################################################################### @@ -182,9 +199,7 @@ def compute_and_write_additional_sstat_fields( # check if file names are the same if fname_mean != fname_stat: - sys.exit( - "fname_mean must be the same as fname_stat" - ) + sys.exit("fname_mean must be the same as fname_stat") # Scalar statistics only implemented for Neko if which_code.casefold() != "NEKO": @@ -217,12 +232,11 @@ def compute_and_write_additional_sstat_fields( stat_fields = FieldRegistry(comm) # generic scalar 
and vector derivatives - dQ1_dxi = FieldRegistry(comm) - dQ2_dxi = FieldRegistry(comm) - dQ3_dxi = FieldRegistry(comm) + dQ1_dxi = FieldRegistry(comm) + dQ2_dxi = FieldRegistry(comm) + dQ3_dxi = FieldRegistry(comm) d2Q1_dxi2 = FieldRegistry(comm) - ########################################################################################### # using the same .fXXXXXX extenstion as the mean fields this_ext = fname_mean[-8:] @@ -252,22 +266,20 @@ def compute_and_write_additional_sstat_fields( # define file_keys of the fields based on the codes # Neko key names taken from: https://neko.cfd/docs/develop/df/d8f/statistics-guide.html - file_keys_S = ["pres"] + file_keys_S = ["pres"] # "US" "VS" "WS" - file_keys_UiS = ["vel_0", "vel_1", "vel_2"] - file_keys_SS = ["temp"] + file_keys_UiS = ["vel_0", "vel_1", "vel_2"] + file_keys_SS = ["temp"] # "USS" "VSS" "WSS" - file_keys_UjSS = ["scal_2", "scal_3", "scal_4"] + file_keys_UjSS = ["scal_2", "scal_3", "scal_4"] # "UUS" "VVS" "WWS" "UVS" "UWS" "VWS" file_keys_UiUjS = ["scal_5", "scal_6", "scal_7", "scal_8", "scal_9", "scal_10"] - file_keys_PS = ["scal_11"] - - + file_keys_PS = ["scal_11"] ########################################################################################### # S first and second derivatives ########################################################################################### - + stat_fields.add_field( comm, field_name="S", @@ -277,15 +289,13 @@ def compute_and_write_additional_sstat_fields( dtype=np.single, ) - compute_scalar_first_derivative( - comm, msh, coef, stat_fields.registry["S"], dQ1_dxi - ) + compute_scalar_first_derivative(comm, msh, coef, stat_fields.registry["S"], dQ1_dxi) compute_scalar_second_derivative(comm, msh, coef, dQ1_dxi, d2Q1_dxi2) if if_do_dssum_on_derivatives: do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) do_dssum_on_3comp_vector(d2Q1_dxi2, msh_conn, msh) - + this_file_name = which_dir + "/dnSdxn" + this_ext write_file_6c( comm, msh, dQ1_dxi, d2Q1_dxi2, this_file_name, 
if_write_mesh=if_write_mesh @@ -296,7 +306,7 @@ def compute_and_write_additional_sstat_fields( ########################################################################################### # SS first and second derivatives ########################################################################################### - + stat_fields.add_field( comm, field_name="SS", @@ -369,16 +379,21 @@ def compute_and_write_additional_sstat_fields( this_file_name = which_dir + "/dUSdx" + this_ext write_file_9c( - comm, msh, dQ1_dxi, dQ2_dxi, dQ3_dxi, - this_file_name, if_write_mesh=if_write_mesh + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, ) stat_fields.clear() - + ########################################################################################### # UiSS first derivative ########################################################################################### - + stat_fields.add_field( comm, field_name="USS", @@ -421,8 +436,13 @@ def compute_and_write_additional_sstat_fields( this_file_name = which_dir + "/dUSSdx" + this_ext write_file_9c( - comm, msh, dQ1_dxi, dQ2_dxi, dQ3_dxi, - this_file_name, if_write_mesh=if_write_mesh + comm, + msh, + dQ1_dxi, + dQ2_dxi, + dQ3_dxi, + this_file_name, + if_write_mesh=if_write_mesh, ) stat_fields.clear() @@ -454,19 +474,15 @@ def compute_and_write_additional_sstat_fields( if if_do_dssum_on_derivatives: do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) - this_file_name = ( - which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - ) - write_file_3c( - comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh - ) + this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext + write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) stat_fields.clear() - + ########################################################################################### # PS first derivative 
########################################################################################### - + stat_fields.add_field( comm, field_name="PS", @@ -483,9 +499,7 @@ def compute_and_write_additional_sstat_fields( if if_do_dssum_on_derivatives: do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) this_file_name = which_dir + "/dPSdx" + this_ext - write_file_3c( - comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh - ) + write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) stat_fields.clear() del dQ1_dxi @@ -493,6 +507,37 @@ def compute_and_write_additional_sstat_fields( if comm.Get_rank() == 0: print("-------As a great man once said: run successful: dying ...") + ########################################################################################### + # SdUidxj first derivative + ########################################################################################### + + actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"] + + for icomp in range(0, 6): + if comm.Get_rank() == 0: + print("working on: " + actual_field_names[icomp]) + + stat_fields.add_field( + comm, + field_name=actual_field_names[icomp], + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[icomp], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry[actual_field_names[icomp]], dQ1_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + + this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext + write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) + + stat_fields.clear() + ########################################################################################### ########################################################################################### @@ -512,7 +557,7 @@ def interpolate_all_stat_and_sstat_fields_onto_points( if_create_boundingBox_for_interp=False, 
if_pass_points_to_rank0_only=True, interpolation_output_fname="interpolated_scalar_fields.hdf5", - find_points_tol=None + find_points_tol=None, ): from mpi4py import MPI # equivalent to the use of MPI_init() in C @@ -537,9 +582,7 @@ def interpolate_all_stat_and_sstat_fields_onto_points( # check if file names are the same if fname_mean != fname_stat: - sys.exit( - "fname_mean must be the same as fname_stat" - ) + sys.exit("fname_mean must be the same as fname_stat") # Scalar statistics only implemented for Neko if which_code.casefold() != "NEKO": @@ -571,19 +614,21 @@ def interpolate_all_stat_and_sstat_fields_onto_points( these_names = which_dir + "/" + fname_stat # add the name of the additional fields - these_names.extend([which_dir + "/dnSdxn" + this_ext, - which_dir + "/dnSSdxn" + this_ext, - which_dir + "/dUSdx" + this_ext, - which_dir + "/dUSSdx" + this_ext]) + these_names.extend( + [ + which_dir + "/dnSdxn" + this_ext, + which_dir + "/dnSSdxn" + this_ext, + which_dir + "/dUSdx" + this_ext, + which_dir + "/dUSSdx" + this_ext, + ] + ) actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"] for icomp in range(0, 6): - this_file_name = ( - which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - ) + this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext these_names.append(this_file_name) - thse_names.append(which_dir + "/dPSdx" + this_ext) + these_names.append(which_dir + "/dPSdx" + this_ext) # if comm.Get_rank() == 0: # print(these_names) @@ -593,8 +638,10 @@ def interpolate_all_stat_and_sstat_fields_onto_points( pynekread(full_fname_mesh, comm, msh=msh, data_dtype=np.single) if msh.gdim < 3: - sys.exit("only 3D data is supported!" 
, - "you can convert your data to 3D using 'convert_2Dstats_to_3D'!") + sys.exit( + "only 3D data is supported!", + "you can convert your data to 3D using 'convert_2Dstats_to_3D'!", + ) if if_do_dssum_before_interp: msh_conn = MeshConnectivity(comm, msh, rel_tol=1e-5) @@ -638,8 +685,8 @@ def interpolate_all_stat_and_sstat_fields_onto_points( msh=msh, point_interpolator_type="multiple_point_legendre_numpy", max_pts=128, - output_fname = interpolation_output_fname, - find_points_tol=find_points_tol + output_fname=interpolation_output_fname, + find_points_tol=find_points_tol, ) else: if comm.Get_rank() == 0: @@ -649,8 +696,8 @@ def interpolate_all_stat_and_sstat_fields_onto_points( msh=msh, point_interpolator_type="multiple_point_legendre_numpy", max_pts=128, - output_fname = interpolation_output_fname, - find_points_tol=find_points_tol + output_fname=interpolation_output_fname, + find_points_tol=find_points_tol, ) else: probes = Probes( @@ -659,8 +706,8 @@ def interpolate_all_stat_and_sstat_fields_onto_points( msh=msh, point_interpolator_type="multiple_point_legendre_numpy", max_pts=128, - output_fname = interpolation_output_fname, - find_points_tol=find_points_tol + output_fname=interpolation_output_fname, + find_points_tol=find_points_tol, ) ########################################################################################### @@ -698,7 +745,9 @@ def interpolate_all_stat_and_sstat_fields_onto_points( # do dssum to make it continuous if if_do_dssum_before_interp: - msh_conn.dssum(field=mean_fields.registry["tmpF"], msh=msh, average="multiplicity") + msh_conn.dssum( + field=mean_fields.registry["tmpF"], msh=msh, average="multiplicity" + ) # coef.dssum(mean_fields.registry["tmpF"], msh) # interpolate the fields @@ -707,5 +756,7 @@ def interpolate_all_stat_and_sstat_fields_onto_points( ) mean_fields.clear() + + ########################################################################################### 
########################################################################################### diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index 34a2a23..3b4f950 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -1,54 +1,57 @@ - -#%% +# %% ##################################################################################### ##################################################################################### ##################################################################################### # function to read and store the interpolated fields as structured and averaged fields ##################################################################################### -def read_interpolated_scalar_stat_hdf5_fields( - path_to_files , - Reynolds_number , - Prandtl_number , - If_average , If_convert_to_single , - Nstruct , av_func , - output_fname='averaged_and_renamed_interpolated_scalar_fields.hdf5' - ): - - #%% +def read_interpolated_scalar_stat_hdf5_fields( + path_to_files, + Reynolds_number, + Prandtl_number, + If_average, + If_convert_to_single, + Nstruct, + av_func, + output_fname="averaged_and_renamed_interpolated_scalar_fields.hdf5", +): + + # %% import h5py import numpy as np import time - #%% + # %% # Generic function to load field by number - def load_field(num, prefix='interpolated_scalar_fields'): + def load_field(num, prefix="interpolated_scalar_fields"): """Load HDF5 field from file by number. 
- + Parameters: - num: field number (int) - prefix: filename prefix (default: 'interpolated_scalar_fields') - + Returns: - Flattened numpy array of field data """ - filename = path_to_files + '/' + f'{prefix}{num:05d}.hdf5' - with h5py.File(filename, 'r') as f: - return np.array(f['/field_0']).flatten() + filename = path_to_files + "/" + f"{prefix}{num:05d}.hdf5" + with h5py.File(filename, "r") as f: + return np.array(f["/field_0"]).flatten() - #%% - print('reading xyz coordinates...') + # %% + print("reading xyz coordinates...") start_time = time.time() - with h5py.File(path_to_files+'/'+'coordinates_interpolated_scalar_fields.hdf5', 'r') as f: - XYZ_vec = np.array(f['/xyz']).T # Transpose to match MATLAB's permute([2,1]) - XYZ_vec = XYZ_vec.T + with h5py.File( + path_to_files + "/" + "coordinates_interpolated_scalar_fields.hdf5", "r" + ) as f: + XYZ_vec = np.array(f["/xyz"]).T # Transpose to match MATLAB's permute([2,1]) + XYZ_vec = XYZ_vec.T Npts = XYZ_vec.shape[0] - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% - print('reading the 42 statistics fields...') + # %% + print("reading the 42 statistics fields...") start_time = time.time() - + S_vec = load_field(0) UiS_vec = np.column_stack([load_field(i) for i in range(1, 4)]) S2_vec = load_field(4) @@ -61,243 +64,648 @@ def load_field(num, prefix='interpolated_scalar_fields'): UidSdxj_vec = np.column_stack([load_field(i) for i in range(20, 29)]) SdUidxj_vec = np.column_stack([load_field(i) for i in range(29, 38)]) SDiss_vec = np.column_stack([load_field(i) for i in range(38, 42)]) - - print(f'Done in {time.time() - start_time:.2f} seconds.') - #%% make 1D arrays 2D - S_vec = S_vec[:, np.newaxis] + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% make 1D arrays 2D + S_vec = S_vec[:, np.newaxis] S2_vec = S2_vec[:, np.newaxis] S3_vec = S3_vec[:, np.newaxis] S4_vec = S4_vec[:, np.newaxis] PS_vec = PS_vec[:, np.newaxis] - 
#%% - print('reading the additional fields...') + # %% + print("reading the additional fields...") start_time = time.time() dSdxj_vec = np.column_stack([load_field(i) for i in range(42, 45)]) - d2Sdx2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) + d2Sdxj2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) dS2dxj_vec = np.column_stack([load_field(i) for i in range(48, 51)]) - d2S2dx2_vec = np.column_stack([load_field(i) for i in range(51, 54)]) - dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 57)]) - dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(57, 60)]) - dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(60, 66)]) - dPSdxj_vec = np.column_stack([load_field(i) for i in range(66, 69)]) + d2S2dxj2_vec = np.column_stack([load_field(i) for i in range(51, 54)]) + dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 63)]) + dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(63, 72)]) + dUiUjSdxk_vec = np.column_stack([load_field(i) for i in range(72, 90)]) + dPSdxj_vec = np.column_stack([load_field(i) for i in range(90, 93)]) - print('Finished reading all fields.') - print(f'Done in {time.time() - start_time:.2f} seconds.') + print("Finished reading all fields.") + print(f"Done in {time.time() - start_time:.2f} seconds.") # Save the variables in a directory vars = {} for name in list(locals().keys()): - if name.endswith('_vec') and isinstance(locals()[name], np.ndarray): + if name.endswith("_vec") and isinstance(locals()[name], np.ndarray): vars[name] = locals()[name] - #%% Convert arrays to single precision if required + # %% Convert arrays to single precision if required if If_convert_to_single: - print('Converting arrays into single precision...') + print("Converting arrays into single precision...") start_time = time.time() for name in list(vars.keys()): - if name.endswith('_vec') and isinstance(vars[name], np.ndarray): + if name.endswith("_vec") and isinstance(vars[name], 
np.ndarray): # print(np.shape(locals()[name])) vars[name] = vars[name].astype(np.float32) - print('Conversion complete.') - print(f'Done in {time.time() - start_time:.2f} seconds.') + print("Conversion complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Reshape arrays based on Nstruct - print('Reshaping into arrays...') + # %% Reshape arrays based on Nstruct + print("Reshaping into arrays...") start_time = time.time() for name in list(vars.keys()): - if name.endswith('_vec') and isinstance(vars[name], np.ndarray): - reshaped_name = name[:-4] + '_struct' - vars[reshaped_name] = vars[name].reshape((Nstruct[1], Nstruct[0], Nstruct[2], vars[name].shape[1]),order="F") + if name.endswith("_vec") and isinstance(vars[name], np.ndarray): + reshaped_name = name[:-4] + "_struct" + vars[reshaped_name] = vars[name].reshape( + (Nstruct[1], Nstruct[0], Nstruct[2], vars[name].shape[1]), order="F" + ) del vars[name] - print('Reshaping complete.') - print(f'Done in {time.time() - start_time:.2f} seconds.') + print("Reshaping complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Permute arrays to original shape - print('Permuting arrays into the original shape...') + # %% Permute arrays to original shape + print("Permuting arrays into the original shape...") start_time = time.time() for name in list(vars.keys()): - if name.endswith('_struct') and isinstance(vars[name], np.ndarray): + if name.endswith("_struct") and isinstance(vars[name], np.ndarray): vars[name] = np.transpose(vars[name], (1, 0, 2, 3)) - print('Permutation complete.') - print(f'Done in {time.time() - start_time:.2f} seconds.') + print("Permutation complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Apply user-specified averaging function if required + # %% Apply user-specified averaging function if required if If_average: - print('Taking the user-specified average using function av_func...') + print("Taking the user-specified average using 
function av_func...") start_time = time.time() for name in list(vars.keys()): - if name.endswith('_struct') and isinstance(vars[name], np.ndarray): + if name.endswith("_struct") and isinstance(vars[name], np.ndarray): vars[name] = av_func(vars[name]) - print('Averaging complete.') - print(f'Done in {time.time() - start_time:.2f} seconds.') + print("Averaging complete.") + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Reynolds and Prandtl numbers needed to calculate viscous related terms later - Rer_here = Reynolds_number - Prt_here = Prandtl_number - vars['Rer_here'] = Rer_here - vars['Prt_here'] = Prt_here + # %% Reynolds and Prandtl numbers needed to calculate viscous related terms later + Rer_here = Reynolds_number + Prt_here = Prandtl_number + vars["Rer_here"] = Rer_here + vars["Prt_here"] = Prt_here - #%% Save the data in HDF5 format - print('Saving the data in HDF5 format...') + # %% Save the data in HDF5 format + print("Saving the data in HDF5 format...") start_time = time.time() - with h5py.File(path_to_files+'/'+output_fname, 'w') as hf: + with h5py.File(path_to_files + "/" + output_fname, "w") as hf: global_vars = dict(vars) # Create a copy to avoid modification issues for name, data in global_vars.items(): - if (name.endswith('_struct') or name.endswith('_here')) and isinstance(data, (np.ndarray, int, float)): + if (name.endswith("_struct") or name.endswith("_here")) and isinstance( + data, (np.ndarray, int, float) + ): hf.create_dataset(name, data=data) - print(f'Done in {time.time() - start_time:.2f} seconds.') - - #%% - print('Data saved successfully in HDF5 format.') + print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% + print("Data saved successfully in HDF5 format.") -#%% +# %% ##################################################################################### ##################################################################################### 
##################################################################################### # function to read the raw but averaged fields and calcuate the scalar budgets in Cartesian coordinates +# the fluid statistics are also needed to calculate some terms in the budgets ##################################################################################### def calculate_scalar_budgets_in_Cartesian( - path_to_files = './' , - input_filename = "averaged_and_renamed_interpolated_scalar_fields.hdf5", - output_filename = "sstat3d_format.hdf5" ): - - #%% + path_to_files="./", + input_scalar_filename="averaged_and_renamed_interpolated_scalar_fields.hdf5", + input_fluid_filename="averaged_and_renamed_interpolated_fields.hdf5", + output_filename="sstat3d_format.hdf5", +): + + # %% import h5py import numpy as np import time + import sys - #%% - with h5py.File(path_to_files+'/'+input_filename, "r") as input_file, \ - h5py.File(path_to_files+'/'+output_filename, "w") as output_file: - - #%% - Rer_here = np.float64(input_file['Rer_here']) - print('Reynolds number = ', Rer_here) + # %% + with ( + h5py.File( + path_to_files + "/" + input_scalar_filename, "r" + ) as input_scalar_file, + h5py.File(path_to_files + "/" + input_fluid_filename, "r") as input_fluid_file, + h5py.File(path_to_files + "/" + output_filename, "w") as output_file, + ): - output_file.create_dataset('Rer_here', data=Rer_here, compression=None) + # %% + Rer_here = np.float64(input_scalar_file["Rer_here"]) + print("Reynolds number = ", Rer_here) - #%% - Prt_here = np.float64(input_file['Prt_here']) - print('Prandtl number = ', Prt_here) + output_file.create_dataset("Rer_here", data=Rer_here, compression=None) - output_file.create_dataset('Prt_here', data=Prt_here, compression=None) + # %% + Prt_here = np.float64(input_scalar_file["Prt_here"]) + print("Prandtl number = ", Prt_here) - #%% XYZ coordinates - print('--------------working on XYZ coordinates...') + output_file.create_dataset("Prt_here", data=Prt_here, 
compression=None) + + # %% XYZ coordinates + print("--------------working on XYZ coordinates...") start_time = time.time() - XYZ = np.array(input_file['XYZ_struct']) - output_file.create_dataset('XYZ', data=XYZ, compression=None) + XYZ = np.array(input_scalar_file["XYZ_struct"]) + output_file.create_dataset("XYZ", data=XYZ, compression=None) Nxyz = XYZ.shape[:3] - print('Number of points: ' , Nxyz) + print("Number of points: ", Nxyz) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Check if scalar XYZ coordinates match fluid XYZ coordinates + print( + "--------------checking if scalar XYZ coordinates match fluid XYZ coordinates..." + ) + start_time = time.time() + + XYZ_fluid = np.array(input_fluid_file["XYZ_struct"]) + Nxyz_fluid = XYZ_fluid.shape[:3] + + # Check if number of points match + if Nxyz != Nxyz_fluid: + sys.exit( + "Number of points in scalar and fluid files do not match! " + f"Scalar: {Nxyz}, Fluid: {Nxyz_fluid}" + ) + + # Check if coordinates match + if not np.allclose(XYZ, XYZ_fluid): + sys.exit("XYZ coordinates in scalar and fluid files do not match!") + else: + print("XYZ coordinates match between scalar and fluid files.") + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Load required fluid statistics for calculating scalar budgets + # %% Mean velocities + print("--------------loading mean velocities...") + start_time = time.time() + + Ui = np.array(input_fluid_file["UVW_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean velocity derivatives + print("--------------loading mean velocity derivatives...") + start_time = time.time() + + dUidXj = np.array(input_fluid_file["dUidxj_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Pressure + print("--------------loading pressure...") + start_time = time.time() + + P = np.array(input_fluid_file["P_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reynolds stresses + 
print("--------------loading Reynolds stresses...") + start_time = time.time() + + UiUj = np.array(input_fluid_file["UiUj_struct"]) + UiUj = UiUj - (Ui[..., [0, 1, 2, 0, 0, 1]] * Ui[..., [0, 1, 2, 1, 2, 2]]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Reynolds stress gradients + print("--------------loading Reynolds stress gradients...") + start_time = time.time() + + dUiUjdXk = np.array(input_fluid_file["UiUjdx_struct"]) + dUiUjdXk = ( + dUiUjdXk + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Start processing scalar statistics + + # %% Scalar and Its Moments + print("--------------working on scalar and its moments...") + start_time = time.time() + + S = np.array(input_scalar_file["S_struct"]) + S2 = np.array(input_scalar_file["S2_struct"]) + S3 = np.array(input_scalar_file["S3_struct"]) + S4 = np.array(input_scalar_file["S4_struct"]) + + S2 = S2 - S**2 + S3 = S3 - S**3 - 3 * S * S2 + S4 = S4 - S**4 - 6 * S**2 * S2 - 4 * S * S3 + + output_file.create_dataset("S", data=S, compression=None) + output_file.create_dataset("S2", data=S2, compression=None) + output_file.create_dataset("S3", data=S3, compression=None) + output_file.create_dataset("S4", data=S4, compression=None) + + del S3, S4 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent Scalar Fluxes + print("--------------working on scalar fluxes...") + start_time = time.time() + + UiS = np.array(input_fluid_file["UiS_struct"]) + + UiS = UiS - Ui * S + + output_file.create_dataset("UiS", data=UiS, compression=None) - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% - # Mean Velocities 
- print('--------------working on mean velocities...') + # %% Turbulent scalar variance fluxes + print("--------------working on scalar variance fluxes...") start_time = time.time() - UVW = np.array(input_file['UVW_struct']) - output_file.create_dataset('UVW', data=UVW, compression=None) + UiS2 = np.array(input_fluid_file["UiS2_struct"]) - print(f'Done in {time.time() - start_time:.2f} seconds.') + UiS2 = UiS2 - Ui * S2 - 2 * S * UiS - Ui * S**2 - #%% Reynolds Stresses - print('--------------working on Reynolds stresses...') + output_file.create_dataset("UiS2", data=UiS2, compression=None) + del UiS2 + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux transport + print("--------------working on scalar flux transport...") start_time = time.time() - Rij = np.array(input_file['UiUj_struct']) - Rij = Rij - (UVW[..., [0, 1, 2, 0, 0, 1]] * UVW[..., [0, 1, 2, 1, 2, 2]]) + UiUjS = np.array(input_fluid_file["UiUjS_struct"]) - output_file.create_dataset('Rij', data=Rij, compression=None) + UiUjS = ( + UiUjS + - Ui[..., [0, 1, 2, 0, 0, 1]] * UiS[..., [0, 1, 2, 1, 2, 2]] + - Ui[..., [0, 1, 2, 1, 2, 2]] * UiS[..., [0, 1, 2, 0, 0, 1]] + - S * UiUj[..., [0, 1, 2, 3, 4, 5]] + - S * Ui[..., [0, 1, 2, 0, 0, 1]] * Ui[..., [0, 1, 2, 1, 2, 2]] + ) - print(f'Done in {time.time() - start_time:.2f} seconds.') + output_file.create_dataset("UiUjS", data=UiUjS, compression=None) + del UiUjS + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Pressure and Its Moments - print('--------------working on pressure and its moments...') + # %% Pressure-scalar correlation + print("--------------working on pressure-scalar correlation...") start_time = time.time() - P = np.array(input_file['P_struct']) - P2 = np.array(input_file['P2_struct']) - P3 = np.array(input_file['P3_struct']) - P4 = np.array(input_file['P4_struct']) + PS = np.array(input_scalar_file["PS_struct"]) + + PS = PS - P * S + + output_file.create_dataset("PS", data=PS, 
compression=None) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar gradients + print("--------------working on mean scalar gradients...") + start_time = time.time() - P2 = P2 - P**2 - P3 = P3 - P**3 - 3 * P * P2 - P4 = P4 - P**4 - 6 * P**2 * P2 - 4 * P * P3 + dSdXj = np.array(input_scalar_file["dSdxj_struct"]) - output_file.create_dataset('P', data=P, compression=None) - output_file.create_dataset('P2', data=P2, compression=None) - output_file.create_dataset('P3', data=P3, compression=None) - output_file.create_dataset('P4', data=P4, compression=None) + output_file.create_dataset("dSdXj", data=dSdXj, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") - del P2, P3, P4 - print(f'Done in {time.time() - start_time:.2f} seconds.') + # %% Mean scalar eqn: convection + print("--------------working on mean scalar eqn: convection...") - #%% - print('--------------working on tripple products...') start_time = time.time() - - # Load relevant datasets from the HDF5 file - UiUjUk = np.array(input_file['UiUjUk_struct'][:]) - - # Perform calculations similar to Matlab - UiUjUk = UiUjUk - ( - UVW[:, :, :, [0, 1, 2, 0, 0, 0, 1, 0, 1, 0]] * - UVW[:, :, :, [0, 1, 2, 0, 0, 1, 1, 2, 2, 1]] * - UVW[:, :, :, [0, 1, 2, 1, 2, 1, 2, 2, 2, 2]] - ) - ( - UVW[:, :, :, [0, 1, 2, 0, 0, 0, 1, 0, 1, 0]] * - Rij[:, :, :, [0, 1, 2, 3, 4, 1, 5, 2, 2, 5]] - ) - ( - UVW[:, :, :, [0, 1, 2, 0, 0, 1, 1, 2, 2, 1]] * - Rij[:, :, :, [0, 1, 2, 3, 4, 3, 5, 4, 5, 4]] - ) - ( - UVW[:, :, :, [0, 1, 2, 1, 2, 1, 2, 2, 2, 2]] * - Rij[:, :, :, [0, 1, 2, 0, 0, 3, 1, 4, 5, 3]] + + S_convection = np.sum(Ui * dSdXj, axis=-1) + + output_file.create_dataset("S_convection", data=S_convection, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: turbulent diffusion + print("--------------working on mean scalar eqn: turbulent diffusion...") + start_time = time.time() + + dUiSdXj = 
np.array(input_scalar_file["dUiSdxj_struct"]) + dUiSdXj = ( + dUiSdXj + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + - S * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] ) - - # Save result into HDF5 file - output_file.create_dataset('UiUjUk', data=UiUjUk) - print(f'Done in {time.time() - start_time:.2f} seconds.') - #%% - print('--------------working on velocity kurtosis...') + S_turb_diffusion = -np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + + output_file.create_dataset( + "S_turb_diffusion", data=S_turb_diffusion, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: molecular diffusion + print("--------------working on mean scalar eqn: molecular diffusion...") + start_time = time.time() + + d2SdXj2 = np.array(input_scalar_file["d2Sdxj2_struct"]) + + S_molecular_diffusion = (1 / (Rer_here * Prt_here)) * np.sum(d2SdXj2, axis=-1) + + output_file.create_dataset( + "S_molecular_diffusion", data=S_molecular_diffusion, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Mean scalar eqn: residual + print("--------------working on mean scalar eqn: residual...") start_time = time.time() - # Load Ui4 from the input file - Ui4 = np.array(input_file['Ui4_struct'][:]) - - # Perform kurtosis calculation - Ui4 = Ui4 - UVW**4 - 6 * UVW**2 * Rij[:, :, :, 0:3] - 4 * UVW * UiUjUk[:, :, :, 0:3] - - # Save result into HDF5 file - output_file.create_dataset('Ui4', data=Ui4) - del UiUjUk, Ui4 - print(f'Done in {time.time() - start_time:.2f} seconds.') + S_residual = S_turb_diffusion + S_molecular_diffusion - S_convection + + output_file.create_dataset("S_residual", data=S_residual, compression=None) + del ( + S_convection, + S_turb_diffusion, + S_molecular_diffusion, + S_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: convection + print("--------------working on scalar variance eqn: convection...") + start_time = 
time.time() + + dS2dXj = np.array(input_scalar_file["dS2dxj_struct"]) + S2_convection = np.sum(Ui * (dS2dXj - 2 * S * dSdXj), axis=-1) + + output_file.create_dataset( + "S2_convection", data=S2_convection, compression=None + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: production + print("--------------working on scalar variance eqn: production...") + start_time = time.time() + + S2_production = -2 * np.sum(UiS * dSdXj, axis=-1) + + output_file.create_dataset( + "S2_production", data=S2_production, compression=None + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: turbulent diffusion + print("--------------working on scalar variance eqn: turbulent diffusion...") + start_time = time.time() + + dUiS2dXj = np.array(input_scalar_file["dUiS2dxj_struct"]) + dUiS2dXj = ( + dUiS2dXj + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + - 2 * UiS * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + - 2 * S * dUiSdXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + - S**2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + ) + + S2_turb_diffusion = -np.sum(dUiS2dXj[..., [0, 4, 8]], axis=-1) + + output_file.create_dataset( + "S2_turb_diffusion", data=S2_turb_diffusion, compression=None + ) + del dUiS2dXj, dS2dXj + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: molecular diffusion + print("--------------working on scalar variance eqn: molecular diffusion...") + start_time = time.time() + + d2S2dXi2 = np.array(input_scalar_file["d2S2dxj2_struct"]) + d2S2dXi2 = d2S2dXi2 - 2 * dSdXj**2 - 2 * S * d2SdXj2 + + S2_molecular_diffusion = 1 / (Rer_here * Prt_here) * np.sum(d2S2dXi2, axis=-1) + + output_file.create_dataset( + "S2_molecular_diffusion", data=S2_molecular_diffusion, compression=None + ) + del d2S2dXi2, d2SdXj2 + print(f"Done in 
{time.time() - start_time:.2f} seconds.") - #%% + # %% Scalar variance eqn: dissipation + print("--------------working on scalar variance eqn: dissipation...") + start_time = time.time() + + S2Diss = np.array(input_scalar_file["SDiss_struct"])[..., 0] + S2Diss = S2Diss - np.sum(dSdXj**2, axis=-1) + + S2_dissipation = -2 * (1 / (Rer_here * Prt_here)) * S2Diss + + output_file.create_dataset( + "S2_dissipation", data=S2_dissipation, compression=None + ) + del S2Diss + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar variance eqn: residual + print("--------------working on scalar variance eqn: residual...") + start_time = time.time() + + S2_residual = ( + S2_production + + S2_turb_diffusion + + S2_molecular_diffusion + + S2_dissipation + - S2_convection + ) + + output_file.create_dataset("S2_residual", data=S2_residual, compression=None) + del ( + S2_production, + S2_turb_diffusion, + S2_molecular_diffusion, + S2_dissipation, + S2_convection, + S2_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: convection + print("--------------working on turbulent scalar flux eqn: convection...") + start_time = time.time() + + UiS_convection = np.zeros((*Nxyz, 3)) + UiS_convection[..., 0] = np.sum(Ui * dUiSdXj[..., :3], axis=-1) + UiS_convection[..., 1] = np.sum(Ui * dUiSdXj[..., 3:6], axis=-1) + UiS_convection[..., 2] = np.sum(Ui * dUiSdXj[..., 6:9], axis=-1) + + output_file.create_dataset( + "UiS_convection", data=UiS_convection, compression=None + ) + + # %% Turbulent scalar flux eqn: velocity gradient production + print( + "--------------working on turbulent scalar flux eqn: velocity gradient production..." 
+ ) + start_time = time.time() + + UiS_velocity_gradient_production = np.zeros((*Nxyz, 3)) + UiS_velocity_gradient_production[..., 0] = -np.sum( + UiS * dUidXj[..., [0, 3, 6]], axis=-1 + ) + UiS_velocity_gradient_production[..., 1] = -np.sum( + UiS * dUidXj[..., [1, 4, 7]], axis=-1 + ) + UiS_velocity_gradient_production[..., 2] = -np.sum( + UiS * dUidXj[..., [2, 5, 8]], axis=-1 + ) + + output_file.create_dataset( + "UiS_velocity_gradient_production", + data=UiS_velocity_gradient_production, + compression=None, + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: scalar gradient production + print( + "--------------working on turbulent scalar flux eqn: scalar gradient production..." + ) + start_time = time.time() + + UiS_scalar_gradient_production = np.zeros((*Nxyz, 3)) + UiS_scalar_gradient_production[..., 0] = -np.sum( + UiUj[..., [0, 3, 4]] * dSdXj, axis=-1 + ) + UiS_scalar_gradient_production[..., 1] = -np.sum( + UiUj[..., [3, 1, 5]] * dSdXj, axis=-1 + ) + UiS_scalar_gradient_production[..., 2] = -np.sum( + UiUj[..., [4, 5, 2]] * dSdXj, axis=-1 + ) + + output_file.create_dataset( + "UiS_scalar_gradient_production", data=UiS_scalar_gradient_production + ) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: turbulent diffusion + print( + "--------------working on turbulent scalar flux eqn: turbulent diffusion..." 
+ ) + start_time = time.time() + + dUiUjSdXk = np.array(input_scalar_file["dUiUjSdxk_struct"]) + + inds_i = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]) + inds_j = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]) + inds_k = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) + inds_didk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]) + inds_djdk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]) + inds_ij = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]) + dUiUjSdXk = ( + dUiUjSdXk + - Ui[..., inds_i] * Ui[..., inds_j] * dSdXj[..., inds_k] + - Ui[..., inds_j] * S * dUidXj[..., inds_didk] + - Ui[..., inds_i] * S * dUidXj[..., inds_djdk] + - Ui[..., inds_j] * dUiSdXj[..., inds_didk] + - UiS[..., inds_i] * dUidXj[..., inds_djdk] + - Ui[..., inds_i] * dUiSdXj[..., inds_djdk] + - UiS[..., inds_j] * dUidXj[..., inds_didk] + - S * dUiUjdXk + - UiUj[..., inds_ij] * dSdXj[..., inds_k] + ) + + UiS_turb_diffusion = np.zeros((*Nxyz, 3)) + for i in range(3): + inds = np.where((inds_i == i) & (inds_j == inds_k))[0] + UiS_turb_diffusion[..., i] = -np.sum(dUiUjSdXk[..., inds], axis=-1) + + output_file.create_dataset( + "UiS_turb_diffusion", data=UiS_turb_diffusion, compression=None + ) + del dUiUjSdXk + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Turbulent scalar flux eqn: viscous diffusion + print( + "--------------working on turbulent scalar flux eqn: viscous diffusion..." 
+ ) + start_time = time.time() + + # %% Turbulent scalar flux eqn: molecular diffusion + + # %% Turbulent scalar flux eqn: scalar-pressure correlation gradient + + # %% Turbulent scalar flux eqn: pressure-scalar gradient correlation + + # %% Turbulent scalar flux eqn: dissipation + + # %% Turbulent scalar flux eqn: residual + + # %% Pressure-scalar gradient correlation + print("--------------working on pressure-scalar gradient correlation...") + start_time = time.time() + + PdSdXi = np.array(input_scalar_file["PdSdxi_struct"]) + + PdSdXi = PdSdXi - P * dSdXj + + output_file.create_dataset("PdSdXi", data=PdSdXi, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Velocity-scalar gradient correlation + print("--------------working on velocity-scalar gradient correlation...") + start_time = time.time() + + UidSdxj = np.array(input_scalar_file["UidSdxj_struct"]) + + UidSdxj = ( + UidSdxj + - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + ) + + output_file.create_dataset("UidSdxj", data=UidSdxj, compression=None) + print(f"Done in {time.time() - start_time:.2f} seconds.") + + # %% Scalar-velocity gradient correlation + print("--------------working on scalar-velocity gradient correlation...") + start_time = time.time() + + SdUidXj = np.array(input_scalar_file["SdUidxj_struct"]) + + SdUidXj = SdUidXj - S * dUidXj + + # %% Scalar dissipation + + # SDiss_vec = np.column_stack([load_field(i) for i in range(38, 42)]) + # d2Sdx2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) + # dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 57)]) + # dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(57, 60)]) + # dPSdxj_vec = np.column_stack([load_field(i) for i in range(66, 69)]) + + # %% print("--------------working on velocity gradients...") start_time = time.time() - + # Read velocity gradient tensor from input file dUidXj = np.array(input_file["dUidxj_struct"][()]) - + # Save 
to output file output_file.create_dataset("dUidXj", data=dUidXj) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% + # %% print("--------------working on momentum convection terms...") start_time = time.time() - # Reshape dUidXj dUidXj_reshaped = dUidXj.reshape(Nxyz[0], Nxyz[1], Nxyz[2], 3, 3, order="F") @@ -308,22 +716,30 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset("Momentum_convection", data=Momentum_convection) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% + # %% print("--------------working on the residual momentum convection terms...") start_time = time.time() # Compute Residual Momentum convection terms Momentum_convectionRes = np.zeros((*Nxyz, 3)) # Shape: (N1, N2, N3, 3) - Momentum_convectionRes[..., 0] = np.sum(UVW[..., [1, 2]] * dUidXj[..., [1, 2]], axis=-1) - Momentum_convectionRes[..., 1] = np.sum(UVW[..., [0, 2]] * dUidXj[..., [3, 5]], axis=-1) - Momentum_convectionRes[..., 2] = np.sum(UVW[..., [0, 1]] * dUidXj[..., [6, 7]], axis=-1) + Momentum_convectionRes[..., 0] = np.sum( + UVW[..., [1, 2]] * dUidXj[..., [1, 2]], axis=-1 + ) + Momentum_convectionRes[..., 1] = np.sum( + UVW[..., [0, 2]] * dUidXj[..., [3, 5]], axis=-1 + ) + Momentum_convectionRes[..., 2] = np.sum( + UVW[..., [0, 1]] * dUidXj[..., [6, 7]], axis=-1 + ) # Save to output file - output_file.create_dataset("Momentum_convectionRes", data=Momentum_convectionRes) + output_file.create_dataset( + "Momentum_convectionRes", data=Momentum_convectionRes + ) del Momentum_convectionRes print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% + # %% print("--------------working on momentum pressure terms...") print("Warning: assuming rho=1 everywhere!") start_time = time.time() @@ -335,8 +751,8 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset("Momentum_pressure", data=Momentum_pressure) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% - print('--------------working on production 
terms...') + # %% + print("--------------working on production terms...") start_time = time.time() Prod_ij = np.zeros((*dUidXj.shape[:3], 6)) @@ -344,30 +760,41 @@ def calculate_scalar_budgets_in_Cartesian( Prod_ij[..., 0] = -2 * np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., :3], axis=3) Prod_ij[..., 1] = -2 * np.sum(Rij[..., [3, 1, 5]] * dUidXj[..., 3:6], axis=3) Prod_ij[..., 2] = -2 * np.sum(Rij[..., [4, 5, 2]] * dUidXj[..., 6:9], axis=3) - Prod_ij[..., 3] = -np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., 3:6] + - Rij[..., [3, 1, 5]] * dUidXj[..., :3], axis=3) - Prod_ij[..., 4] = -np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., 6:9] + - Rij[..., [4, 5, 2]] * dUidXj[..., :3], axis=3) - Prod_ij[..., 5] = -np.sum(Rij[..., [3, 1, 5]] * dUidXj[..., 6:9] + - Rij[..., [4, 5, 2]] * dUidXj[..., 3:6], axis=3) + Prod_ij[..., 3] = -np.sum( + Rij[..., [0, 3, 4]] * dUidXj[..., 3:6] + + Rij[..., [3, 1, 5]] * dUidXj[..., :3], + axis=3, + ) + Prod_ij[..., 4] = -np.sum( + Rij[..., [0, 3, 4]] * dUidXj[..., 6:9] + + Rij[..., [4, 5, 2]] * dUidXj[..., :3], + axis=3, + ) + Prod_ij[..., 5] = -np.sum( + Rij[..., [3, 1, 5]] * dUidXj[..., 6:9] + + Rij[..., [4, 5, 2]] * dUidXj[..., 3:6], + axis=3, + ) TKE_prod = np.sum(Prod_ij[..., :3], axis=3) / 2 - output_file.create_dataset('Prod_ij', data=Prod_ij) - output_file.create_dataset('TKE_prod', data=TKE_prod) + output_file.create_dataset("Prod_ij", data=Prod_ij) + output_file.create_dataset("TKE_prod", data=TKE_prod) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% - print('--------------working on convection terms...') + # %% + print("--------------working on convection terms...") start_time = time.time() - dRij_dxk = np.array(input_file['dUiUjdx_struct'][:]) + dRij_dxk = np.array(input_file["dUiUjdx_struct"][:]) - dRij_dxk = dRij_dxk - \ - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] * \ - dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] - \ - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 
2]] * \ - dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + dRij_dxk = ( + dRij_dxk + - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + ) Conv_ij = np.zeros((*UVW.shape[:3], 6)) @@ -381,12 +808,12 @@ def calculate_scalar_budgets_in_Cartesian( Conv_ij = -Conv_ij TKE_conv = np.sum(Conv_ij[..., :3], axis=3) / 2 - output_file.create_dataset('Conv_ij', data=Conv_ij) - output_file.create_dataset('TKE_conv', data=TKE_conv) + output_file.create_dataset("Conv_ij", data=Conv_ij) + output_file.create_dataset("TKE_conv", data=TKE_conv) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% - print('--------------working on momentum turbulent diffusion terms...') + # %% + print("--------------working on momentum turbulent diffusion terms...") start_time = time.time() Momentum_turb_diffusion = np.zeros((*dRij_dxk.shape[:3], 3)) @@ -395,13 +822,15 @@ def calculate_scalar_budgets_in_Cartesian( Momentum_turb_diffusion[..., 1] = -np.sum(dRij_dxk[..., [9, 4, 17]], axis=3) Momentum_turb_diffusion[..., 2] = -np.sum(dRij_dxk[..., [12, 16, 8]], axis=3) - output_file.create_dataset('Momentum_turb_diffusion', data=Momentum_turb_diffusion) + output_file.create_dataset( + "Momentum_turb_diffusion", data=Momentum_turb_diffusion + ) print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% - print('--------------working on dissipation terms...') + # %% + print("--------------working on dissipation terms...") start_time = time.time() - Diss_ij = np.array(input_file['pseudoDiss_struct'][()]) + Diss_ij = np.array(input_file["pseudoDiss_struct"][()]) # Compute dissipation terms Diss_ij[..., 0] -= np.sum(dUidXj[..., 0:3] ** 2, axis=-1) @@ -415,32 +844,34 @@ def calculate_scalar_budgets_in_Cartesian( TKE_diss = np.sum(Diss_ij[..., 
0:3], axis=-1) / 2 # Save results to output HDF5 file - output_file.create_dataset('Diss_ij', data=Diss_ij, compression=None) - output_file.create_dataset('TKE_diss', data=TKE_diss, compression=None) + output_file.create_dataset("Diss_ij", data=Diss_ij, compression=None) + output_file.create_dataset("TKE_diss", data=TKE_diss, compression=None) del Diss_ij print(f"Time taken: {time.time() - start_time:.2f} seconds") - #%% - print('--------------working on turbulent transport...') + # %% + print("--------------working on turbulent transport...") start_time = time.time() # Define index mappings - ind_dRjkdxk = np.array([ - (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3])-1, - (np.array([4, 2, 6]) - 1) * 3 + np.array([1, 2, 3])-1, - (np.array([5, 6, 3]) - 1) * 3 + np.array([1, 2, 3])-1 - ]) + ind_dRjkdxk = np.array( + [ + (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3]) - 1, + (np.array([4, 2, 6]) - 1) * 3 + np.array([1, 2, 3]) - 1, + (np.array([5, 6, 3]) - 1) * 3 + np.array([1, 2, 3]) - 1, + ] + ) ind_tripple = np.zeros((3, 3, 3), dtype=int) - ind_tripple[:, 0, 0] = (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 0, 1] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 0, 2] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 1, 0] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 1, 1] = (np.array([6, 2, 7]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 1, 2] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 2, 0] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 2, 1] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3])-1 - ind_tripple[:, 2, 2] = (np.array([8, 9, 3]) - 1) * 3 + np.array([1, 2, 3])-1 + ind_tripple[:, 0, 0] = (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 0, 1] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 0, 2] = (np.array([5, 10, 8]) - 1) * 3 + 
np.array([1, 2, 3]) - 1 + ind_tripple[:, 1, 0] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 1, 1] = (np.array([6, 2, 7]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 1, 2] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 2, 0] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 2, 1] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3]) - 1 + ind_tripple[:, 2, 2] = (np.array([8, 9, 3]) - 1) * 3 + np.array([1, 2, 3]) - 1 # Allocate memory TurbTrans_ij = np.zeros((*Nxyz, 3, 3), dtype=np.float32) @@ -449,20 +880,26 @@ def calculate_scalar_budgets_in_Cartesian( for i in range(3): for j in range(3): TurbTrans_ij[..., i, j] = ( - - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[0, i, j]]) - - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[1, i, j]]) - - np.array(input_file['dUiUjUkdx_struct'][..., ind_tripple[2, i, j]]) + -np.array(input_file["dUiUjUkdx_struct"][..., ind_tripple[0, i, j]]) + - np.array( + input_file["dUiUjUkdx_struct"][..., ind_tripple[1, i, j]] + ) + - np.array( + input_file["dUiUjUkdx_struct"][..., ind_tripple[2, i, j]] + ) ) TurbTrans_ij[..., i, j] += ( - UVW[..., i] * np.sum(UVW * dUidXj[..., (np.arange(3) + j * 3)], axis=-1) - + UVW[..., j] * np.sum(UVW * dUidXj[..., (np.arange(3) + i * 3)], axis=-1) + UVW[..., i] + * np.sum(UVW * dUidXj[..., (np.arange(3) + j * 3)], axis=-1) + + UVW[..., j] + * np.sum(UVW * dUidXj[..., (np.arange(3) + i * 3)], axis=-1) + UVW[..., i] * np.sum(dRij_dxk[..., ind_dRjkdxk[j, :]], axis=-1) + UVW[..., j] * np.sum(dRij_dxk[..., ind_dRjkdxk[i, :]], axis=-1) ) # Reorder indices - TurbTrans_ij = TurbTrans_ij.reshape(TurbTrans_ij.shape[:-2] + (-1,) , order="F") + TurbTrans_ij = TurbTrans_ij.reshape(TurbTrans_ij.shape[:-2] + (-1,), order="F") TurbTrans_ij = TurbTrans_ij[..., [0, 4, 8, 1, 2, 5]] # Final computation @@ -470,21 +907,23 @@ def calculate_scalar_budgets_in_Cartesian( TKE_turbTrans = np.sum(TurbTrans_ij[..., 
0:3], axis=-1) / 2 # Save results - output_file.create_dataset('TurbTrans_ij', data=TurbTrans_ij, compression=None) - output_file.create_dataset('TKE_turbTrans', data=TKE_turbTrans, compression=None) + output_file.create_dataset("TurbTrans_ij", data=TurbTrans_ij, compression=None) + output_file.create_dataset( + "TKE_turbTrans", data=TKE_turbTrans, compression=None + ) # Cleanup del TurbTrans_ij, Conv_ij, Prod_ij, dRij_dxk - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% - print('--------------working on viscous diffusion...') + # %% + print("--------------working on viscous diffusion...") start_time = time.time() # Read datasets - d2Ui_dx2 = np.array(input_file['d2Uidx2_struct']) - d2Rij_dx2 = np.array(input_file['d2UiUjdx2_struct']) + d2Ui_dx2 = np.array(input_file["d2Uidx2_struct"]) + d2Rij_dx2 = np.array(input_file["d2UiUjdx2_struct"]) # Compute d2Rij_dx2 d2Rij_dx2 = ( @@ -493,111 +932,152 @@ def calculate_scalar_budgets_in_Cartesian( * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] - - 2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] - * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + - 2 + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] + * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] ) # Compute viscous diffusion - ViscDiff_ij = (1 / Rer_here) * np.sum(d2Rij_dx2.reshape((*Nxyz, 3, -1), order="F"), axis=3) + ViscDiff_ij = (1 / Rer_here) * np.sum( + d2Rij_dx2.reshape((*Nxyz, 3, -1), order="F"), axis=3 + ) # Compute TKE viscous diffusion TKE_ViscDiff = np.sum(ViscDiff_ij[..., 0:3], axis=-1) / 2 # Save results - output_file.create_dataset('ViscDiff_ij', data=ViscDiff_ij, compression=None) - output_file.create_dataset('TKE_ViscDiff', 
data=TKE_ViscDiff, compression=None) + output_file.create_dataset("ViscDiff_ij", data=ViscDiff_ij, compression=None) + output_file.create_dataset("TKE_ViscDiff", data=TKE_ViscDiff, compression=None) # Cleanup del d2Rij_dx2, ViscDiff_ij - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Momentum viscous diffusion terms - print('--------------working on momentum viscous diffusion terms...') + # %% Momentum viscous diffusion terms + print("--------------working on momentum viscous diffusion terms...") start_time = time.time() - Momentum_viscous_diffusion = (1 / Rer_here) * \ - np.sum(d2Ui_dx2.reshape(*Nxyz, 3, 3, order="F"), axis=3) + Momentum_viscous_diffusion = (1 / Rer_here) * np.sum( + d2Ui_dx2.reshape(*Nxyz, 3, 3, order="F"), axis=3 + ) - output_file.create_dataset('Momentum_viscous_diffusion', data=Momentum_viscous_diffusion, compression=None) + output_file.create_dataset( + "Momentum_viscous_diffusion", + data=Momentum_viscous_diffusion, + compression=None, + ) - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Momentum residual terms - print('--------------working on momentum residual terms...') + # %% Momentum residual terms + print("--------------working on momentum residual terms...") start_time = time.time() Momentum_residual = ( - Momentum_viscous_diffusion + Momentum_viscous_diffusion + Momentum_turb_diffusion + Momentum_pressure - Momentum_convection ) - output_file.create_dataset('Momentum_residual', data=Momentum_residual, compression=None) + output_file.create_dataset( + "Momentum_residual", data=Momentum_residual, compression=None + ) # Cleanup - del Momentum_viscous_diffusion, Momentum_turb_diffusion, \ - Momentum_pressure, Momentum_convection, Momentum_residual + del ( + Momentum_viscous_diffusion, + Momentum_turb_diffusion, + Momentum_pressure, + Momentum_convection, + Momentum_residual, + ) - 
print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% Pressure-rate-of-strain terms - print('--------------working on pressure-rate-of-strain terms...') + # %% Pressure-rate-of-strain terms + print("--------------working on pressure-rate-of-strain terms...") start_time = time.time() - PRS_ij = np.array(input_file['PGij_struct']) + PRS_ij = np.array(input_file["PGij_struct"]) PRS_ij = PRS_ij - P * dUidXj PRS_ij = PRS_ij[..., [0, 4, 8, 1, 2, 5]] + PRS_ij[..., [0, 4, 8, 3, 6, 7]] - output_file.create_dataset('PRS_ij', data=PRS_ij, compression=None) + output_file.create_dataset("PRS_ij", data=PRS_ij, compression=None) - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% - print('--------------working on pressure transport terms...') + # %% + print("--------------working on pressure transport terms...") start_time = time.time() - dpdx = np.array(input_file['dPdx_struct']) - dpudx = np.array(input_file['dPUidxj_struct']) + dpdx = np.array(input_file["dPdx_struct"]) + dpudx = np.array(input_file["dPUidxj_struct"]) - dpudx = dpudx - (P * dUidXj) \ - - (UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] * dpdx[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + dpudx = ( + dpudx + - (P * dUidXj) + - ( + UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + * dpdx[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + ) + ) - PressTrans_ij = -(dpudx[..., [0, 4, 8, 1, 2, 5]] + dpudx[..., [0, 4, 8, 3, 6, 7]]) + PressTrans_ij = -( + dpudx[..., [0, 4, 8, 1, 2, 5]] + dpudx[..., [0, 4, 8, 3, 6, 7]] + ) - output_file.create_dataset('dpdx', data=dpdx, compression=None) - output_file.create_dataset('PressTrans_ij', data=PressTrans_ij, compression=None) + output_file.create_dataset("dpdx", data=dpdx, compression=None) + output_file.create_dataset( + "PressTrans_ij", data=PressTrans_ij, compression=None + ) del dpudx - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in 
{time.time() - start_time:.2f} seconds.") - #%% Velocity-Pressure Gradient Terms - print('--------------working on velocity-pressure-gradient terms...') + # %% Velocity-Pressure Gradient Terms + print("--------------working on velocity-pressure-gradient terms...") start_time = time.time() VelPressGrad_ij = PressTrans_ij - PRS_ij TKE_VelPressGrad = np.sum(VelPressGrad_ij[..., 0:3], axis=-1) / 2 - output_file.create_dataset('VelPressGrad_ij', data=VelPressGrad_ij, compression=None) - output_file.create_dataset('TKE_VelPressGrad', data=TKE_VelPressGrad, compression=None) + output_file.create_dataset( + "VelPressGrad_ij", data=VelPressGrad_ij, compression=None + ) + output_file.create_dataset( + "TKE_VelPressGrad", data=TKE_VelPressGrad, compression=None + ) del PRS_ij, PressTrans_ij, VelPressGrad_ij - print(f'Done in {time.time() - start_time:.2f} seconds.') + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% TKE budget residual - print('--------------working on TKE budgets...') + # %% TKE budget residual + print("--------------working on TKE budgets...") start_time = time.time() TKE_residual = ( - TKE_prod + TKE_diss + TKE_turbTrans + TKE_ViscDiff + TKE_VelPressGrad + TKE_conv + TKE_prod + + TKE_diss + + TKE_turbTrans + + TKE_ViscDiff + + TKE_VelPressGrad + + TKE_conv ) - output_file.create_dataset('TKE_residual', data=TKE_residual, compression=None) + output_file.create_dataset("TKE_residual", data=TKE_residual, compression=None) - del TKE_prod, TKE_diss, TKE_turbTrans, TKE_ViscDiff, \ - TKE_VelPressGrad, TKE_conv, TKE_residual - print(f'Done in {time.time() - start_time:.2f} seconds.') + del ( + TKE_prod, + TKE_diss, + TKE_turbTrans, + TKE_ViscDiff, + TKE_VelPressGrad, + TKE_conv, + TKE_residual, + ) + print(f"Done in {time.time() - start_time:.2f} seconds.") - #%% + # %% print("All computations completed and data saved successfully.") From f333e62f747447e26120a68cbac43400e42f532b Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Mon, 23 Feb 2026 
12:57:07 +0100 Subject: [PATCH 06/13] First draft of scalar stats complete --- .../statistics/passive_scalar_budgets.py | 580 ++++++++++++++-- ...calar_budgets_interpolatedPoints_notMPI.py | 635 ++++++------------ 2 files changed, 710 insertions(+), 505 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index a5a4a15..a270710 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -271,11 +271,34 @@ def compute_and_write_additional_sstat_fields( file_keys_UiS = ["vel_0", "vel_1", "vel_2"] file_keys_SS = ["temp"] # "USS" "VSS" "WSS" - file_keys_UjSS = ["scal_2", "scal_3", "scal_4"] + file_keys_UiSS = ["scal_2", "scal_3", "scal_4"] # "UUS" "VVS" "WWS" "UVS" "UWS" "VWS" file_keys_UiUjS = ["scal_5", "scal_6", "scal_7", "scal_8", "scal_9", "scal_10"] file_keys_PS = ["scal_11"] + file_keys_UidSdxj = [ + "scal_15", + "scal_16", + "scal_17", + "scal_18", + "scal_19", + "scal_20", + "scal_21", + "scal_22", + "scal_23", + ] + file_keys_SdUidxj = [ + "scal_24", + "scal_25", + "scal_26", + "scal_27", + "scal_28", + "scal_29", + "scal_30", + "scal_31", + "scal_32", + ] + ########################################################################################### # S first and second derivatives ########################################################################################### @@ -377,7 +400,7 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) - this_file_name = which_dir + "/dUSdx" + this_ext + this_file_name = which_dir + "/dUiSdxj" + this_ext write_file_9c( comm, msh, @@ -391,7 +414,7 @@ def compute_and_write_additional_sstat_fields( stat_fields.clear() ########################################################################################### - # UiSS first derivative + # UiSS 
divergence ########################################################################################### stat_fields.add_field( @@ -407,7 +430,7 @@ def compute_and_write_additional_sstat_fields( field_name="VSS", file_type="fld", file_name=full_fname_stat, - file_key=file_keys_UiS[1], + file_key=file_keys_UiSS[1], dtype=np.single, ) stat_fields.add_field( @@ -434,50 +457,179 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) - this_file_name = which_dir + "/dUSSdx" + this_ext - write_file_9c( + this_file_name = which_dir + "/dUjSSdxj" + this_ext + write_file_3c( comm, msh, - dQ1_dxi, - dQ2_dxi, - dQ3_dxi, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, this_file_name, if_write_mesh=if_write_mesh, ) stat_fields.clear() - del dQ2_dxi, dQ3_dxi ########################################################################################### - # UiUjS first derivative + # UiUjS gradients: dUiUjS/dxj ########################################################################################### - actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"] + # dUUjS/dxj - for icomp in range(0, 6): - if comm.Get_rank() == 0: - print("working on: " + actual_field_names[icomp]) + stat_fields.add_field( + comm, + field_name="UUS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[4], + dtype=np.single, + ) - stat_fields.add_field( - comm, - field_name=actual_field_names[icomp], - file_type="fld", - file_name=full_fname_stat, - file_key=file_keys_UiUjS[icomp], - dtype=np.single, - ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UUS"], 
dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UVS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UWS"], dQ3_dxi + ) - compute_scalar_first_derivative( - comm, msh, coef, stat_fields.registry[actual_field_names[icomp]], dQ1_dxi - ) + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) - if if_do_dssum_on_derivatives: - do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + this_file_name = which_dir + "/dUUjSdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) - this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) + stat_fields.clear() - stat_fields.clear() + # dVUjS/dxj + + stat_fields.add_field( + comm, + field_name="UVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VVS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UVS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VVS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VWS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dVUjSdxj" + this_ext + 
write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # dWUjS/dxj + + stat_fields.add_field( + comm, + field_name="UWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[5], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WWS", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UiUjS[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UWS"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VWS"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WWS"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dWUjSdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() ########################################################################################### # PS first derivative @@ -498,45 +650,340 @@ def compute_and_write_additional_sstat_fields( if if_do_dssum_on_derivatives: do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) - this_file_name = which_dir + "/dPSdx" + this_ext + this_file_name = which_dir + "/dPSdxj" + this_ext write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) stat_fields.clear() - del dQ1_dxi - if comm.Get_rank() == 0: - print("-------As a great man once said: run successful: dying ...") + ########################################################################################### + # UidSdxj 
gradients: d(UidS/dxj)/dxj + ########################################################################################### + + # d(UdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="UdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="UdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[2], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["UdSdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dUdSdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(VdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="VdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="VdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdy"], 
dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["VdSdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dVdSdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(WdS/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="WdSdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[6], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WdSdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[7], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="WdSdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[8], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["WdSdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dWdSdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() ########################################################################################### - # SdUidxj first derivative + # SdUidxj gradients: d(SdUi/dxj)/dxj ########################################################################################### - actual_field_names = ["UUS", "VVS", "WWS", "UVS", 
"UWS", "VWS"] + # d(SdU/dxj)/dxj - for icomp in range(0, 6): - if comm.Get_rank() == 0: - print("working on: " + actual_field_names[icomp]) + stat_fields.add_field( + comm, + field_name="SdUdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[0], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdUdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[1], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdUdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[2], + dtype=np.single, + ) - stat_fields.add_field( - comm, - field_name=actual_field_names[icomp], - file_type="fld", - file_name=full_fname_stat, - file_key=file_keys_UiUjS[icomp], - dtype=np.single, - ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdUdz"], dQ3_dxi + ) - compute_scalar_first_derivative( - comm, msh, coef, stat_fields.registry[actual_field_names[icomp]], dQ1_dxi - ) + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dSdUdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(SdV/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="SdVdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[3], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdVdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[4], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdVdz", + 
file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_SdUidxj[5], + dtype=np.single, + ) + + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdVdz"], dQ3_dxi + ) + + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) + + this_file_name = which_dir + "/dSdVdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + # d(SdW/dxj)/dxj + + stat_fields.add_field( + comm, + field_name="SdWdx", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[6], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdWdy", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[7], + dtype=np.single, + ) + stat_fields.add_field( + comm, + field_name="SdWdz", + file_type="fld", + file_name=full_fname_stat, + file_key=file_keys_UidSdxj[8], + dtype=np.single, + ) - if if_do_dssum_on_derivatives: - do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdx"], dQ1_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdy"], dQ2_dxi + ) + compute_scalar_first_derivative( + comm, msh, coef, stat_fields.registry["SdWdz"], dQ3_dxi + ) - this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - write_file_3c(comm, msh, dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh) + if if_do_dssum_on_derivatives: + do_dssum_on_3comp_vector(dQ1_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ2_dxi, msh_conn, msh) + do_dssum_on_3comp_vector(dQ3_dxi, 
msh_conn, msh) - stat_fields.clear() + this_file_name = which_dir + "/dSdWdxjdxj" + this_ext + write_file_3c( + comm, + msh, + dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + this_file_name, + if_write_mesh=if_write_mesh, + ) + + stat_fields.clear() + + del dQ1_dxi, dQ2_dxi, dQ3_dxi + if comm.Get_rank() == 0: + print("-------As a great man once said: run successful: dying ...") ########################################################################################### @@ -618,18 +1065,21 @@ def interpolate_all_stat_and_sstat_fields_onto_points( [ which_dir + "/dnSdxn" + this_ext, which_dir + "/dnSSdxn" + this_ext, - which_dir + "/dUSdx" + this_ext, - which_dir + "/dUSSdx" + this_ext, + which_dir + "/dUiSdxj" + this_ext, + which_dir + "/dUjSSdxj" + this_ext, + which_dir + "/dUUjSdxj" + this_ext, + which_dir + "/dVUjSdxj" + this_ext, + which_dir + "/dWUjSdxj" + this_ext, + which_dir + "/dPSdxj" + this_ext, + which_dir + "/dUdSdxjdxj" + this_ext, + which_dir + "/dVdSdxjdxj" + this_ext, + which_dir + "/dWdSdxjdxj" + this_ext, + which_dir + "/dSdUdxjdxj" + this_ext, + which_dir + "/dSdVdxjdxj" + this_ext, + which_dir + "/dSdWdxjdxj" + this_ext, ] ) - actual_field_names = ["UUS", "VVS", "WWS", "UVS", "UWS", "VWS"] - for icomp in range(0, 6): - this_file_name = which_dir + "/d" + actual_field_names[icomp] + "dx" + this_ext - these_names.append(this_file_name) - - these_names.append(which_dir + "/dPSdx" + this_ext) - # if comm.Get_rank() == 0: # print(these_names) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index 3b4f950..cce5995 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -4,6 +4,9 @@ ##################################################################################### # function 
to read and store the interpolated fields as structured and averaged fields ##################################################################################### +import time + + def read_interpolated_scalar_stat_hdf5_fields( path_to_files, Reynolds_number, @@ -83,9 +86,11 @@ def load_field(num, prefix="interpolated_scalar_fields"): dS2dxj_vec = np.column_stack([load_field(i) for i in range(48, 51)]) d2S2dxj2_vec = np.column_stack([load_field(i) for i in range(51, 54)]) dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 63)]) - dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(63, 72)]) - dUiUjSdxk_vec = np.column_stack([load_field(i) for i in range(72, 90)]) - dPSdxj_vec = np.column_stack([load_field(i) for i in range(90, 93)]) + dUjS2dxj_vec = np.column_stack([load_field(63)]) + dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(64, 67)]) + dPSdxj_vec = np.column_stack([load_field(i) for i in range(67, 70)]) + dUidSdxjdxj_vec = np.column_stack([load_field(i) for i in range(70, 73)]) + dSdUidxjdxj_vec = np.column_stack([load_field(i) for i in range(73, 76)]) print("Finished reading all fields.") print(f"Done in {time.time() - start_time:.2f} seconds.") @@ -246,14 +251,22 @@ def calculate_scalar_budgets_in_Cartesian( print(f"Done in {time.time() - start_time:.2f} seconds.") - # %% Mean velocity derivatives - print("--------------loading mean velocity derivatives...") + # %% Mean velocity gradients + print("--------------loading mean velocity gradients...") start_time = time.time() dUidXj = np.array(input_fluid_file["dUidxj_struct"]) print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% Mean velocity second derivatives + print("--------------loading mean velocity second derivatives...") + start_time = time.time() + + d2UidXj2 = np.array(input_fluid_file["d2Uidx2_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% Pressure print("--------------loading pressure...") start_time = time.time() @@ 
-262,6 +275,14 @@ def calculate_scalar_budgets_in_Cartesian( print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% Pressure gradients + print("--------------loading pressure gradients...") + start_time = time.time() + + dPdXj = np.array(input_fluid_file["dPdxj_struct"]) + + print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% Reynolds stresses print("--------------loading Reynolds stresses...") start_time = time.time() @@ -379,6 +400,8 @@ def calculate_scalar_budgets_in_Cartesian( S_convection = np.sum(Ui * dSdXj, axis=-1) + S_convection = -S_convection # Put on RHS + output_file.create_dataset("S_convection", data=S_convection, compression=None) print(f"Done in {time.time() - start_time:.2f} seconds.") @@ -420,7 +443,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on mean scalar eqn: residual...") start_time = time.time() - S_residual = S_turb_diffusion + S_molecular_diffusion - S_convection + S_residual = S_turb_diffusion + S_molecular_diffusion + S_convection output_file.create_dataset("S_residual", data=S_residual, compression=None) del ( @@ -438,6 +461,8 @@ def calculate_scalar_budgets_in_Cartesian( dS2dXj = np.array(input_scalar_file["dS2dxj_struct"]) S2_convection = np.sum(Ui * dS2dXj, axis=-1) + S2_convection = -S2_convection # Put on RHS + output_file.create_dataset( "S2_convection", data=S2_convection, compression=None ) @@ -460,25 +485,36 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on scalar variance eqn: turbulent diffusion...") start_time = time.time() - dUiS2dXj = np.array(input_scalar_file["dUiS2dxj_struct"]) - dUiS2dXj = ( - dUiS2dXj - - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - - 2 * UiS * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - - 2 * S * dUiSdXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - - S2 * 
dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - ) - - S2_turb_diffusion = -np.sum(dUiS2dXj[:, 0:3], axis=-1) + dUjS2dXj = np.array(input_scalar_file["dUjS2dxj_struct"]) + + dirac = [0, 4, 8] + dUjS2dXj = dUjS2dXj - np.sum( + Ui * dS2dXj + + S2 * dUidXj[..., dirac] + + 2 * UiS * dSdXj + + 2 * S * dUiSdXj[..., dirac] + + Ui * dS2dXj + + S2 * dUidXj[..., dirac], + axis=-1, + ) + # ( + # dUiS2dXj + # - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + # * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + # - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + # - 2 * UiS * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + # - 2 * S * dUiSdXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + # - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] + # * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] + # - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] + # ) + + S2_turb_diffusion = -dUjS2dXj output_file.create_dataset( "S2_turb_diffusion", data=S2_turb_diffusion, compression=None ) - del dUiS2dXj + del dUjS2dXj print(f"Done in {time.time() - 0:.2f} seconds.") # %% Scalar variance eqn: molecular diffusion @@ -520,7 +556,7 @@ def calculate_scalar_budgets_in_Cartesian( + S2_turb_diffusion + S2_molecular_diffusion + S2_dissipation - - S2_convection + + S2_convection ) output_file.create_dataset("S2_residual", data=S2_residual, compression=None) @@ -543,6 +579,8 @@ def calculate_scalar_budgets_in_Cartesian( UiS_convection[..., 1] = np.sum(Ui * dUiSdXj[..., 3:6], axis=-1) UiS_convection[..., 2] = np.sum(Ui * dUiSdXj[..., 6:9], axis=-1) + UiS_convection = -UiS_convection # Put on RHS + output_file.create_dataset( "UiS_convection", data=UiS_convection, compression=None ) @@ -601,31 +639,61 @@ def calculate_scalar_budgets_in_Cartesian( ) start_time = time.time() - dUiUjSdXk = np.array(input_scalar_file["dUiUjSdxk_struct"]) - - inds_i = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]) - inds_j = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]) - inds_k = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) - 
inds_didk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]) - inds_djdk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]) - inds_ij = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]) - dUiUjSdXk = ( - dUiUjSdXk - - Ui[..., inds_i] * Ui[..., inds_j] * dSdXj[..., inds_k] - - Ui[..., inds_j] * S * dUidXj[..., inds_didk] - - Ui[..., inds_i] * S * dUidXj[..., inds_djdk] - - Ui[..., inds_j] * dUiSdXj[..., inds_didk] - - UiS[..., inds_i] * dUidXj[..., inds_djdk] - - Ui[..., inds_i] * dUiSdXj[..., inds_djdk] - - UiS[..., inds_j] * dUidXj[..., inds_didk] - - S * dUiUjdXk - - UiUj[..., inds_ij] * dSdXj[..., inds_k] - ) + dUiUjSdXj = np.array(input_scalar_file["dUiUjSdxj_struct"]) - UiS_turb_diffusion = np.zeros((*Nxyz, 3)) for i in range(3): - inds = np.where((inds_i == i) & (inds_j == inds_k))[0] - UiS_turb_diffusion[..., i] = -np.sum(dUiUjSdXk[..., inds], axis=-1) + dUiUjSdXj[..., i] = ( + dUiUjSdXj[..., i] + - Ui[..., i] * np.sum(Ui * dSdXj, axis=-1) + - S * np.sum(Ui * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - Ui[..., i] * S * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - np.sum(Ui * dUiSdXj[..., 3 * i : 3 * i + 3], axis=-1) + - UiS[..., i] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - Ui[..., i] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + - np.sum(UiS * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + ) + + dUiUjSdXj[..., 0] = ( + dUiUjSdXj[..., 0] + - S * np.sum(dUiUjdXk[..., [0, 10, 14]], axis=-1) + - np.sum(UiUj[..., [0, 3, 4]] * dSdXj, axis=-1) + ) + dUiUjSdXj[..., 1] = ( + dUiUjSdXj[..., 1] + - S * np.sum(dUiUjdXk[..., [9, 4, 17]], axis=-1) + - np.sum(UiUj[..., [3, 1, 5]] * dSdXj, axis=-1) + ) + dUiUjSdXj[..., 2] = ( + dUiUjSdXj[..., 2] + - S * np.sum(dUiUjdXk[..., [12, 16, 8]], axis=-1) + - np.sum(UiUj[..., [4, 5, 2]] * dSdXj, axis=-1) + ) + + UiS_turb_diffusion = -np.sum(dUiUjSdXj, axis=-1) + + # inds_i = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]) + # inds_j = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 
1, 1, 1, 2, 2, 2, 2, 2, 2]) + # inds_k = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) + # inds_didk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]) + # inds_djdk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]) + # inds_ij = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]) + # dUiUjSdXk = ( + # dUiUjSdXk + # - Ui[..., inds_i] * Ui[..., inds_j] * dSdXj[..., inds_k] + # - Ui[..., inds_j] * S * dUidXj[..., inds_didk] + # - Ui[..., inds_i] * S * dUidXj[..., inds_djdk] + # - Ui[..., inds_j] * dUiSdXj[..., inds_didk] + # - UiS[..., inds_i] * dUidXj[..., inds_djdk] + # - Ui[..., inds_i] * dUiSdXj[..., inds_djdk] + # - UiS[..., inds_j] * dUidXj[..., inds_didk] + # - S * dUiUjdXk + # - UiUj[..., inds_ij] * dSdXj[..., inds_k] + # ) + + # UiS_turb_diffusion = np.zeros((*Nxyz, 3)) + # for i in range(3): + # inds = np.where((inds_i == i) & (inds_j == inds_k))[0] + # UiS_turb_diffusion[..., i] = -np.sum(dUiUjSdXk[..., inds], axis=-1) output_file.create_dataset( "UiS_turb_diffusion", data=UiS_turb_diffusion, compression=None @@ -633,449 +701,136 @@ def calculate_scalar_budgets_in_Cartesian( del dUiUjSdXk print(f"Done in {time.time() - start_time:.2f} seconds.") - # %% Turbulent scalar flux eqn: viscous diffusion - print( - "--------------working on turbulent scalar flux eqn: viscous diffusion..." 
- ) - start_time = time.time() - # %% Turbulent scalar flux eqn: molecular diffusion - - # %% Turbulent scalar flux eqn: scalar-pressure correlation gradient - - # %% Turbulent scalar flux eqn: pressure-scalar gradient correlation - - # %% Turbulent scalar flux eqn: dissipation - - # %% Turbulent scalar flux eqn: residual - - # %% Pressure-scalar gradient correlation - print("--------------working on pressure-scalar gradient correlation...") - start_time = time.time() - - PdSdXi = np.array(input_scalar_file["PdSdxi_struct"]) - - PdSdXi = PdSdXi - P * dSdXj - - output_file.create_dataset("PdSdXi", data=PdSdXi, compression=None) - print(f"Done in {time.time() - start_time:.2f} seconds.") - - # %% Velocity-scalar gradient correlation - print("--------------working on velocity-scalar gradient correlation...") - start_time = time.time() - - UidSdxj = np.array(input_scalar_file["UidSdxj_struct"]) - - UidSdxj = ( - UidSdxj - - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - * dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - ) - - output_file.create_dataset("UidSdxj", data=UidSdxj, compression=None) - print(f"Done in {time.time() - start_time:.2f} seconds.") - - # %% Scalar-velocity gradient correlation - print("--------------working on scalar-velocity gradient correlation...") - start_time = time.time() - - SdUidXj = np.array(input_scalar_file["SdUidxj_struct"]) - - SdUidXj = SdUidXj - S * dUidXj - - # %% Scalar dissipation - - # SDiss_vec = np.column_stack([load_field(i) for i in range(38, 42)]) - # d2Sdx2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) - # dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 57)]) - # dUiS2dxj_vec = np.column_stack([load_field(i) for i in range(57, 60)]) - # dPSdxj_vec = np.column_stack([load_field(i) for i in range(66, 69)]) - - # %% - print("--------------working on velocity gradients...") - start_time = time.time() - - # Read velocity gradient tensor from input file - dUidXj = np.array(input_file["dUidxj_struct"][()]) - - # 
Save to output file - output_file.create_dataset("dUidXj", data=dUidXj) - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on momentum convection terms...") - start_time = time.time() - - # Reshape dUidXj - dUidXj_reshaped = dUidXj.reshape(Nxyz[0], Nxyz[1], Nxyz[2], 3, 3, order="F") - - # Compute Momentum convection - Momentum_convection = np.sum(UVW[..., np.newaxis] * dUidXj_reshaped, axis=3) - - # Save to output file - output_file.create_dataset("Momentum_convection", data=Momentum_convection) - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on the residual momentum convection terms...") - start_time = time.time() - - # Compute Residual Momentum convection terms - Momentum_convectionRes = np.zeros((*Nxyz, 3)) # Shape: (N1, N2, N3, 3) - Momentum_convectionRes[..., 0] = np.sum( - UVW[..., [1, 2]] * dUidXj[..., [1, 2]], axis=-1 - ) - Momentum_convectionRes[..., 1] = np.sum( - UVW[..., [0, 2]] * dUidXj[..., [3, 5]], axis=-1 - ) - Momentum_convectionRes[..., 2] = np.sum( - UVW[..., [0, 1]] * dUidXj[..., [6, 7]], axis=-1 - ) - - # Save to output file - output_file.create_dataset( - "Momentum_convectionRes", data=Momentum_convectionRes - ) - del Momentum_convectionRes - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on momentum pressure terms...") - print("Warning: assuming rho=1 everywhere!") - start_time = time.time() - - # Compute Momentum pressure - Momentum_pressure = -np.array(input_file["dPdx_struct"][()]) - - # Save to output file - output_file.create_dataset("Momentum_pressure", data=Momentum_pressure) - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on production terms...") - start_time = time.time() - - Prod_ij = np.zeros((*dUidXj.shape[:3], 6)) - - Prod_ij[..., 0] = -2 * np.sum(Rij[..., [0, 3, 4]] * dUidXj[..., :3], axis=3) - Prod_ij[..., 1] = -2 * 
np.sum(Rij[..., [3, 1, 5]] * dUidXj[..., 3:6], axis=3) - Prod_ij[..., 2] = -2 * np.sum(Rij[..., [4, 5, 2]] * dUidXj[..., 6:9], axis=3) - Prod_ij[..., 3] = -np.sum( - Rij[..., [0, 3, 4]] * dUidXj[..., 3:6] - + Rij[..., [3, 1, 5]] * dUidXj[..., :3], - axis=3, - ) - Prod_ij[..., 4] = -np.sum( - Rij[..., [0, 3, 4]] * dUidXj[..., 6:9] - + Rij[..., [4, 5, 2]] * dUidXj[..., :3], - axis=3, - ) - Prod_ij[..., 5] = -np.sum( - Rij[..., [3, 1, 5]] * dUidXj[..., 6:9] - + Rij[..., [4, 5, 2]] * dUidXj[..., 3:6], - axis=3, + print( + "--------------working on turbulent scalar flux eqn: molecular diffusion..." ) - - TKE_prod = np.sum(Prod_ij[..., :3], axis=3) / 2 - - output_file.create_dataset("Prod_ij", data=Prod_ij) - output_file.create_dataset("TKE_prod", data=TKE_prod) - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on convection terms...") start_time = time.time() - dRij_dxk = np.array(input_file["dUiUjdx_struct"][:]) - - dRij_dxk = ( - dRij_dxk - - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] - * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] - - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] - * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] - ) - - Conv_ij = np.zeros((*UVW.shape[:3], 6)) - - Conv_ij[..., 0] = np.sum(UVW[..., :3] * dRij_dxk[..., :3], axis=3) - Conv_ij[..., 1] = np.sum(UVW[..., :3] * dRij_dxk[..., 3:6], axis=3) - Conv_ij[..., 2] = np.sum(UVW[..., :3] * dRij_dxk[..., 6:9], axis=3) - Conv_ij[..., 3] = np.sum(UVW[..., :3] * dRij_dxk[..., 9:12], axis=3) - Conv_ij[..., 4] = np.sum(UVW[..., :3] * dRij_dxk[..., 12:15], axis=3) - Conv_ij[..., 5] = np.sum(UVW[..., :3] * dRij_dxk[..., 15:18], axis=3) - - Conv_ij = -Conv_ij - TKE_conv = np.sum(Conv_ij[..., :3], axis=3) / 2 - - output_file.create_dataset("Conv_ij", data=Conv_ij) - output_file.create_dataset("TKE_conv", data=TKE_conv) - print(f"Time taken: {time.time() - start_time:.2f} 
seconds") - - # %% - print("--------------working on momentum turbulent diffusion terms...") - start_time = time.time() - - Momentum_turb_diffusion = np.zeros((*dRij_dxk.shape[:3], 3)) - - Momentum_turb_diffusion[..., 0] = -np.sum(dRij_dxk[..., [0, 10, 14]], axis=3) - Momentum_turb_diffusion[..., 1] = -np.sum(dRij_dxk[..., [9, 4, 17]], axis=3) - Momentum_turb_diffusion[..., 2] = -np.sum(dRij_dxk[..., [12, 16, 8]], axis=3) - - output_file.create_dataset( - "Momentum_turb_diffusion", data=Momentum_turb_diffusion + dSdUidXjdXj = np.column_stack( + [ + np.array(input_scalar_file["dSdUdxjdxj_struct"]), + np.array(input_scalar_file["dSdVdxjdxj_struct"]), + np.array(input_scalar_file["dSdWdxjdxj_struct"]), + ] ) - print(f"Time taken: {time.time() - start_time:.2f} seconds") - # %% - print("--------------working on dissipation terms...") - start_time = time.time() - Diss_ij = np.array(input_file["pseudoDiss_struct"][()]) - - # Compute dissipation terms - Diss_ij[..., 0] -= np.sum(dUidXj[..., 0:3] ** 2, axis=-1) - Diss_ij[..., 1] -= np.sum(dUidXj[..., 3:6] ** 2, axis=-1) - Diss_ij[..., 2] -= np.sum(dUidXj[..., 6:9] ** 2, axis=-1) - Diss_ij[..., 3] -= np.sum(dUidXj[..., 0:3] * dUidXj[..., 3:6], axis=-1) - Diss_ij[..., 4] -= np.sum(dUidXj[..., 0:3] * dUidXj[..., 6:9], axis=-1) - Diss_ij[..., 5] -= np.sum(dUidXj[..., 3:6] * dUidXj[..., 6:9], axis=-1) - - Diss_ij = -2.0 / Rer_here * Diss_ij - TKE_diss = np.sum(Diss_ij[..., 0:3], axis=-1) / 2 - - # Save results to output HDF5 file - output_file.create_dataset("Diss_ij", data=Diss_ij, compression=None) - output_file.create_dataset("TKE_diss", data=TKE_diss, compression=None) - del Diss_ij - print(f"Time taken: {time.time() - start_time:.2f} seconds") - - # %% - print("--------------working on turbulent transport...") - start_time = time.time() + for i in range(3): + dSdUidXjdXj[..., i] = ( + dSdUidXjdXj[..., i] + - np.sum(dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - S * np.sum(d2UidXj2[..., 3 * i : 3 * i + 3], axis=-1) + 
) - # Define index mappings - ind_dRjkdxk = np.array( + dUidSdXjdXj = np.column_stack( [ - (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3]) - 1, - (np.array([4, 2, 6]) - 1) * 3 + np.array([1, 2, 3]) - 1, - (np.array([5, 6, 3]) - 1) * 3 + np.array([1, 2, 3]) - 1, + np.array(input_scalar_file["dUdxjSdxj_struct"]), + np.array(input_scalar_file["dVdxjSdxj_struct"]), + np.array(input_scalar_file["dWdxjSdxj_struct"]), ] ) - ind_tripple = np.zeros((3, 3, 3), dtype=int) - ind_tripple[:, 0, 0] = (np.array([1, 4, 5]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 0, 1] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 0, 2] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 1, 0] = (np.array([4, 6, 10]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 1, 1] = (np.array([6, 2, 7]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 1, 2] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 2, 0] = (np.array([5, 10, 8]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 2, 1] = (np.array([10, 7, 9]) - 1) * 3 + np.array([1, 2, 3]) - 1 - ind_tripple[:, 2, 2] = (np.array([8, 9, 3]) - 1) * 3 + np.array([1, 2, 3]) - 1 - - # Allocate memory - TurbTrans_ij = np.zeros((*Nxyz, 3, 3), dtype=np.float32) - - # Compute turbulent transport for i in range(3): - for j in range(3): - TurbTrans_ij[..., i, j] = ( - -np.array(input_file["dUiUjUkdx_struct"][..., ind_tripple[0, i, j]]) - - np.array( - input_file["dUiUjUkdx_struct"][..., ind_tripple[1, i, j]] - ) - - np.array( - input_file["dUiUjUkdx_struct"][..., ind_tripple[2, i, j]] - ) - ) - - TurbTrans_ij[..., i, j] += ( - UVW[..., i] - * np.sum(UVW * dUidXj[..., (np.arange(3) + j * 3)], axis=-1) - + UVW[..., j] - * np.sum(UVW * dUidXj[..., (np.arange(3) + i * 3)], axis=-1) - + UVW[..., i] * np.sum(dRij_dxk[..., ind_dRjkdxk[j, :]], axis=-1) - + UVW[..., j] * np.sum(dRij_dxk[..., ind_dRjkdxk[i, :]], axis=-1) - ) - - # Reorder indices - TurbTrans_ij = 
TurbTrans_ij.reshape(TurbTrans_ij.shape[:-2] + (-1,), order="F") - TurbTrans_ij = TurbTrans_ij[..., [0, 4, 8, 1, 2, 5]] - - # Final computation - TurbTrans_ij -= Conv_ij + Prod_ij - TKE_turbTrans = np.sum(TurbTrans_ij[..., 0:3], axis=-1) / 2 - - # Save results - output_file.create_dataset("TurbTrans_ij", data=TurbTrans_ij, compression=None) - output_file.create_dataset( - "TKE_turbTrans", data=TKE_turbTrans, compression=None - ) - - # Cleanup - del TurbTrans_ij, Conv_ij, Prod_ij, dRij_dxk - - print(f"Done in {time.time() - start_time:.2f} seconds.") + dUidSdXjdXj[..., i] = ( + dUidSdXjdXj[..., i] + - np.sum(dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - Ui[..., i] * np.sum(d2SdXj2, axis=-1) + ) - # %% - print("--------------working on viscous diffusion...") - start_time = time.time() + UiS_molecular_diffusion = (1 / (Rer_here * Prt_here)) * dUidSdXjdXj + ( + 1 / Rer_here + ) * dSdUidXjdXj - # Read datasets - d2Ui_dx2 = np.array(input_file["d2Uidx2_struct"]) - d2Rij_dx2 = np.array(input_file["d2UiUjdx2_struct"]) - - # Compute d2Rij_dx2 - d2Rij_dx2 = ( - d2Rij_dx2 - - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] - * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] - - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]] - * d2Ui_dx2[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] - - 2 - * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]] - * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 6, 7, 8]] + output_file.create_dataset( + "UiS_molecular_diffusion", data=UiS_molecular_diffusion, compression=None ) + del dSdUidXjdXj, dUidSdXjdXj + print(f"Done in {time.time() - start_time:.2f} seconds.") - # Compute viscous diffusion - ViscDiff_ij = (1 / Rer_here) * np.sum( - d2Rij_dx2.reshape((*Nxyz, 3, -1), order="F"), axis=3 + # %% Turbulent scalar flux eqn: scalar-pressure correlation gradient + print( + "--------------working on turbulent scalar flux eqn: scalar-pressure 
correlation gradient..." ) + start_time = time.time() - # Compute TKE viscous diffusion - TKE_ViscDiff = np.sum(ViscDiff_ij[..., 0:3], axis=-1) / 2 - - # Save results - output_file.create_dataset("ViscDiff_ij", data=ViscDiff_ij, compression=None) - output_file.create_dataset("TKE_ViscDiff", data=TKE_ViscDiff, compression=None) - - # Cleanup - del d2Rij_dx2, ViscDiff_ij - - print(f"Done in {time.time() - start_time:.2f} seconds.") + dPSdXj = np.array(input_scalar_file["dPSdxj_struct"]) - # %% Momentum viscous diffusion terms - print("--------------working on momentum viscous diffusion terms...") - start_time = time.time() + dPSdXj = dPSdXj - P * dSdXj - S * dPdXj - Momentum_viscous_diffusion = (1 / Rer_here) * np.sum( - d2Ui_dx2.reshape(*Nxyz, 3, 3, order="F"), axis=3 - ) + UiS_scalar_pressure_correlation_gradient = -dPSdXj output_file.create_dataset( - "Momentum_viscous_diffusion", - data=Momentum_viscous_diffusion, + "UiS_scalar_pressure_correlation_gradient", + data=UiS_scalar_pressure_correlation_gradient, compression=None, ) - + del dPSdXj print(f"Done in {time.time() - start_time:.2f} seconds.") - # %% Momentum residual terms - print("--------------working on momentum residual terms...") - start_time = time.time() - - Momentum_residual = ( - Momentum_viscous_diffusion - + Momentum_turb_diffusion - + Momentum_pressure - - Momentum_convection - ) - - output_file.create_dataset( - "Momentum_residual", data=Momentum_residual, compression=None - ) - - # Cleanup - del ( - Momentum_viscous_diffusion, - Momentum_turb_diffusion, - Momentum_pressure, - Momentum_convection, - Momentum_residual, + # %% Turbulent scalar flux eqn: pressure-scalar gradient correlation + print( + "--------------working on turbulent scalar flux eqn: pressure-scalar gradient correlation..." 
) - - print(f"Done in {time.time() - start_time:.2f} seconds.") - - # %% Pressure-rate-of-strain terms - print("--------------working on pressure-rate-of-strain terms...") start_time = time.time() - PRS_ij = np.array(input_file["PGij_struct"]) - PRS_ij = PRS_ij - P * dUidXj - PRS_ij = PRS_ij[..., [0, 4, 8, 1, 2, 5]] + PRS_ij[..., [0, 4, 8, 3, 6, 7]] - - output_file.create_dataset("PRS_ij", data=PRS_ij, compression=None) - - print(f"Done in {time.time() - start_time:.2f} seconds.") - - # %% - print("--------------working on pressure transport terms...") - start_time = time.time() - - dpdx = np.array(input_file["dPdx_struct"]) - dpudx = np.array(input_file["dPUidxj_struct"]) + PdSdXi = np.array(input_scalar_file["PdSdxi_struct"]) - dpudx = ( - dpudx - - (P * dUidXj) - - ( - UVW[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - * dpdx[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - ) - ) + PdSdXi = PdSdXi - P * dSdXj - PressTrans_ij = -( - dpudx[..., [0, 4, 8, 1, 2, 5]] + dpudx[..., [0, 4, 8, 3, 6, 7]] - ) + UiS_pressure_scalar_gradient_correlation = PdSdXi - output_file.create_dataset("dpdx", data=dpdx, compression=None) output_file.create_dataset( - "PressTrans_ij", data=PressTrans_ij, compression=None + "UiS_pressure_scalar_gradient_correlation", + data=UiS_pressure_scalar_gradient_correlation, + compression=None, ) - - del dpudx + del PdSdXi print(f"Done in {time.time() - start_time:.2f} seconds.") - # %% Velocity-Pressure Gradient Terms - print("--------------working on velocity-pressure-gradient terms...") + # %% Turbulent scalar flux eqn: dissipation + print("--------------working on turbulent scalar flux eqn: dissipation...") start_time = time.time() - VelPressGrad_ij = PressTrans_ij - PRS_ij - TKE_VelPressGrad = np.sum(VelPressGrad_ij[..., 0:3], axis=-1) / 2 + UiSDiss = np.array(input_scalar_file["SDiss_struct"])[..., [1, 2, 3]] + for i in range(3): + UiSDiss[..., i] = UiSDiss[..., i] - np.sum( + dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1 + ) + + UiS_dissipation = -((1 / 
Rer_here) + (1 / (Rer_here * Prt_here))) * UiSDiss output_file.create_dataset( - "VelPressGrad_ij", data=VelPressGrad_ij, compression=None - ) - output_file.create_dataset( - "TKE_VelPressGrad", data=TKE_VelPressGrad, compression=None + "UiS_dissipation", data=UiS_dissipation, compression=None ) - - del PRS_ij, PressTrans_ij, VelPressGrad_ij + del UiSDiss print(f"Done in {time.time() - start_time:.2f} seconds.") - # %% TKE budget residual - print("--------------working on TKE budgets...") + # %% Turbulent scalar flux eqn: residual + print("--------------working on turbulent scalar flux eqn: residual...") start_time = time.time() - TKE_residual = ( - TKE_prod - + TKE_diss - + TKE_turbTrans - + TKE_ViscDiff - + TKE_VelPressGrad - + TKE_conv + UiS_residual = ( + UiS_velocity_gradient_production + + UiS_scalar_gradient_production + + UiS_turb_diffusion + + UiS_molecular_diffusion + + UiS_scalar_pressure_correlation_gradient + + UiS_pressure_scalar_gradient_correlation + + UiS_dissipation + + UiS_convection ) - output_file.create_dataset("TKE_residual", data=TKE_residual, compression=None) - + output_file.create_dataset("UiS_residual", data=UiS_residual, compression=None) del ( - TKE_prod, - TKE_diss, - TKE_turbTrans, - TKE_ViscDiff, - TKE_VelPressGrad, - TKE_conv, - TKE_residual, + UiS_velocity_gradient_production, + UiS_scalar_gradient_production, + UiS_turb_diffusion, + UiS_molecular_diffusion, + UiS_scalar_pressure_correlation_gradient, + UiS_pressure_scalar_gradient_correlation, + UiS_dissipation, + UiS_convection, + UiS_residual, ) print(f"Done in {time.time() - start_time:.2f} seconds.") From f453c6359d17751e9b4721dd16f6b4b04eee861c Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Mon, 23 Feb 2026 13:29:03 +0100 Subject: [PATCH 07/13] Initial bug fixes --- .../passive_scalar_budgets_interpolatedPoints_notMPI.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git 
a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index cce5995..fa4d7c7 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -466,7 +466,6 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset( "S2_convection", data=S2_convection, compression=None ) - del dS2dXj print(f"Done in {time.time() - start_time:.2f} seconds.") # %% Scalar variance eqn: production @@ -514,8 +513,8 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset( "S2_turb_diffusion", data=S2_turb_diffusion, compression=None ) - del dUjS2dXj - print(f"Done in {time.time() - 0:.2f} seconds.") + del dUjS2dXj, dS2dXj + print(f"Done in {time.time() - start_time:.2f} seconds.") # %% Scalar variance eqn: molecular diffusion print("--------------working on scalar variance eqn: molecular diffusion...") @@ -529,7 +528,7 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset( "S2_molecular_diffusion", data=S2_molecular_diffusion, compression=None ) - del d2S2dXi2, d2SdXj2 + del d2S2dXi2 print(f"Done in {time.time() - start_time:.2f} seconds.") # %% Scalar variance eqn: dissipation @@ -744,7 +743,7 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset( "UiS_molecular_diffusion", data=UiS_molecular_diffusion, compression=None ) - del dSdUidXjdXj, dUidSdXjdXj + del dSdUidXjdXj, dUidSdXjdXj, d2SdXj2 print(f"Done in {time.time() - start_time:.2f} seconds.") # %% Turbulent scalar flux eqn: scalar-pressure correlation gradient From 517e6e5acf204f890d3b984883deb659aa785137 Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Mon, 23 Feb 2026 19:06:13 +0200 Subject: [PATCH 08/13] At least got the code running --- .../statistics/passive_scalar_budgets.py | 139 +++++++++++++----- 
...calar_budgets_interpolatedPoints_notMPI.py | 82 +++++------ 2 files changed, 141 insertions(+), 80 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index a270710..a6743aa 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -112,6 +112,24 @@ def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): gradU.clear() +########################################################################################### +########################################################################################### + + +# %% generic function to write the sum of a 3-component (vector) field (as a scalar) +def write_file_sum3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): + from pysemtools.datatypes.field import FieldRegistry + from pysemtools.io.ppymech.neksuite import pynekwrite + import numpy as np + + gradU = FieldRegistry(comm) + + gradU.add_field(comm, field_name="sum", field=dU_dxi.c1 + dU_dxi.c2 + dU_dxi.c3, dtype=np.single) + + pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh) + + gradU.clear() + ########################################################################################### ########################################################################################### @@ -140,10 +158,11 @@ def return_list_of_vars_from_filename(fname): scal_fields = 37 field_names = [] + for i in range(vel_fields): tmp = ["vel_" + str(i)] field_names = field_names + tmp - + if pres_fields == 1: field_names = field_names + ["pres"] @@ -175,12 +194,73 @@ def do_dssum_on_3comp_vector(dU_dxi, msh_conn, msh): ########################################################################################### ########################################################################################### +#%% convert 2D statistics to 3D +def 
convert_scalar_2Dstats_to_3D(stats2D_filename,stats3D_filename,datatype='single'): + from mpi4py import MPI + import numpy as np + import warnings + from pysemtools.io.ppymech.neksuite import pynekread,pynekwrite + from pysemtools.datatypes.msh import Mesh + from pysemtools.datatypes.field import FieldRegistry + from pysemtools.datatypes.utils import extrude_2d_sem_mesh + from pysemtools.interpolation.interpolator import get_bbox_from_coordinates + from pysemtools.interpolation.point_interpolator.single_point_helper_functions import GLL_pwts + + # Get mpi info + comm = MPI.COMM_WORLD + + # initialize fields + msh = Mesh(comm, create_connectivity=False) + fld = FieldRegistry(comm) + + if datatype=='single': + data_type = np.single + wdsz = 4 + else: + data_type = np.double + wdsz = 8 + + # read mesh and fields + pynekread(stats2D_filename, comm, data_dtype=data_type, msh=msh, fld=fld) + + # get the + field_names = return_list_of_vars_from_filename(stats2D_filename) + print('fields in the given file: ', field_names ) + + # extruding mesh and fields + ## Find how much to extrude - Extrude the size of the smallest element + bbox = get_bbox_from_coordinates(msh.x, msh.y, msh.z) + bbox_dist = np.zeros((bbox.shape[0], 3)) + bbox_dist[:, 0] = bbox[:, 1] - bbox[:, 0] + bbox_dist[:, 1] = bbox[:, 3] - bbox[:, 2] + bbox_dist[:, 2] = bbox[:, 5] - bbox[:, 4] + local_bbox_min_dist = np.min( + np.sqrt(bbox_dist[:, 0] ** 2 + bbox_dist[:, 1] ** 2 + bbox_dist[:, 2] ** 2) / 2 + ) + bbox_min_dist = comm.allreduce(local_bbox_min_dist, op=MPI.MIN) + ## Generate the point distribution + x_, _ = GLL_pwts(msh.lx) + extrusion_size = bbox_min_dist + point_dist = np.flip(x_ * extrusion_size) + msh3d, fld3d = extrude_2d_sem_mesh(comm, lz = msh.lx, msh = msh, fld = fld, point_dist=point_dist) + + # filling in the missing z velocity + z_vel = fld3d.registry['s37'] + fld3d.add_field(comm, field_name="w", field=z_vel, dtype=np.single) + warnings.warn('The s37 field () was not removed from the file. 
'+ + 'Be careful with potential inconsistencies!') + + # writing the extruded stats file + pynekwrite(stats3D_filename, comm, msh=msh3d, fld=fld3d, write_mesh=True, wdsz=wdsz) + + ########################################################################################### ########################################################################################### ########################################################################################### ########################################################################################### ## generic function to compute the additional fields required for budget terms, etc. +## only implemented for Neko ########################################################################################### ########################################################################################### def compute_and_write_additional_sstat_fields( @@ -189,7 +269,6 @@ def compute_and_write_additional_sstat_fields( fname_mean, fname_stat, if_write_mesh=False, - which_code="NEKO", if_do_dssum_on_derivatives=False, ): @@ -201,9 +280,6 @@ def compute_and_write_additional_sstat_fields( if fname_mean != fname_stat: sys.exit("fname_mean must be the same as fname_stat") - # Scalar statistics only implemented for Neko - if which_code.casefold() != "NEKO": - sys.exit("only NEKO is supported for passive scalar statistics") ########################################################################################### import warnings @@ -266,15 +342,15 @@ def compute_and_write_additional_sstat_fields( # define file_keys of the fields based on the codes # Neko key names taken from: https://neko.cfd/docs/develop/df/d8f/statistics-guide.html - file_keys_S = ["pres"] + file_keys_S = "pres" # "US" "VS" "WS" file_keys_UiS = ["vel_0", "vel_1", "vel_2"] - file_keys_SS = ["temp"] + file_keys_SS = "temp" # "USS" "VSS" "WSS" file_keys_UiSS = ["scal_2", "scal_3", "scal_4"] # "UUS" "VVS" "WWS" "UVS" "UWS" "VWS" file_keys_UiUjS = ["scal_5", "scal_6", "scal_7", 
"scal_8", "scal_9", "scal_10"] - file_keys_PS = ["scal_11"] + file_keys_PS = "scal_11" file_keys_UidSdxj = [ "scal_15", @@ -458,10 +534,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUjSSdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -515,10 +591,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUUjSdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -568,10 +644,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dVUjSdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -621,10 +697,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dWUjSdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -702,10 +778,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUdSdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -755,10 +831,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dVdSdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, 
if_write_mesh=if_write_mesh, ) @@ -808,10 +884,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dWdSdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -865,10 +941,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dSdUdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -918,10 +994,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dSdVdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -971,10 +1047,10 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dSdWdxjdxj" + this_ext - write_file_3c( + write_file_sum3c( comm, msh, - dQ1_dxi.c1 + dQ2_dxi.c2 + dQ3_dxi.c3, + dQ1_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -991,6 +1067,7 @@ def compute_and_write_additional_sstat_fields( ########################################################################################### ########################################################################################### # interpolate the 42+N fields onto the user specified set of points +# only implemented for Neko ########################################################################################### ########################################################################################### def interpolate_all_stat_and_sstat_fields_onto_points( @@ -999,7 +1076,6 @@ def interpolate_all_stat_and_sstat_fields_onto_points( fname_mean, fname_stat, xyz, - 
which_code="NEKO", if_do_dssum_before_interp=True, if_create_boundingBox_for_interp=False, if_pass_points_to_rank0_only=True, @@ -1031,9 +1107,6 @@ def interpolate_all_stat_and_sstat_fields_onto_points( if fname_mean != fname_stat: sys.exit("fname_mean must be the same as fname_stat") - # Scalar statistics only implemented for Neko - if which_code.casefold() != "NEKO": - sys.exit("only NEKO is supported for passive scalar statistics") ########################################################################################### # Get mpi info @@ -1058,7 +1131,7 @@ def interpolate_all_stat_and_sstat_fields_onto_points( full_fname_mesh = which_dir + "/" + fname_mesh # get the file name for the 42 fileds collected in run time - these_names = which_dir + "/" + fname_stat + these_names = [ which_dir + "/" + fname_stat ] # add the name of the additional fields these_names.extend( diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index fa4d7c7..d8f4d3a 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -55,18 +55,18 @@ def load_field(num, prefix="interpolated_scalar_fields"): print("reading the 42 statistics fields...") start_time = time.time() - S_vec = load_field(0) + S_vec = load_field(4) UiS_vec = np.column_stack([load_field(i) for i in range(1, 4)]) - S2_vec = load_field(4) - S3_vec = load_field(5) - S4_vec = load_field(6) - UiS2_vec = np.column_stack([load_field(i) for i in range(7, 10)]) - UiUjS_vec = np.column_stack([load_field(i) for i in range(10, 16)]) - PS_vec = load_field(16) - PdSdxi_vec = np.column_stack([load_field(i) for i in range(17, 20)]) - UidSdxj_vec = np.column_stack([load_field(i) for i in range(20, 29)]) - SdUidxj_vec = np.column_stack([load_field(i) for i in 
range(29, 38)]) - SDiss_vec = np.column_stack([load_field(i) for i in range(38, 42)]) + S2_vec = load_field(5) + S3_vec = load_field(6) + S4_vec = load_field(7) + UiS2_vec = np.column_stack([load_field(i) for i in range(8, 11)]) + UiUjS_vec = np.column_stack([load_field(i) for i in range(11, 17)]) + PS_vec = load_field(17) + PdSdxi_vec = np.column_stack([load_field(i) for i in range(18, 21)]) + UidSdxj_vec = np.column_stack([load_field(i) for i in range(21, 30)]) + SdUidxj_vec = np.column_stack([load_field(i) for i in range(30, 39)]) + SDiss_vec = np.column_stack([load_field(i) for i in range(39, 43)]) print(f"Done in {time.time() - start_time:.2f} seconds.") @@ -81,16 +81,16 @@ def load_field(num, prefix="interpolated_scalar_fields"): print("reading the additional fields...") start_time = time.time() - dSdxj_vec = np.column_stack([load_field(i) for i in range(42, 45)]) - d2Sdxj2_vec = np.column_stack([load_field(i) for i in range(45, 48)]) - dS2dxj_vec = np.column_stack([load_field(i) for i in range(48, 51)]) - d2S2dxj2_vec = np.column_stack([load_field(i) for i in range(51, 54)]) - dUiSdxj_vec = np.column_stack([load_field(i) for i in range(54, 63)]) - dUjS2dxj_vec = np.column_stack([load_field(63)]) - dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(64, 67)]) - dPSdxj_vec = np.column_stack([load_field(i) for i in range(67, 70)]) - dUidSdxjdxj_vec = np.column_stack([load_field(i) for i in range(70, 73)]) - dSdUidxjdxj_vec = np.column_stack([load_field(i) for i in range(73, 76)]) + dSdxj_vec = np.column_stack([load_field(i) for i in range(43, 46)]) + d2Sdxj2_vec = np.column_stack([load_field(i) for i in range(46, 49)]) + dS2dxj_vec = np.column_stack([load_field(i) for i in range(49, 52)]) + d2S2dxj2_vec = np.column_stack([load_field(i) for i in range(52, 55)]) + dUiSdxj_vec = np.column_stack([load_field(i) for i in range(55, 64)]) + dUjS2dxj_vec = np.column_stack([load_field(64)]) + dUiUjSdxj_vec = np.column_stack([load_field(i) for i in range(65, 
68)]) + dPSdxj_vec = np.column_stack([load_field(i) for i in range(68, 71)]) + dUidSdxjdxj_vec = np.column_stack([load_field(i) for i in range(71, 74)]) + dSdUidxjdxj_vec = np.column_stack([load_field(i) for i in range(74, 77)]) print("Finished reading all fields.") print(f"Done in {time.time() - start_time:.2f} seconds.") @@ -279,7 +279,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------loading pressure gradients...") start_time = time.time() - dPdXj = np.array(input_fluid_file["dPdxj_struct"]) + dPdXj = np.array(input_fluid_file["dPdx_struct"]) print(f"Done in {time.time() - start_time:.2f} seconds.") @@ -296,7 +296,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------loading Reynolds stress gradients...") start_time = time.time() - dUiUjdXk = np.array(input_fluid_file["UiUjdx_struct"]) + dUiUjdXk = np.array(input_fluid_file["dUiUjdx_struct"]) dUiUjdXk = ( dUiUjdXk - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]] @@ -334,7 +334,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on scalar fluxes...") start_time = time.time() - UiS = np.array(input_fluid_file["UiS_struct"]) + UiS = np.array(input_scalar_file["UiS_struct"]) UiS = UiS - Ui * S @@ -346,7 +346,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on scalar variance fluxes...") start_time = time.time() - UiS2 = np.array(input_fluid_file["UiS2_struct"]) + UiS2 = np.array(input_scalar_file["UiS2_struct"]) UiS2 = UiS2 - Ui * S2 - 2 * S * UiS - Ui * S**2 @@ -358,7 +358,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on scalar flux transport...") start_time = time.time() - UiUjS = np.array(input_fluid_file["UiUjS_struct"]) + UiUjS = np.array(input_scalar_file["UiUjS_struct"]) UiUjS = ( UiUjS @@ -644,8 +644,8 @@ def calculate_scalar_budgets_in_Cartesian( dUiUjSdXj[..., i] = ( dUiUjSdXj[..., i] - Ui[..., i] * np.sum(Ui * dSdXj, axis=-1) - - S * np.sum(Ui * dUidXj[..., 3 * i : 3 * i + 
3], axis=-1) - - Ui[..., i] * S * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - S[..., 0] * np.sum(Ui * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) + - Ui[..., i] * S[..., 0] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) - np.sum(Ui * dUiSdXj[..., 3 * i : 3 * i + 3], axis=-1) - UiS[..., i] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) - Ui[..., i] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) @@ -654,17 +654,17 @@ def calculate_scalar_budgets_in_Cartesian( dUiUjSdXj[..., 0] = ( dUiUjSdXj[..., 0] - - S * np.sum(dUiUjdXk[..., [0, 10, 14]], axis=-1) + - S[..., 0] * np.sum(dUiUjdXk[..., [0, 10, 14]], axis=-1) - np.sum(UiUj[..., [0, 3, 4]] * dSdXj, axis=-1) ) dUiUjSdXj[..., 1] = ( dUiUjSdXj[..., 1] - - S * np.sum(dUiUjdXk[..., [9, 4, 17]], axis=-1) + - S[..., 0] * np.sum(dUiUjdXk[..., [9, 4, 17]], axis=-1) - np.sum(UiUj[..., [3, 1, 5]] * dSdXj, axis=-1) ) dUiUjSdXj[..., 2] = ( dUiUjSdXj[..., 2] - - S * np.sum(dUiUjdXk[..., [12, 16, 8]], axis=-1) + - S[..., 0] * np.sum(dUiUjdXk[..., [12, 16, 8]], axis=-1) - np.sum(UiUj[..., [4, 5, 2]] * dSdXj, axis=-1) ) @@ -697,7 +697,7 @@ def calculate_scalar_budgets_in_Cartesian( output_file.create_dataset( "UiS_turb_diffusion", data=UiS_turb_diffusion, compression=None ) - del dUiUjSdXk + del dUiUjSdXj print(f"Done in {time.time() - start_time:.2f} seconds.") # %% Turbulent scalar flux eqn: molecular diffusion @@ -706,28 +706,16 @@ def calculate_scalar_budgets_in_Cartesian( ) start_time = time.time() - dSdUidXjdXj = np.column_stack( - [ - np.array(input_scalar_file["dSdUdxjdxj_struct"]), - np.array(input_scalar_file["dSdVdxjdxj_struct"]), - np.array(input_scalar_file["dSdWdxjdxj_struct"]), - ] - ) + dSdUidXjdXj = np.array(input_scalar_file["dSdUidxjdxj_struct"]) for i in range(3): dSdUidXjdXj[..., i] = ( dSdUidXjdXj[..., i] - np.sum(dSdXj * dUidXj[..., 3 * i : 3 * i + 3], axis=-1) - - S * np.sum(d2UidXj2[..., 3 * i : 3 * i + 3], axis=-1) + - S[..., 0] * np.sum(d2UidXj2[..., 3 * i : 3 * i + 3], axis=-1) ) - dUidSdXjdXj = np.column_stack( - [ - 
np.array(input_scalar_file["dUdxjSdxj_struct"]), - np.array(input_scalar_file["dVdxjSdxj_struct"]), - np.array(input_scalar_file["dWdxjSdxj_struct"]), - ] - ) + dUidSdXjdXj = np.array(input_scalar_file["dUidSdxjdxj_struct"]) for i in range(3): dUidSdXjdXj[..., i] = ( From ffdd3747b8590f990dac504765b9c0cdba64d52f Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Mon, 23 Feb 2026 19:08:22 +0200 Subject: [PATCH 09/13] Added write message --- .../passive_scalar_budgets_interpolatedPoints_notMPI.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index d8f4d3a..648d1a4 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -584,6 +584,8 @@ def calculate_scalar_budgets_in_Cartesian( "UiS_convection", data=UiS_convection, compression=None ) + print(f"Done in {time.time() - start_time:.2f} seconds.") + # %% Turbulent scalar flux eqn: velocity gradient production print( "--------------working on turbulent scalar flux eqn: velocity gradient production..." 
From 94c9c99f9a6bd9df408f8743213ffc3f09ca5cc2 Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Tue, 24 Feb 2026 12:41:02 +0200 Subject: [PATCH 10/13] fix kwargs for find_points_tol --- .../statistics/passive_scalar_budgets.py | 41 +++++++------------ 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index a6743aa..af33180 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -1201,37 +1201,24 @@ def interpolate_all_stat_and_sstat_fields_onto_points( # point_interpolator_type="multiple_point_legendre_numpy", \ # global_tree_type="domain_binning" , \ # max_pts = 256 ) + + probe_kwargs = { + "comm": comm, + "msh": msh, + "point_interpolator_type": "multiple_point_legendre_numpy", + "max_pts": 128, + "output_fname": interpolation_output_fname, + } + if find_points_tol is not None: + probe_kwargs["find_points_tol"] = find_points_tol + if not if_pass_points_to_rank0_only: - probes = Probes( - comm, - probes=xyz, - msh=msh, - point_interpolator_type="multiple_point_legendre_numpy", - max_pts=128, - output_fname=interpolation_output_fname, - find_points_tol=find_points_tol, - ) + probes = Probes(probes=xyz, **probe_kwargs) else: if comm.Get_rank() == 0: - probes = Probes( - comm, - probes=xyz, - msh=msh, - point_interpolator_type="multiple_point_legendre_numpy", - max_pts=128, - output_fname=interpolation_output_fname, - find_points_tol=find_points_tol, - ) + probes = Probes(probes=xyz, **probe_kwargs) else: - probes = Probes( - comm, - probes=None, - msh=msh, - point_interpolator_type="multiple_point_legendre_numpy", - max_pts=128, - output_fname=interpolation_output_fname, - find_points_tol=find_points_tol, - ) + probes = Probes(probes=None, **probe_kwargs) 
########################################################################################### for fname in these_names: From a650884b4094ef341599ede33baaa5ed61ccd3aa Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Tue, 24 Feb 2026 13:55:03 +0200 Subject: [PATCH 11/13] Budgets seem to be working --- .../statistics/passive_scalar_budgets.py | 46 +++++++++---- ...calar_budgets_interpolatedPoints_notMPI.py | 66 +++++-------------- 2 files changed, 49 insertions(+), 63 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py index af33180..6e9acba 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets.py @@ -116,15 +116,15 @@ def write_file_3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): ########################################################################################### -# %% generic function to write the sum of a 3-component (vector) field (as a scalar) -def write_file_sum3c(comm, msh, dU_dxi, fname_gradU, if_write_mesh): +# %% generic function to write the trace of three 3-component (vector) fields (as a scalar) +def write_file_trace3(comm, msh, dU_dxi, dV_dxi, dW_dxi, fname_gradU, if_write_mesh): from pysemtools.datatypes.field import FieldRegistry from pysemtools.io.ppymech.neksuite import pynekwrite import numpy as np gradU = FieldRegistry(comm) - gradU.add_field(comm, field_name="sum", field=dU_dxi.c1 + dU_dxi.c2 + dU_dxi.c3, dtype=np.single) + gradU.add_field(comm, field_name="sum", field=dU_dxi.c1 + dV_dxi.c2 + dW_dxi.c3, dtype=np.single) pynekwrite(fname_gradU, comm, msh=msh, fld=gradU, wdsz=4, write_mesh=if_write_mesh) @@ -534,10 +534,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUjSSdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, 
this_file_name, if_write_mesh=if_write_mesh, ) @@ -591,10 +593,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUUjSdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -644,10 +648,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dVUjSdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -697,10 +703,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dWUjSdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -778,10 +786,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dUdSdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -831,10 +841,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dVdSdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -884,10 +896,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dWdSdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -941,10 +955,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + 
"/dSdUdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -994,10 +1010,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dSdVdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) @@ -1047,10 +1065,12 @@ def compute_and_write_additional_sstat_fields( do_dssum_on_3comp_vector(dQ3_dxi, msh_conn, msh) this_file_name = which_dir + "/dSdWdxjdxj" + this_ext - write_file_sum3c( + write_file_trace3( comm, msh, dQ1_dxi, + dQ2_dxi, + dQ3_dxi, this_file_name, if_write_mesh=if_write_mesh, ) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index 648d1a4..b51db82 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -417,7 +417,7 @@ def calculate_scalar_budgets_in_Cartesian( - S * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] ) - S_turb_diffusion = -np.sum(dUiSdXj[:, 0:3], axis=-1) + S_turb_diffusion = -np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) output_file.create_dataset( "S_turb_diffusion", data=S_turb_diffusion, compression=None @@ -486,27 +486,15 @@ def calculate_scalar_budgets_in_Cartesian( dUjS2dXj = np.array(input_scalar_file["dUjS2dxj_struct"]) - dirac = [0, 4, 8] - dUjS2dXj = dUjS2dXj - np.sum( - Ui * dS2dXj - + S2 * dUidXj[..., dirac] - + 2 * UiS * dSdXj - + 2 * S * dUiSdXj[..., dirac] - + Ui * dS2dXj - + S2 * dUidXj[..., dirac], - axis=-1, - ) - # ( - # dUiS2dXj - # - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - # * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - # - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - # - 2 * UiS * 
dSdXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - # - 2 * S * dUiSdXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - # - Ui[..., [0, 0, 0, 1, 1, 1, 2, 2, 2]] - # * dS2dXj[..., [0, 1, 2, 0, 1, 2, 0, 1, 2]] - # - S2 * dUidXj[..., [0, 1, 2, 3, 4, 5, 6, 7, 8]] - # ) + dUjS2dXj = ( + dUjS2dXj[..., 0] + - 2 * np.sum(Ui * dSdXj, axis=-1) + - S[..., 0]**2 * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + - 2 * np.sum(UiS * dSdXj, axis=-1) + - 2 * S[..., 0] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) + - np.sum(Ui * dS2dXj, axis=-1) + - S2[..., 0] * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + ) S2_turb_diffusion = -dUjS2dXj @@ -573,6 +561,7 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on turbulent scalar flux eqn: convection...") start_time = time.time() + UiS_convection = np.zeros((*Nxyz, 3)) UiS_convection[..., 0] = np.sum(Ui * dUiSdXj[..., :3], axis=-1) UiS_convection[..., 1] = np.sum(Ui * dUiSdXj[..., 3:6], axis=-1) @@ -594,13 +583,13 @@ def calculate_scalar_budgets_in_Cartesian( UiS_velocity_gradient_production = np.zeros((*Nxyz, 3)) UiS_velocity_gradient_production[..., 0] = -np.sum( - UiS * dUidXj[..., [0, 3, 6]], axis=-1 + UiS * dUidXj[..., :3], axis=-1 ) UiS_velocity_gradient_production[..., 1] = -np.sum( - UiS * dUidXj[..., [1, 4, 7]], axis=-1 + UiS * dUidXj[..., 3:6], axis=-1 ) UiS_velocity_gradient_production[..., 2] = -np.sum( - UiS * dUidXj[..., [2, 5, 8]], axis=-1 + UiS * dUidXj[..., 6:9], axis=-1 ) output_file.create_dataset( @@ -670,31 +659,7 @@ def calculate_scalar_budgets_in_Cartesian( - np.sum(UiUj[..., [4, 5, 2]] * dSdXj, axis=-1) ) - UiS_turb_diffusion = -np.sum(dUiUjSdXj, axis=-1) - - # inds_i = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1]) - # inds_j = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2]) - # inds_k = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) - # inds_didk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 0, 1, 2, 3, 4, 5]) - # inds_djdk = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 
5, 6, 7, 8, 6, 7, 8]) - # inds_ij = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]) - # dUiUjSdXk = ( - # dUiUjSdXk - # - Ui[..., inds_i] * Ui[..., inds_j] * dSdXj[..., inds_k] - # - Ui[..., inds_j] * S * dUidXj[..., inds_didk] - # - Ui[..., inds_i] * S * dUidXj[..., inds_djdk] - # - Ui[..., inds_j] * dUiSdXj[..., inds_didk] - # - UiS[..., inds_i] * dUidXj[..., inds_djdk] - # - Ui[..., inds_i] * dUiSdXj[..., inds_djdk] - # - UiS[..., inds_j] * dUidXj[..., inds_didk] - # - S * dUiUjdXk - # - UiUj[..., inds_ij] * dSdXj[..., inds_k] - # ) - - # UiS_turb_diffusion = np.zeros((*Nxyz, 3)) - # for i in range(3): - # inds = np.where((inds_i == i) & (inds_j == inds_k))[0] - # UiS_turb_diffusion[..., i] = -np.sum(dUiUjSdXk[..., inds], axis=-1) + UiS_turb_diffusion = -dUiUjSdXj output_file.create_dataset( "UiS_turb_diffusion", data=UiS_turb_diffusion, compression=None @@ -788,6 +753,7 @@ def calculate_scalar_budgets_in_Cartesian( ) UiS_dissipation = -((1 / Rer_here) + (1 / (Rer_here * Prt_here))) * UiSDiss + output_file.create_dataset( "UiS_dissipation", data=UiS_dissipation, compression=None ) From 5cbabf4111936db69ab026f60cb89ed0b6a8bbdd Mon Sep 17 00:00:00 2001 From: Nick Morse Date: Wed, 25 Feb 2026 14:28:27 +0100 Subject: [PATCH 12/13] Fix to scalar variance convection and turbulent diffusion --- .../passive_scalar_budgets_interpolatedPoints_notMPI.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py index b51db82..4bbf153 100644 --- a/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py +++ b/pysemtools/postprocessing/statistics/passive_scalar_budgets_interpolatedPoints_notMPI.py @@ -459,6 +459,8 @@ def calculate_scalar_budgets_in_Cartesian( start_time = time.time() dS2dXj = 
np.array(input_scalar_file["dS2dxj_struct"]) + dS2dXj = dS2dXj - 2 * S * dSdXj + S2_convection = np.sum(Ui * dS2dXj, axis=-1) S2_convection = -S2_convection # Put on RHS @@ -487,9 +489,9 @@ def calculate_scalar_budgets_in_Cartesian( dUjS2dXj = np.array(input_scalar_file["dUjS2dxj_struct"]) dUjS2dXj = ( - dUjS2dXj[..., 0] - - 2 * np.sum(Ui * dSdXj, axis=-1) - - S[..., 0]**2 * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) + dUjS2dXj[..., 0] + - 2 * S[..., 0] * np.sum(Ui * dSdXj, axis=-1) + - S[..., 0] ** 2 * np.sum(dUidXj[..., [0, 4, 8]], axis=-1) - 2 * np.sum(UiS * dSdXj, axis=-1) - 2 * S[..., 0] * np.sum(dUiSdXj[..., [0, 4, 8]], axis=-1) - np.sum(Ui * dS2dXj, axis=-1) @@ -561,7 +563,6 @@ def calculate_scalar_budgets_in_Cartesian( print("--------------working on turbulent scalar flux eqn: convection...") start_time = time.time() - UiS_convection = np.zeros((*Nxyz, 3)) UiS_convection[..., 0] = np.sum(Ui * dUiSdXj[..., :3], axis=-1) UiS_convection[..., 1] = np.sum(Ui * dUiSdXj[..., 3:6], axis=-1) From 9e5b40ad16f03e03f63f854d7b9c331996106d47 Mon Sep 17 00:00:00 2001 From: Adalberto Date: Thu, 26 Feb 2026 15:28:28 +0100 Subject: [PATCH 13/13] Added unindexed example --- .../scalar_budgets/process_scalar_stats.py | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 examples/unindexed/scalar_budgets/process_scalar_stats.py diff --git a/examples/unindexed/scalar_budgets/process_scalar_stats.py b/examples/unindexed/scalar_budgets/process_scalar_stats.py new file mode 100644 index 0000000..99adeb9 --- /dev/null +++ b/examples/unindexed/scalar_budgets/process_scalar_stats.py @@ -0,0 +1,208 @@ +# Import required modules +from mpi4py import MPI #equivalent to the use of MPI_init() in C +import matplotlib.pyplot as plt +import numpy as np +import os + +# Get mpi info +comm = MPI.COMM_WORLD + +sem_data_path = "../../data/sem_data/" +if not os.path.exists(sem_data_path): + print("Sem data not found, cloning repository...") + os.system(f"git clone 
https://github.com/adperezm/sem_data.git {sem_data_path}") +else: + print("Sem data found.") + +stats_dir = "./" + +## % Convert 2D stats to 3D stats + +fluid_stats2D_fname = os.path.join(sem_data_path,"statistics/channel_nelv_300_scalars/", "merged_fluid_stats2D0.f00000") +scalar_stats2D_fname = os.path.join(sem_data_path,"statistics/channel_nelv_300_scalars/", "merged_scalar_stats2D0.f00000") +fluid_stats3D_fname = "merged_fluid_stats3D.f00000" +scalar_stats3D_fname = "merged_scalar_stats3D.f00000" + +from pysemtools.postprocessing.statistics.RS_budgets import convert_2Dstats_to_3D +from pysemtools.postprocessing.statistics.passive_scalar_budgets import convert_scalar_2Dstats_to_3D + +convert_2Dstats_to_3D(fluid_stats2D_fname, fluid_stats3D_fname, datatype='single') +convert_scalar_2Dstats_to_3D(scalar_stats2D_fname, scalar_stats3D_fname, datatype='single') + + +## % Process the statistics: additional fields + +from pysemtools.postprocessing.statistics.RS_budgets import compute_and_write_additional_pstat_fields +from pysemtools.postprocessing.statistics.passive_scalar_budgets import compute_and_write_additional_sstat_fields + + +compute_and_write_additional_pstat_fields( + stats_dir, + fluid_stats3D_fname, + fluid_stats3D_fname, + fluid_stats3D_fname, + if_write_mesh=True, + which_code='neko', + nek5000_stat_type='', + if_do_dssum_on_derivatives=False) + + +compute_and_write_additional_sstat_fields( + stats_dir, + scalar_stats3D_fname, + scalar_stats3D_fname, + scalar_stats3D_fname, + if_write_mesh=True, + if_do_dssum_on_derivatives=False) + + +## % Interpolate + +def geometric_stretching(dx0,stretching,dxmax,xmax): + import numpy as np + + dx_base = dx0 * (stretching**np.linspace(0,1000,1001)) + x_base = np.cumsum( dx_base ) + N_inrange = np.sum( x_base<=xmax ) + x_base = x_base[0:N_inrange] + x_base = x_base/np.max(x_base) + + x = np.zeros(N_inrange+1) + x[0] = 0.0 + x[1:N_inrange+1] = x_base + + return x + +def user_defined_interpolating_points(): + import 
numpy as np + import pysemtools.interpolation.pointclouds as pcs + import pysemtools.interpolation.utils as interp_utils + + print("generate interpolation points") + + # Create the coordinates of the plane you want + x_bbox = [0.0, 2.*np.pi] + + nz = 1 + nx = 201 + + # add one point to exclude since periodic + x = np.linspace( x_bbox[0] , x_bbox[1] , nx+1 ) + + # exclude last points since periodic + z = .001 + x = .5 * ( x[0:nz]+x[1:]) + + re_tau = 180. + dy0 = .3/re_tau + stretching = 1.05 + dymax = 15./re_tau + ymax = 1.0 + + y_base = geometric_stretching(dy0,stretching,dymax,ymax) + y_base[0] = 0.1/re_tau # to avoid potential issues when mapping to the exact walls + y_base = y_base - 1 + ny = y_base.size + Nstruct = [nx, ny, nz] + print('Nstruct=', Nstruct) + + X,Y,Z = np.meshgrid(x, y_base, z, indexing="ij") + + xyz = interp_utils.transform_from_array_to_list(nx, ny, nz, [X, Y, Z]) + + return xyz, Nstruct + +# Call the functions + +# Call the functions +# NOTE: this should either be called from rank 0 only and then properly distributed into mpi ranks +# or each rank should generate its own points +if comm.Get_rank() == 0: + xyz , Nstruct = user_defined_interpolating_points() +else: + xyz = 0 + + +from pysemtools.postprocessing.statistics.RS_budgets import interpolate_all_stat_and_pstat_fields_onto_points +from pysemtools.postprocessing.statistics.passive_scalar_budgets import interpolate_all_stat_and_sstat_fields_onto_points + +interpolate_all_stat_and_pstat_fields_onto_points( + stats_dir, + fluid_stats3D_fname, + fluid_stats3D_fname, + fluid_stats3D_fname, + xyz, + which_code='neko', + nek5000_stat_type='', + if_do_dssum_before_interp=False, + if_create_boundingBox_for_interp=False, + if_pass_points_to_rank0_only=True +) + +interpolate_all_stat_and_sstat_fields_onto_points( + stats_dir, + scalar_stats3D_fname, + scalar_stats3D_fname, + scalar_stats3D_fname, + xyz, + if_do_dssum_before_interp=False, + if_create_boundingBox_for_interp=False, 
if_pass_points_to_rank0_only=True +) + + +## % Finish processing profiles and average + +def av_func(x): + import numpy as np + return np.mean(x, axis=0, keepdims=True) + + +from pysemtools.postprocessing.statistics.RS_budgets_interpolatedPoints_notMPI import read_interpolated_stat_hdf5_fields +from pysemtools.postprocessing.statistics.passive_scalar_budgets_interpolatedPoints_notMPI import read_interpolated_scalar_stat_hdf5_fields + +from pysemtools.postprocessing.statistics.RS_budgets_interpolatedPoints_notMPI import calculate_budgets_in_Cartesian +from pysemtools.postprocessing.statistics.passive_scalar_budgets_interpolatedPoints_notMPI import calculate_scalar_budgets_in_Cartesian + +Reynolds_number = 180 +Prandtl_number = 0.71 +If_average = True +If_convert_to_single = True +fname_averaged_fluid = 'averaged_and_renamed_interpolated_fluid_fields.hdf5' +fname_averaged_scalar = 'averaged_and_renamed_interpolated_scalar_fields.hdf5' +fname_budget_fluid = 'fluid_budgets.hdf5' +fname_budget_scalar = 'scalar_budgets.hdf5' + + +if comm.Get_rank() == 0: + read_interpolated_stat_hdf5_fields( + stats_dir , + Reynolds_number , + If_average , + If_convert_to_single , + Nstruct , + av_func , + output_fname = fname_averaged_fluid + ) + + read_interpolated_scalar_stat_hdf5_fields( + stats_dir , + Reynolds_number , + Prandtl_number , + If_average , + If_convert_to_single , + Nstruct , + av_func , + output_fname = fname_averaged_scalar + ) + + calculate_budgets_in_Cartesian( + path_to_files = stats_dir , + input_filename = fname_averaged_fluid , + output_filename = fname_budget_fluid ) + + calculate_scalar_budgets_in_Cartesian( + path_to_files = stats_dir , + input_scalar_filename = fname_averaged_scalar , + input_fluid_filename = fname_averaged_fluid , + output_filename = fname_budget_scalar )