diff --git a/docs/developers_guide/ocean/api.md b/docs/developers_guide/ocean/api.md index 4c1fe455d0..adbed63d37 100644 --- a/docs/developers_guide/ocean/api.md +++ b/docs/developers_guide/ocean/api.md @@ -198,6 +198,33 @@ viz.Viz.run ``` +### horiz_press_grad + +```{eval-rst} +.. currentmodule:: polaris.tasks.ocean.horiz_press_grad + +.. autosummary:: + :toctree: generated/ + + add_horiz_press_grad_tasks + + task.HorizPressGradTask + task.HorizPressGradTask.configure + + reference.Reference + reference.Reference.run + + init.Init + init.Init.run + + forward.Forward + forward.Forward.setup + + analysis.Analysis + analysis.Analysis.setup + analysis.Analysis.run +``` + ### ice_shelf_2d ```{eval-rst} diff --git a/docs/developers_guide/ocean/tasks/horiz_press_grad.md b/docs/developers_guide/ocean/tasks/horiz_press_grad.md new file mode 100644 index 0000000000..22ee5925dc --- /dev/null +++ b/docs/developers_guide/ocean/tasks/horiz_press_grad.md @@ -0,0 +1,72 @@ +(dev-ocean-horiz-press-grad)= + +# horiz_press_grad + +The {py:class}`polaris.tasks.ocean.horiz_press_grad.task.HorizPressGradTask` +provides two-column Omega tests for pressure-gradient-acceleration (`HPGA`) +accuracy and convergence across horizontal and vertical resolutions. + +The task family includes three variants: + +- `salinity_gradient` +- `temperature_gradient` +- `ztilde_gradient` + +## framework + +The config options for these tests are described in +{ref}`ocean-horiz-press-grad` in the User's Guide. + +The task dynamically rebuilds `init` and `forward` steps in `configure()` so +user-supplied `horiz_resolutions` and `vert_resolutions` in config files are +reflected in the work directory setup. + +### reference + +The class {py:class}`polaris.tasks.ocean.horiz_press_grad.reference.Reference` +defines a step that builds a high-fidelity reference HPGA solution in +`reference_solution.nc`. 
+ +It computes pseudo-height and geometric-height profiles on a refined reference +grid, evaluates TEOS-10 specific volume, and computes HPGA at the center +column using a 4th-order finite-difference stencil. + +### init + +The class {py:class}`polaris.tasks.ocean.horiz_press_grad.init.Init` +defines one step per `(horiz_res, vert_res)` pair. + +Each `init` step: + +- builds and culls a planar two-cell mesh, +- sets up z-tilde vertical coordinates and profile fields, +- iteratively adjusts pseudo-bottom depth to match target geometric + water-column thickness, and +- writes `culled_mesh.nc` and `initial_state.nc`. + +### forward + +The class {py:class}`polaris.tasks.ocean.horiz_press_grad.forward.Forward` +defines one model step per horizontal resolution. + +It runs Omega from the corresponding `init` output and writes `output.nc` +(with `NormalVelocityTend` validation), using options from `forward.yaml`. + +### analysis + +The class {py:class}`polaris.tasks.ocean.horiz_press_grad.analysis.Analysis` +compares each `forward` result with: + +- the high-fidelity reference solution, and +- the Python-computed HPGA from `initial_state.nc`. + +The step writes: + +- `omega_vs_reference.nc` and `omega_vs_reference.png` +- `omega_vs_python.nc` and `omega_vs_python.png` + +and enforces regression criteria from `[horiz_press_grad]`, including: + +- allowed convergence-slope range for Omega-vs-reference, +- high-resolution RMS threshold for Omega-vs-reference, and +- RMS threshold for Omega-vs-Python consistency. 
diff --git a/docs/developers_guide/ocean/tasks/index.md b/docs/developers_guide/ocean/tasks/index.md index fc689f1c67..81fbe96807 100644 --- a/docs/developers_guide/ocean/tasks/index.md +++ b/docs/developers_guide/ocean/tasks/index.md @@ -13,6 +13,7 @@ cosine_bell customizable_viz external_gravity_wave geostrophic +horiz_press_grad divergent_2d ice_shelf_2d inertial_gravity_wave diff --git a/docs/users_guide/ocean/tasks/horiz_press_grad.md b/docs/users_guide/ocean/tasks/horiz_press_grad.md new file mode 100644 index 0000000000..e915cb390f --- /dev/null +++ b/docs/users_guide/ocean/tasks/horiz_press_grad.md @@ -0,0 +1,108 @@ +(ocean-horiz-press-grad)= + +# horizontal pressure gradient + +## description + +The `horiz_press_grad` tasks in `polaris.tasks.ocean.horiz_press_grad` +exercise Omega's hydrostatic pressure-gradient acceleration (`HPGA`) +for a two-column configuration with prescribed horizontal gradients. + +Each task includes: + +- a high-fidelity `reference` solution for HPGA, +- an `init` step at each horizontal/vertical resolution pair, +- a single-time-step `forward` run at each horizontal resolution, and +- an `analysis` step comparing Omega output with both the reference and + Python-initialized HPGA. + +The tasks currently provided are: + +``` +ocean/horiz_press_grad/salinity_gradient +ocean/horiz_press_grad/temperature_gradient +ocean/horiz_press_grad/ztilde_gradient +``` + +```{image} images/horiz_press_grad_salin_grad.png +:align: center +:width: 600 px +``` +## supported models + +These tasks currently support Omega only. + +## mesh + +The mesh is planar with two adjacent ocean cells. For each resolution in +`horiz_resolutions`, the spacing between the two columns is set by that value +(in km). + +## vertical grid + +The vertical coordinate is `z-tilde` with a uniform pseudo-height spacing for +each test in `vert_resolutions`. 
+ +The `reference` step uses a finer spacing `vert_res` chosen so that every test +spacing is an integer multiple of `2 * vert_res`. This allows reference +interfaces to align with test midpoints for exact subsampling in analysis. + +(ocean-horiz-press-grad-config)= +## config options + +Shared options are in section `[horiz_press_grad]`: + +```cfg +# resolutions in km (distance between the two columns) +horiz_resolutions = [4.0, 3.0, 2.0, 1.5, 1.0, 0.75, 0.5] + +# vertical resolution in m for each two-column setup +vert_resolutions = [4.0, 3.0, 2.0, 1.5, 1.0, 0.75, 0.5] + +# geometric sea-surface and sea-floor midpoint values and x-gradients +geom_ssh_mid = 0.0 +geom_ssh_grad = 0.0 +geom_z_bot_mid = -500.0 +geom_z_bot_grad = 0.0 + +# pseudo-height bottom midpoint and x-gradient +z_tilde_bot_mid = -576.0 +z_tilde_bot_grad = 0.0 + +# midpoint and gradient node values for piecewise profiles +z_tilde_mid = [0.0, -48.0, -144.0, -288.0, -576.0] +z_tilde_grad = [0.0, 0.0, 0.0, 0.0, 0.0] + +temperature_mid = [22.0, 20.0, 14.0, 8.0, 5.0] +temperature_grad = [0.0, 0.0, 0.0, 0.0, 0.0] + +salinity_mid = [35.6, 35.4, 35.0, 34.8, 34.75] +salinity_grad = [0.0, 0.0, 0.0, 0.0, 0.0] + +# reference settings +reference_quadrature_method = gauss4 +reference_horiz_res = 0.25 +``` + +The three task variants specialize one horizontal gradient field: + +- `salinity_gradient`: nonzero `salinity_grad` +- `temperature_gradient`: nonzero `temperature_grad` +- `ztilde_gradient`: nonzero `z_tilde_bot_grad` + +## time step and run duration + +The `forward` step performs one model time step and outputs pressure-gradient +diagnostics used in the analysis. + +## analysis + +The `analysis` step computes and plots: + +- Omega RMS error versus reference (`omega_vs_reference.png`), including a + power-law fit and convergence slope, and +- Omega RMS difference versus Python initialization (`omega_vs_python.png`). 
+ +The corresponding tabulated data are written to +`omega_vs_reference.nc` and `omega_vs_python.nc`. + diff --git a/docs/users_guide/ocean/tasks/images/horiz_press_grad_salin_grad.png b/docs/users_guide/ocean/tasks/images/horiz_press_grad_salin_grad.png new file mode 100644 index 0000000000..a359f89032 Binary files /dev/null and b/docs/users_guide/ocean/tasks/images/horiz_press_grad_salin_grad.png differ diff --git a/docs/users_guide/ocean/tasks/index.md b/docs/users_guide/ocean/tasks/index.md index 8d6befc217..07639bbe34 100644 --- a/docs/users_guide/ocean/tasks/index.md +++ b/docs/users_guide/ocean/tasks/index.md @@ -13,6 +13,7 @@ cosine_bell customizable_viz external_gravity_wave geostrophic +horiz_press_grad divergent_2d ice_shelf_2d inertial_gravity_wave diff --git a/polaris/ocean/model/mpaso_to_omega.yaml b/polaris/ocean/model/mpaso_to_omega.yaml index 74e75e4125..af057e8e4d 100644 --- a/polaris/ocean/model/mpaso_to_omega.yaml +++ b/polaris/ocean/model/mpaso_to_omega.yaml @@ -22,6 +22,12 @@ variables: cellsOnVertex: CellsOnVertex edgesOnVertex: EdgesOnVertex + # vertical coordinate + minLevelCell: MinLayerCell + maxLevelCell: MaxLayerCell + # currently hard-coded in horizontal mesh + # bottomDepth: BottomDepth + # tracers temperature: Temperature salinity: Salinity diff --git a/polaris/suites/ocean/omega_nightly.txt b/polaris/suites/ocean/omega_nightly.txt index 11eb3c4940..0b46e45f71 100644 --- a/polaris/suites/ocean/omega_nightly.txt +++ b/polaris/suites/ocean/omega_nightly.txt @@ -1,4 +1,7 @@ ocean/planar/barotropic_gyre/munk/free-slip +ocean/horiz_press_grad/salinity_gradient +ocean/horiz_press_grad/temperature_gradient +ocean/horiz_press_grad/ztilde_gradient ocean/planar/manufactured_solution/convergence_both/default ocean/planar/manufactured_solution/convergence_both/del2 ocean/planar/manufactured_solution/convergence_both/del4 diff --git a/polaris/suites/ocean/omega_pr.txt b/polaris/suites/ocean/omega_pr.txt index 57c518c8fb..770ca4efd8 100644 --- 
a/polaris/suites/ocean/omega_pr.txt +++ b/polaris/suites/ocean/omega_pr.txt @@ -1,6 +1,7 @@ #ocean/planar/barotropic_channel/default # Supported but fails ocean/planar/merry_go_round/default ocean/planar/barotropic_gyre/munk/free-slip +ocean/horiz_press_grad/salinity_gradient ocean/planar/manufactured_solution/convergence_both/default ocean/planar/manufactured_solution/convergence_both/del2 ocean/planar/manufactured_solution/convergence_both/del4 diff --git a/polaris/tasks/ocean/add_tasks.py b/polaris/tasks/ocean/add_tasks.py index 26d35beaa5..0593839752 100644 --- a/polaris/tasks/ocean/add_tasks.py +++ b/polaris/tasks/ocean/add_tasks.py @@ -7,6 +7,7 @@ add_external_gravity_wave_tasks as add_external_gravity_wave_tasks, ) from polaris.tasks.ocean.geostrophic import add_geostrophic_tasks +from polaris.tasks.ocean.horiz_press_grad import add_horiz_press_grad_tasks from polaris.tasks.ocean.ice_shelf_2d import add_ice_shelf_2d_tasks from polaris.tasks.ocean.inertial_gravity_wave import ( add_inertial_gravity_wave_tasks as add_inertial_gravity_wave_tasks, @@ -36,6 +37,7 @@ def add_ocean_tasks(component): add_baroclinic_channel_tasks(component=component) add_barotropic_channel_tasks(component=component) add_barotropic_gyre_tasks(component=component) + add_horiz_press_grad_tasks(component=component) add_ice_shelf_2d_tasks(component=component) add_inertial_gravity_wave_tasks(component=component) add_internal_wave_tasks(component=component) diff --git a/polaris/tasks/ocean/horiz_press_grad/__init__.py b/polaris/tasks/ocean/horiz_press_grad/__init__.py new file mode 100644 index 0000000000..6e670e22e9 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/__init__.py @@ -0,0 +1,19 @@ +from polaris.tasks.ocean.horiz_press_grad.task import HorizPressGradTask + + +def add_horiz_press_grad_tasks(component): +    """ +    Add tasks for various tests involving the horizontal pressure-gradient +    acceleration between two adjacent ocean columns + +    Parameters +    ---------- +    component : 
polaris.tasks.ocean.Ocean + the ocean component that the tasks will be added to + """ + for name in [ + 'salinity_gradient', + 'temperature_gradient', + 'ztilde_gradient', + ]: + component.add_task(HorizPressGradTask(component=component, name=name)) diff --git a/polaris/tasks/ocean/horiz_press_grad/analysis.py b/polaris/tasks/ocean/horiz_press_grad/analysis.py new file mode 100644 index 0000000000..c7dd120b08 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/analysis.py @@ -0,0 +1,622 @@ +import matplotlib.pyplot as plt +import numpy as np +import xarray as xr + +from polaris.ocean.model import OceanIOStep +from polaris.ocean.vertical.ztilde import Gravity, RhoSw +from polaris.viz import use_mplstyle + + +class Analysis(OceanIOStep): + """ + A step for analyzing two-column HPGA errors versus a reference solution + and versus the Python-computed initial-state solution. + + Attributes + ---------- + dependencies_dict : dict + A dictionary of dependent steps: + + reference : polaris.Step + The reference step that produces ``reference_solution.nc`` + + init : dict + Mapping from horizontal resolution (km) to ``Init`` step + + forward : dict + Mapping from horizontal resolution (km) to ``Forward`` step + """ + + def __init__(self, component, indir, dependencies): + """ + Create the analysis step + + Parameters + ---------- + component : polaris.Component + The component the step belongs to + + indir : str + The subdirectory that the task belongs to, that this step will + go into a subdirectory of + + dependencies : dict + A dictionary of dependent steps + """ + super().__init__(component=component, name='analysis', indir=indir) + + self.dependencies_dict = dependencies + + self.add_output_file('omega_vs_reference.png') + self.add_output_file('omega_vs_reference.nc') + self.add_output_file('omega_vs_python.png') + self.add_output_file('omega_vs_python.nc') + + def setup(self): + """ + Add inputs from reference, init and forward steps + """ + super().setup() + + 
section = self.config['horiz_press_grad'] + horiz_resolutions = section.getexpression('horiz_resolutions') + assert horiz_resolutions is not None + + reference = self.dependencies_dict['reference'] + self.add_input_file( + filename='reference_solution.nc', + work_dir_target=f'{reference.path}/reference_solution.nc', + ) + + init_steps = self.dependencies_dict['init'] + forward_steps = self.dependencies_dict['forward'] + for resolution in horiz_resolutions: + init = init_steps[resolution] + forward = forward_steps[resolution] + self.add_input_file( + filename=f'init_r{resolution:02g}.nc', + work_dir_target=f'{init.path}/initial_state.nc', + ) + self.add_input_file( + filename=f'output_r{resolution:02g}.nc', + work_dir_target=f'{forward.path}/output.nc', + ) + + def run(self): + """ + Run this step of the test case + """ + plt.switch_backend('Agg') + logger = self.logger + config = self.config + + section = config['horiz_press_grad'] + horiz_resolutions = section.getexpression('horiz_resolutions') + assert horiz_resolutions is not None, ( + 'The "horiz_resolutions" configuration option must be set in ' + 'the "horiz_press_grad" section.' + ) + omega_vs_reference_convergence_rate_min = section.getfloat( + 'omega_vs_reference_convergence_rate_min' + ) + assert omega_vs_reference_convergence_rate_min is not None, ( + 'The "omega_vs_reference_convergence_rate_min" configuration ' + 'option must be set in the "horiz_press_grad" section.' + ) + omega_vs_reference_convergence_rate_max = section.getfloat( + 'omega_vs_reference_convergence_rate_max' + ) + assert omega_vs_reference_convergence_rate_max is not None, ( + 'The "omega_vs_reference_convergence_rate_max" configuration ' + 'option must be set in the "horiz_press_grad" section.' 
+ ) + omega_vs_reference_convergence_fit_max_resolution = section.getfloat( + 'omega_vs_reference_convergence_fit_max_resolution' + ) + assert omega_vs_reference_convergence_fit_max_resolution is not None, ( + 'The "omega_vs_reference_convergence_fit_max_resolution" ' + 'configuration option must be set in the "horiz_press_grad" ' + 'section.' + ) + omega_vs_reference_high_res_rms_threshold = section.getfloat( + 'omega_vs_reference_high_res_rms_threshold' + ) + assert omega_vs_reference_high_res_rms_threshold is not None, ( + 'The "omega_vs_reference_high_res_rms_threshold" ' + 'configuration option must be set in the "horiz_press_grad" ' + 'section.' + ) + omega_vs_polaris_rms_threshold = section.getfloat( + 'omega_vs_polaris_rms_threshold' + ) + assert omega_vs_polaris_rms_threshold is not None, ( + 'The "omega_vs_polaris_rms_threshold" configuration option ' + 'must be set in the "horiz_press_grad" section.' + ) + + ds_ref = self.open_model_dataset('reference_solution.nc') + if ds_ref.sizes.get('nCells', 0) <= 2: + raise ValueError( + 'The reference solution requires at least 3 columns so that ' + 'the central column is nCells=2.' 
+ ) + + ref_z = ds_ref.ZTildeInter.isel(Time=0, nCells=2).values + ref_hpga = ds_ref.HPGAInter.isel(Time=0).values + ref_valid_grad_mask = ds_ref.ValidGradInterMask.isel(Time=0).values + + ref_errors = [] + py_errors = [] + + for resolution in horiz_resolutions: + ds_init = self.open_model_dataset(f'init_r{resolution:02g}.nc') + ds_out = self.open_model_dataset( + f'output_r{resolution:02g}.nc', decode_times=False + ) + + edge_index, cells_on_edge = _get_internal_edge(ds_init) + cell0, cell1 = cells_on_edge + + z_tilde_forward = _get_forward_z_tilde_edge_mid( + ds_out=ds_out, + cell0=cell0, + cell1=cell1, + ) + hpga_forward = ds_out.NormalVelocityTend.isel( + Time=0, nEdges=edge_index + ).values + + # maxLevelCell is one-based (Fortran indexing), convert to + # zero-based and use the shallowest valid bottom among the two + # cells that bound the internal edge. + max_level_cells = ds_init.maxLevelCell.isel( + nCells=[cell0, cell1] + ).values.astype(int) + max_level_index = int(np.min(max_level_cells) - 1) + if max_level_index < 0: + raise ValueError( + f'Invalid maxLevelCell values {max_level_cells} at ' + f'resolution {resolution:g} km.' 
+ ) + + forward_valid_mask = np.zeros_like(hpga_forward, dtype=bool) + forward_valid_mask[: max_level_index + 1] = True + + sampled_ref_hpga = _sample_reference_without_interpolation( + ref_z=ref_z, + ref_values=ref_hpga, + target_z=z_tilde_forward, + ref_valid_mask=ref_valid_grad_mask, + target_valid_mask=forward_valid_mask, + ) + + hpga_ref_diff = hpga_forward - sampled_ref_hpga + ref_errors.append(_rms_error(hpga_ref_diff)) + + z_tilde_init = ( + 0.5 + * ( + ds_init.ZTildeMid.isel(Time=0, nCells=cell0) + + ds_init.ZTildeMid.isel(Time=0, nCells=cell1) + ).values + ) + _check_vertical_match( + z_ref=z_tilde_init, + z_test=z_tilde_forward, + msg=( + 'ZTilde mismatch between Python init and Omega forward ' + f'at resolution {resolution:g} km' + ), + valid_mask=forward_valid_mask, + ) + + hpga_init = ds_init.HPGA.isel(Time=0).values + hpga_diff = hpga_forward - hpga_init + py_errors.append(_rms_error(hpga_diff[forward_valid_mask])) + + resolution_array = np.asarray(horiz_resolutions, dtype=float) + ref_error_array = np.asarray(ref_errors, dtype=float) + py_error_array = np.asarray(py_errors, dtype=float) + + fit_mask = ( + resolution_array + <= omega_vs_reference_convergence_fit_max_resolution + ) + + ref_fit, ref_slope, ref_intercept = _power_law_fit( + x=resolution_array, + y=ref_error_array, + fit_mask=fit_mask, + ) + + _write_dataset( + filename='omega_vs_reference.nc', + resolution_km=resolution_array, + rms_error=ref_error_array, + fit=ref_fit, + slope=ref_slope, + intercept=ref_intercept, + y_name='rms_error_vs_reference', + y_units='m s-2', + ) + _write_dataset( + filename='omega_vs_python.nc', + resolution_km=resolution_array, + rms_error=py_error_array, + y_name='rms_error_vs_python', + y_units='m s-2', + ) + + _plot_errors( + resolution_km=resolution_array, + rms_error=ref_error_array, + fit=ref_fit, + slope=ref_slope, + y_label='RMS error in HPGA (m s-2)', + title='Omega HPGA Error vs Reference Solution', + output='omega_vs_reference.png', + ) + 
_plot_errors( + resolution_km=resolution_array, + rms_error=py_error_array, + y_label='RMS difference in HPGA (m s-2)', + title='Omega vs Polaris HPGA RMS Difference', + output='omega_vs_python.png', + ) + + logger.info(f'Omega-vs-reference convergence slope: {ref_slope:1.3f}') + logger.info( + 'Omega-vs-reference fit uses resolutions (km): ' + f'{_format_resolution_list(resolution_array[fit_mask])}' + ) + res_error_pairs = _format_resolution_error_pairs( + resolution_array, ref_error_array + ) + logger.info( + 'Omega-vs-Polaris RMS differences by resolution: ' + f'{res_error_pairs}' + ) + failing_polaris = py_error_array > omega_vs_polaris_rms_threshold + if np.any(failing_polaris): + failing_text = ', '.join( + [ + f'{resolution_array[index]:g} km: ' + f'{py_error_array[index]:.3e}' + for index in np.where(failing_polaris)[0] + ] + ) + raise ValueError( + 'Omega-vs-Polaris RMS difference exceeds ' + f'omega_vs_polaris_rms_threshold=' + f'{omega_vs_polaris_rms_threshold:.3e} at: {failing_text}' + ) + + highest_resolution_index = int(np.argmin(resolution_array)) + highest_resolution = float(resolution_array[highest_resolution_index]) + highest_resolution_ref_error = float( + ref_error_array[highest_resolution_index] + ) + if ( + highest_resolution_ref_error + > omega_vs_reference_high_res_rms_threshold + ): + raise ValueError( + 'Omega-vs-reference RMS error at highest resolution ' + f'({highest_resolution:g} km) is ' + f'{highest_resolution_ref_error:.3e}, which exceeds ' + 'omega_vs_reference_high_res_rms_threshold ' + f'({omega_vs_reference_high_res_rms_threshold:.3e}).' 
+ ) + + if not ( + omega_vs_reference_convergence_rate_min + <= ref_slope + <= omega_vs_reference_convergence_rate_max + ): + raise ValueError( + 'Omega-vs-reference convergence slope is outside the ' + 'allowed range: ' + f'{ref_slope:.3f} not in ' + f'[{omega_vs_reference_convergence_rate_min:.3f}, ' + f'{omega_vs_reference_convergence_rate_max:.3f}]' + ) + + +def _get_internal_edge(ds_init: xr.Dataset) -> tuple[int, tuple[int, int]]: + """ + Determine the edge that connects the two valid cells in the two-column + mesh. + """ + if 'cellsOnEdge' not in ds_init: + raise ValueError('cellsOnEdge is required in initial_state.nc') + + cells_on_edge = ds_init.cellsOnEdge.values.astype(int) + if cells_on_edge.ndim != 2 or cells_on_edge.shape[1] != 2: + raise ValueError('cellsOnEdge must have shape (nEdges, 2).') + + valid = np.logical_and(cells_on_edge[:, 0] > 0, cells_on_edge[:, 1] > 0) + valid_edges = np.where(valid)[0] + if len(valid_edges) != 1: + raise ValueError( + 'Expected exactly one edge with two valid cells in the ' + f'two-column mesh, found {len(valid_edges)}.' + ) + + edge_index = int(valid_edges[0]) + # convert from 1-based MPAS indexing to 0-based indexing + cell0 = int(cells_on_edge[edge_index, 0] - 1) + cell1 = int(cells_on_edge[edge_index, 1] - 1) + return edge_index, (cell0, cell1) + + +def _get_forward_z_tilde_edge_mid( + ds_out: xr.Dataset, + cell0: int, + cell1: int, +) -> np.ndarray: + """ + Compute edge-centered pseudo-height at layer midpoints from Omega output + pressure. 
+ """ + pressure_mid = ds_out.PressureMid.isel(Time=0) + pressure_edge_mid = 0.5 * ( + pressure_mid.isel(nCells=cell0) + pressure_mid.isel(nCells=cell1) + ) + return (-pressure_edge_mid / (RhoSw * Gravity)).values + + +def _sample_reference_without_interpolation( + ref_z: np.ndarray, + ref_values: np.ndarray, + target_z: np.ndarray, + ref_valid_mask: np.ndarray | None = None, + target_valid_mask: np.ndarray | None = None, + abs_tol: float = 1.0e-6, + rel_tol: float = 1.0e-10, +) -> np.ndarray: + """ + Sample reference values at target z-tilde values by exact matching within a + strict tolerance, without interpolation. + """ + ref_z = np.asarray(ref_z, dtype=float) + ref_values = np.asarray(ref_values, dtype=float) + target_z = np.asarray(target_z, dtype=float) + if ref_valid_mask is not None: + ref_valid_mask = np.asarray(ref_valid_mask, dtype=bool) + if ref_valid_mask.shape != ref_z.shape: + raise ValueError( + 'ref_valid_mask must have the same shape as ref_z.' + ) + if target_valid_mask is not None: + target_valid_mask = np.asarray(target_valid_mask, dtype=bool) + if target_valid_mask.shape != target_z.shape: + raise ValueError( + 'target_valid_mask must have the same shape as target_z.' + ) + + sampled = np.full_like(target_z, np.nan, dtype=float) + valid_target = np.isfinite(target_z) + if target_valid_mask is not None: + valid_target = np.logical_and(valid_target, target_valid_mask) + valid_ref = np.logical_and(np.isfinite(ref_z), np.isfinite(ref_values)) + if ref_valid_mask is not None: + valid_ref = np.logical_and(valid_ref, ref_valid_mask) + + # The bottom valid forward layer hits bathymetry and should not be used + # in the reference comparison. 
+ if np.any(valid_target): + deepest = int(np.where(valid_target)[0][-1]) + valid_target[deepest] = False + + ref_z_valid = ref_z[valid_ref] + ref_values_valid = ref_values[valid_ref] + + target_valid = target_z[valid_target] + if len(target_valid) == 0: + return sampled + if len(ref_z_valid) == 0: + raise ValueError( + 'No valid reference z-tilde values remain after applying ' + 'ValidGradInterMask.' + ) + + dz = np.abs(ref_z_valid[:, np.newaxis] - target_valid[np.newaxis, :]) + indices = np.argmin(dz, axis=0) + min_dz = dz[indices, np.arange(len(indices))] + + tol = np.maximum(abs_tol, rel_tol * np.maximum(1.0, np.abs(target_valid))) + if np.any(min_dz > tol): + max_mismatch = float(np.max(min_dz)) + raise ValueError( + f'Reference z-tilde values do not match Omega z-tilde values ' + f'closely enough for subsampling without interpolation. max ' + f'|dz|={max_mismatch}' + ) + + sampled[valid_target] = ref_values_valid[indices] + return sampled + + +def _check_vertical_match( + z_ref: np.ndarray, + z_test: np.ndarray, + msg: str, + valid_mask: np.ndarray | None = None, + abs_tol: float = 1.0e-6, + rel_tol: float = 1.0e-10, +) -> None: + """ + Ensure two pseudo-height arrays match within strict tolerances. 
+ """ + z_ref = np.asarray(z_ref, dtype=float) + z_test = np.asarray(z_test, dtype=float) + if z_ref.shape != z_test.shape: + raise ValueError( + f'{msg}: shape mismatch {z_ref.shape} != {z_test.shape}' + ) + + if valid_mask is not None: + valid_mask = np.asarray(valid_mask, dtype=bool) + if valid_mask.shape != z_ref.shape: + raise ValueError( + f'{msg}: valid_mask shape mismatch ' + f'{valid_mask.shape} != {z_ref.shape}' + ) + + valid = np.logical_and(np.isfinite(z_ref), np.isfinite(z_test)) + if valid_mask is not None: + valid = np.logical_and(valid, valid_mask) + if not np.any(valid): + raise ValueError(f'{msg}: no valid levels for comparison.') + + diff = np.abs(z_ref[valid] - z_test[valid]) + tol = np.maximum(abs_tol, rel_tol * np.maximum(1.0, np.abs(z_ref[valid]))) + if np.any(diff > tol): + raise ValueError( + f'{msg}: max |dz| = {float(np.max(diff))}, exceeds tolerance.' + ) + + +def _rms_error(values: np.ndarray) -> float: + """ + Compute RMS over finite values. + """ + values = np.asarray(values, dtype=float) + valid = np.isfinite(values) + if not np.any(valid): + raise ValueError('No finite values available for RMS error.') + return float(np.sqrt(np.mean(values[valid] ** 2))) + + +def _format_resolution_list(resolution_km: np.ndarray) -> str: + """ + Format a resolution array as a compact list of floats. + """ + values = [f'{float(resolution):g}' for resolution in resolution_km] + return f'[{", ".join(values)}]' + + +def _format_resolution_error_pairs( + resolution_km: np.ndarray, + rms_error: np.ndarray, +) -> str: + """ + Format resolution/error pairs as readable key-value text. + """ + pairs = [ + f'{float(resolution):g} km: {float(error):.3e}' + for resolution, error in zip(resolution_km, rms_error, strict=True) + ] + return '; '.join(pairs) + + +def _power_law_fit( + x: np.ndarray, + y: np.ndarray, + fit_mask: np.ndarray | None = None, +) -> tuple[np.ndarray, float, float]: + """ + Fit y = 10**b * x**m in log10 space. 
+ """ + x = np.asarray(x, dtype=float) + y = np.asarray(y, dtype=float) + valid = np.logical_and.reduce( + (np.isfinite(x), np.isfinite(y), x > 0.0, y > 0.0) + ) + if fit_mask is not None: + fit_mask = np.asarray(fit_mask, dtype=bool) + if fit_mask.shape != x.shape: + raise ValueError( + 'fit_mask must have the same shape as x and y for ' + 'power-law fitting.' + ) + valid = np.logical_and(valid, fit_mask) + + if np.count_nonzero(valid) < 2: + raise ValueError( + 'At least two positive finite points are required for fit.' + ) + + poly = np.polyfit(np.log10(x[valid]), np.log10(y[valid]), 1) + slope = float(poly[0]) + intercept = float(poly[1]) + fit = x**slope * 10.0**intercept + return fit, slope, intercept + + +def _write_dataset( + filename: str, + resolution_km: np.ndarray, + rms_error: np.ndarray, + y_name: str, + y_units: str, + fit: np.ndarray | None = None, + slope: float | None = None, + intercept: float | None = None, +) -> None: + """ + Write data used in a convergence plot to netCDF. + """ + nres = len(resolution_km) + ds = xr.Dataset() + ds['resolution_km'] = xr.DataArray( + data=resolution_km, + dims=['nResolutions'], + attrs={'long_name': 'horizontal resolution', 'units': 'km'}, + ) + ds[y_name] = xr.DataArray( + data=rms_error, + dims=['nResolutions'], + attrs={'long_name': y_name.replace('_', ' '), 'units': y_units}, + ) + if fit is not None: + ds['power_law_fit'] = xr.DataArray( + data=fit, + dims=['nResolutions'], + attrs={ + 'long_name': 'power-law fit to rms error', + 'units': y_units, + }, + ) + if slope is not None: + ds.attrs['fit_slope'] = slope + if intercept is not None: + ds.attrs['fit_intercept_log10'] = intercept + ds.attrs['nResolutions'] = nres + ds.to_netcdf(filename) + + +def _plot_errors( + resolution_km: np.ndarray, + rms_error: np.ndarray, + y_label: str, + title: str, + output: str, + fit: np.ndarray | None = None, + slope: float | None = None, +) -> None: + """ + Plot RMS error vs. horizontal resolution with a power-law fit. 
+ """ + use_mplstyle() + fig = plt.figure() + ax = fig.add_subplot(111) + + if fit is not None: + if slope is None: + raise ValueError('slope must be provided when fit is provided.') + ax.loglog( + resolution_km, + fit, + 'k', + label=f'power-law fit (slope={slope:1.3f})', + ) + ax.loglog(resolution_km, rms_error, 'o', label='RMS error') + + ax.set_xlabel('Horizontal resolution (km)') + ax.set_ylabel(y_label) + ax.set_title(title) + ax.legend(loc='lower right') + ax.invert_xaxis() + fig.savefig(output, bbox_inches='tight', pad_inches=0.1) + plt.close() diff --git a/polaris/tasks/ocean/horiz_press_grad/column.py b/polaris/tasks/ocean/horiz_press_grad/column.py new file mode 100644 index 0000000000..4c7995027d --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/column.py @@ -0,0 +1,138 @@ +from typing import Callable + +import numpy as np +from scipy.interpolate import PchipInterpolator + +from polaris.config import PolarisConfigParser + + +def get_array_from_mid_grad( + config: PolarisConfigParser, name: str, x: np.ndarray +) -> np.ndarray: + """ + Get an array at a given set of horizontal points based on values defined + at the midpoint (x=0) and their constant gradient with respect to x. + + Parameters + ---------- + config : PolarisConfigParser + The configuration parser containing the options "{name}_mid" and + "{name}_grad" in the "horiz_press_grad" section. + x : np.ndarray + The x-coordinates at which to evaluate the array + name : str + The base name of the configuration options + + Returns + ------- + array : np.ndarray + The array evaluated at the given x-coordinates + """ + section = config['horiz_press_grad'] + mid = section.getnumpy(f'{name}_mid') + grad = section.getnumpy(f'{name}_grad') + + assert mid is not None, ( + f'The "{name}_mid" configuration option must be set in the ' + '"horiz_press_grad" section.' + ) + assert grad is not None, ( + f'The "{name}_grad" configuration option must be set in the ' + '"horiz_press_grad" section.' 
+ ) + + if isinstance(mid, (list, tuple, np.ndarray)): + col_count = len(x) + node_count = len(mid) + + array = np.zeros((col_count, node_count), dtype=float) + + for i in range(col_count): + array[i, :] = np.array(mid) + x[i] * np.array(grad) + elif np.isscalar(mid): + array = mid + x * grad + else: + raise ValueError( + f'The "{name}_mid" configuration option must be a scalar or a ' + 'list, tuple or numpy.ndarray.' + ) + + return array + + +def get_pchip_interpolator( + z_tilde_nodes: np.ndarray, + values_nodes: np.ndarray, + name: str, +) -> Callable[[np.ndarray], np.ndarray]: + """ + Create a monotone PCHIP interpolator for values defined at z-tilde nodes. + + Parameters + ---------- + z_tilde_nodes : np.ndarray + One-dimensional z-tilde node locations. Must be strictly monotonic + (increasing or decreasing). + values_nodes : np.ndarray + One-dimensional values at ``z_tilde_nodes``. + name : str + A descriptive name used in error messages. + + Returns + ------- + interpolator : callable + A function that maps target z-tilde values to interpolated values. + Targets must lie within the node range; extrapolation is not allowed. + """ + z_tilde_nodes = np.asarray(z_tilde_nodes, dtype=float) + values_nodes = np.asarray(values_nodes, dtype=float) + + if z_tilde_nodes.ndim != 1 or values_nodes.ndim != 1: + raise ValueError('z_tilde_nodes and values_nodes must be 1-D arrays.') + if len(z_tilde_nodes) != len(values_nodes): + raise ValueError( + f'Lengths of z_tilde_nodes and {name} nodes must match.' + ) + if len(z_tilde_nodes) < 2: + raise ValueError('At least two z_tilde nodes are required.') + + dz = np.diff(z_tilde_nodes) + is_increasing = np.all(dz > 0.0) + is_decreasing = np.all(dz < 0.0) + if not (is_increasing or is_decreasing): + raise ValueError( + 'z_tilde_nodes must be strictly monotonic (increasing or ' + 'decreasing).' 
+ ) + + if is_decreasing: + x = -z_tilde_nodes + else: + x = z_tilde_nodes + + x_min = x.min() + x_max = x.max() + + interpolator = PchipInterpolator(x, values_nodes, extrapolate=False) + + def _interp(z_tilde_targets: np.ndarray) -> np.ndarray: + z_tilde_targets = np.asarray(z_tilde_targets, dtype=float) + if np.any(~np.isfinite(z_tilde_targets)): + raise ValueError('Target z_tilde values must be finite.') + if is_decreasing: + x_target = -z_tilde_targets + else: + x_target = z_tilde_targets + if np.any(x_target < x_min) or np.any(x_target > x_max): + raise ValueError( + f'Target z_tilde values for {name} must fall within the ' + 'node range; extrapolation is not supported.' + ) + values = interpolator(x_target) + if np.any(~np.isfinite(values)): + raise ValueError( + f'PCHIP interpolation produced non-finite values for {name}.' + ) + return values + + return _interp diff --git a/polaris/tasks/ocean/horiz_press_grad/forward.py b/polaris/tasks/ocean/horiz_press_grad/forward.py new file mode 100644 index 0000000000..c1080b7c56 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/forward.py @@ -0,0 +1,64 @@ +from polaris.ocean.model import OceanModelStep +from polaris.resolution import resolution_to_string + + +class Forward(OceanModelStep): + """ + This step performs a forward run for a single time step, writing out + the normal-velocity tendency (which is just the pressure gradient + acceleration) along with other diagnostics. 
+ """ + + def __init__(self, component, horiz_res, indir): + """ + Create the step + + Parameters + ---------- + component : polaris.Component + The component the step belongs to + + horiz_res : float + The horizontal resolution in km + + indir : str + The subdirectory that the task belongs to, that this step will + go into a subdirectory of + """ + self.horiz_res = horiz_res + name = f'forward_{resolution_to_string(horiz_res)}' + super().__init__( + component=component, + name=name, + indir=indir, + ntasks=1, + min_tasks=1, + openmp_threads=1, + ) + + init_dir = f'init_{resolution_to_string(horiz_res)}' + self.add_input_file( + filename='initial_state.nc', + target=f'../{init_dir}/initial_state.nc', + ) + self.add_input_file( + filename='mesh.nc', + target=f'../{init_dir}/culled_mesh.nc', + ) + + # TODO: remove as soon as Omega no longer hard-codes this file + self.add_input_file(filename='OmegaMesh.nc', target='initial_state.nc') + + validate_vars = ['NormalVelocityTend'] + self.add_output_file('output.nc', validate_vars=validate_vars) + + def setup(self): + """ + Fill in config options in the forward.yaml file based on config options + """ + super().setup() + + self.add_yaml_file( + 'polaris.tasks.ocean.horiz_press_grad', + 'forward.yaml', + ) diff --git a/polaris/tasks/ocean/horiz_press_grad/forward.yaml b/polaris/tasks/ocean/horiz_press_grad/forward.yaml new file mode 100644 index 0000000000..00219ae168 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/forward.yaml @@ -0,0 +1,48 @@ +Omega: + TimeIntegration: + TimeStep: 0000_00:00:01 + StartTime: 0001-01-01_00:00:00 + StopTime: 0001-01-01_00:00:01 + Tendencies: + ThicknessFluxTendencyEnable: false + PVTendencyEnable: false + KETendencyEnable: false + SSHTendencyEnable: false + VelDiffTendencyEnable: false + VelHyperDiffTendencyEnable: false + WindForcingTendencyEnable: false + BottomDragTendencyEnable: false + TracerHorzAdvTendencyEnable: false + TracerDiffTendencyEnable: false + 
TracerHyperDiffTendencyEnable: false + UseCustomTendency: false + ManufacturedSolutionTendency: false + PressureGradTendencyEnable: true + IOStreams: + InitialVertCoord: + Filename: initial_state.nc + InitialState: + UsePointerFile: false + Filename: initial_state.nc + History: + Filename: output.nc + Freq: 1 + FreqUnits: Seconds + IfExists: append + # effectively never + FileFreq: 9999 + FileFreqUnits: years + Contents: + - ZMid + - ZInterface + - PressureMid + - PressureInterface + - LayerThickness + - Temperature + - Salinity + - SpecVol + - GeopotentialMid + - NormalVelocityTend + - BottomDepth + - MinLayerCell + - MaxLayerCell diff --git a/polaris/tasks/ocean/horiz_press_grad/horiz_press_grad.cfg b/polaris/tasks/ocean/horiz_press_grad/horiz_press_grad.cfg new file mode 100644 index 0000000000..f51bd8a912 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/horiz_press_grad.cfg @@ -0,0 +1,97 @@ +# Options related to the vertical grid +[vertical_grid] + +# the type of vertical grid +grid_type = uniform + +# The type of vertical coordinate (e.g. 
z-level, z-star, z-tilde, sigma)
+coord_type = z-tilde
+
+# Whether to use "partial" or "full", or "None" to not alter the topography
+partial_cell_type = None
+
+# The minimum fraction of a layer for partial cells
+min_pc_fraction = 0.1
+
+
+# Options related to the ocean component
+[ocean]
+# Which model, MPAS-Ocean or Omega, is used
+model = omega
+
+# Equation of state type, defaults to mpas-ocean default
+eos_type = teos-10
+
+
+# config options for horizontal pressure gradient testcases
+[horiz_press_grad]
+
+# resolutions in km (the distance between the two columns)
+horiz_resolutions = [4.0, 3.0, 2.0, 1.5, 1.0, 0.75, 0.5]
+
+# vertical resolutions in m
+vert_resolutions = [4.0, 3.0, 2.0, 1.5, 1.0, 0.75, 0.5]
+
+# geometric sea surface height at the midpoint between the two columns
+# and its gradient
+geom_ssh_mid = 0.0
+# m / km
+geom_ssh_grad = 0.0
+
+# geometric sea floor height at the midpoint between the two columns
+# and its gradient
+geom_z_bot_mid = -500.0
+# m / km
+geom_z_bot_grad = 0.0
+
+# Pseudo-height of the bottom of the ocean (with some buffer compared with
+# geometric sea floor height) at the midpoint between the two columns
+# and its gradient
+z_tilde_bot_mid = -576.0
+# m / km
+z_tilde_bot_grad = 0.0
+
+# pseudo-height, temperatures and salinities for piecewise linear initial
+# conditions at the midpoint between the two columns and their gradients
+z_tilde_mid = [0.0, -48.0, -144.0, -288.0, -576.0]
+# m / km
+z_tilde_grad = [0.0, 0.0, 0.0, 0.0, 0.0]
+
+temperature_mid = [22.0, 20.0, 14.0, 8.0, 5.0]
+# deg C / km
+temperature_grad = [0.0, 0.0, 0.0, 0.0, 0.0]
+
+salinity_mid = [35.6, 35.4, 35.0, 34.8, 34.75]
+# g/kg / km
+salinity_grad = [0.0, 0.0, 0.0, 0.0, 0.0]
+
+# number of iterations over which to allow water column adjustments
+water_col_adjust_iter_count = 6
+
+# Early stopping threshold for fractional change in geometric water-column
+# thickness between adjustment iterations
+water_col_adjust_frac_change_threshold = 1.0e-12
+
+ +# reference solution quadrature method +reference_quadrature_method = gauss4 + +# reference solution's horizontal resolution in km +reference_horiz_res = 0.25 + +# Maximum RMS difference allowed between Omega forward and Polaris/Python +# init HPGA at each resolution +omega_vs_polaris_rms_threshold = 1.0e-10 + +# Maximum RMS error allowed between Omega and the reference solution at the +# highest resolution +omega_vs_reference_high_res_rms_threshold = 1.0e-6 + +# Allowed range for the Omega-vs-reference convergence slope from the +# power-law fit +omega_vs_reference_convergence_rate_min = 1.6 +omega_vs_reference_convergence_rate_max = 2.1 + +# Maximum horizontal resolution (km) included in the power-law fit. All +# resolutions are still shown in output plots. +omega_vs_reference_convergence_fit_max_resolution = 4.0 diff --git a/polaris/tasks/ocean/horiz_press_grad/init.py b/polaris/tasks/ocean/horiz_press_grad/init.py new file mode 100644 index 0000000000..82d0599e6f --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/init.py @@ -0,0 +1,672 @@ +import numpy as np +import xarray as xr +from mpas_tools.io import write_netcdf +from mpas_tools.mesh.conversion import convert, cull +from mpas_tools.planar_hex import make_planar_hex_mesh + +from polaris.ocean.eos import compute_specvol +from polaris.ocean.model import OceanIOStep +from polaris.ocean.vertical import init_vertical_coord +from polaris.ocean.vertical.ztilde import ( + # temporary until we can get this for GCD + Gravity, + RhoSw, + geom_height_from_pseudo_height, + pressure_from_z_tilde, +) +from polaris.resolution import resolution_to_string +from polaris.tasks.ocean.horiz_press_grad.column import ( + get_array_from_mid_grad, + get_pchip_interpolator, +) + + +class Init(OceanIOStep): + """ + A step for creating a mesh and initial condition for two column + test cases + + Attributes + ---------- + horiz_res : float + The horizontal resolution in km + + vert_res : float + The vertical resolution in m + 
""" + + def __init__(self, component, horiz_res, vert_res, indir): + """ + Create the step + + Parameters + ---------- + component : polaris.Component + The component the step belongs to + + horiz_res : float + The horizontal resolution in km + + vert_res : float + The vertical resolution in m + + indir : str + The subdirectory that the task belongs to, that this step will + go into a subdirectory of + """ + self.horiz_res = horiz_res + self.vert_res = vert_res + name = f'init_{resolution_to_string(horiz_res)}' + super().__init__(component=component, name=name, indir=indir) + for file in [ + 'culled_mesh.nc', + 'initial_state.nc', + ]: + self.add_output_file(file) + + def run(self): + """ + Run this step of the test case + """ + logger = self.logger + # logger.setLevel(logging.INFO) + config = self.config + hpg_section = config['horiz_press_grad'] + if config.get('ocean', 'model') != 'omega': + raise ValueError( + 'The horiz_press_grad test case is only supported for the ' + 'Omega ocean model.' + ) + + horiz_res = self.horiz_res + vert_res = self.vert_res + + z_tilde_bot_mid = hpg_section.getfloat('z_tilde_bot_mid') + + assert z_tilde_bot_mid is not None, ( + 'The "z_tilde_bot_mid" configuration option must be set in the ' + '"horiz_press_grad" section.' + ) + + # it needs to be an error if the full water column can't be evenly + # divided by the resolution, because the later analysis will fail + if (-z_tilde_bot_mid / vert_res) % 1 != 0: + raise ValueError( + 'The "z_tilde_bot_mid" value must be an integer multiple of ' + 'the vertical resolution to ensure that the vertical grid can ' + 'be evenly divided into layers. Currently, z_tilde_bot_mid = ' + f'{z_tilde_bot_mid} and vert_res = {vert_res}, which results ' + f'in {-z_tilde_bot_mid / vert_res} layers.' 
+ ) + vert_levels = int(-z_tilde_bot_mid / vert_res) + + config.set('vertical_grid', 'vert_levels', str(vert_levels)) + + nx = 2 + ny = 2 + dc = 1e3 * horiz_res + dx = 1e3 * horiz_res + ds_mesh = make_planar_hex_mesh( + nx=nx, ny=ny, dc=dc, nonperiodic_x=True, nonperiodic_y=True + ) + + # cull one more row of cells so we're only left with 2 + cull_cell = ds_mesh.cullCell.values + ncells = ds_mesh.sizes['nCells'] + # remove the last 2 rows in y + cull_cell[ncells - 2 * (nx + 2) : ncells + 1] = 1 + ds_mesh['cullCell'] = xr.DataArray(data=cull_cell, dims=['nCells']) + + write_netcdf(ds_mesh, 'base_mesh.nc') + ds_mesh = cull(ds_mesh, logger=logger) + ds_mesh = convert( + ds_mesh, graphInfoFileName='culled_graph.info', logger=logger + ) + write_netcdf(ds_mesh, 'culled_mesh.nc') + + ncells = ds_mesh.sizes['nCells'] + if ncells != 2: + raise ValueError( + 'The two-column test case requires a mesh with exactly ' + f'2 cells, but the culled mesh has ' + f'{ncells} cells.' + ) + + x = horiz_res * np.array([-0.5, 0.5], dtype=float) + geom_ssh, geom_z_bot = self._get_geom_ssh_z_bot(x) + + goal_geom_water_column_thickness = geom_ssh - geom_z_bot + + # first guess at the pseudo bottom depth is the geometric + # water column thickness + pseudo_bottom_depth = goal_geom_water_column_thickness + + water_col_adjust_iter_count = config.getint( + 'horiz_press_grad', 'water_col_adjust_iter_count' + ) + + if water_col_adjust_iter_count is None: + raise ValueError( + 'The "water_col_adjust_iter_count" configuration option ' + 'must be set in the "horiz_press_grad" section.' + ) + + water_col_adjust_frac_change_threshold = hpg_section.getfloat( + 'water_col_adjust_frac_change_threshold' + ) + if water_col_adjust_frac_change_threshold is None: + raise ValueError( + 'The "water_col_adjust_frac_change_threshold" configuration ' + 'option must be set in the "horiz_press_grad" section.' 
+ ) + if water_col_adjust_frac_change_threshold < 0.0: + raise ValueError( + 'The "water_col_adjust_frac_change_threshold" configuration ' + 'option must be nonnegative.' + ) + + prev_geom_water_column_thickness: xr.DataArray | None = None + + for iter in range(water_col_adjust_iter_count): + ds = self._init_z_tilde_vert_coord(ds_mesh, pseudo_bottom_depth, x) + + z_tilde_mid = ds.zMid + h_tilde = ds.layerThickness + + logger.debug(f'z_tilde_mid = {z_tilde_mid}') + logger.debug(f'h_tilde = {h_tilde}') + + ct, sa = self._interpolate_t_s( + ds=ds, + z_tilde_mid=z_tilde_mid, + x=x, + ) + p_mid = pressure_from_z_tilde( + z_tilde=z_tilde_mid, + ) + + logger.debug(f'ct = {ct}') + logger.debug(f'sa = {sa}') + logger.debug(f'p_mid = {p_mid}') + + spec_vol = compute_specvol( + config=config, + temperature=ct, + salinity=sa, + pressure=p_mid, + ) + + logger.debug(f'geom_z_bot = {geom_z_bot}') + logger.debug(f'spec_vol = {spec_vol}') + + min_level_cell = ds.minLevelCell - 1 + max_level_cell = ds.maxLevelCell - 1 + logger.debug(f'min_level_cell = {min_level_cell}') + logger.debug(f'max_level_cell = {max_level_cell}') + + geom_z_inter, geom_z_mid = geom_height_from_pseudo_height( + geom_z_bot=geom_z_bot, + h_tilde=h_tilde, + spec_vol=spec_vol, + min_level_cell=min_level_cell, + max_level_cell=max_level_cell, + ) + + logger.debug(f'geom_z_inter = {geom_z_inter}') + logger.debug(f'geom_z_mid = {geom_z_mid}') + + # the water column thickness is the difference in the geometric + # height between the first and last valid valid interfaces + + geom_z_min = geom_z_inter.isel( + Time=0, nVertLevelsP1=min_level_cell + ) + geom_z_max = geom_z_inter.isel( + Time=0, nVertLevelsP1=max_level_cell + 1 + ) + # the min is shallower (less negative) than the max + geom_water_column_thickness = geom_z_min - geom_z_max + logger.debug( + f'geom_water_column_thickness = {geom_water_column_thickness}' + ) + + if prev_geom_water_column_thickness is not None: + frac_change = ( + np.abs( + 
geom_water_column_thickness + - prev_geom_water_column_thickness + ) + / prev_geom_water_column_thickness + ) + max_frac_change = frac_change.max().item() + + logger.info( + f'Iteration {iter}: max fractional change in ' + f'geometric water-column thickness = ' + f'{max_frac_change:.6e}' + ) + + if max_frac_change < water_col_adjust_frac_change_threshold: + logger.info( + f'Early stopping water-column adjustment after ' + f'iteration {iter} because max fractional change ' + f'({max_frac_change:.6e}) is below threshold ' + f'({water_col_adjust_frac_change_threshold:.6e}).' + ) + break + + # scale the pseudo bottom depth proportional to how far off we are + # in the geometric water column thickness from the goal + scaling_factor = ( + goal_geom_water_column_thickness / geom_water_column_thickness + ) + + max_scaling_factor = scaling_factor.max().item() + min_scaling_factor = scaling_factor.min().item() + logger.info( + f'Iteration {iter}: min scaling factor = ' + f'{min_scaling_factor:.6f}, ' + f'max scaling factor = {max_scaling_factor:.6f}' + ) + + pseudo_bottom_depth = pseudo_bottom_depth * scaling_factor + + logger.info( + f'Iteration {iter}: pseudo bottom depths = ' + f'{pseudo_bottom_depth.values}' + ) + + prev_geom_water_column_thickness = geom_water_column_thickness + + ds['temperature'] = ct + ds['salinity'] = sa + ds['SpecVol'] = spec_vol + + ds['SurfacePressure'] = xr.DataArray( + data=np.zeros((1, ncells), dtype=float), + dims=['Time', 'nCells'], + attrs={ + 'long_name': 'surface pressure', + 'units': 'Pa', + }, + ) + + # bottomDepth needs to be the geometric bottom depth of the bathymetry, + # not the pseudo-bottom depth used for the vertical coordinate + ds['bottomDepth'] = -geom_z_bot + ds.bottomDepth.attrs['long_name'] = 'seafloor geometric height' + ds.bottomDepth.attrs['units'] = 'm' + + ds['PressureMid'] = p_mid + ds.PressureMid.attrs['long_name'] = 'pressure at layer midpoints' + ds.PressureMid.attrs['units'] = 'Pa' + + ds['Density'] = 1.0 / 
ds['SpecVol'] + ds.Density.attrs['long_name'] = 'density' + ds.Density.attrs['units'] = 'kg m-3' + + ds['ZTildeMid'] = ds.zMid + ds.ZTildeMid.attrs['long_name'] = 'pseudo-height at layer midpoints' + ds.ZTildeMid.attrs['units'] = 'm' + + ds['GeomZMid'] = geom_z_mid + ds.GeomZMid.attrs['long_name'] = 'geometric height at layer midpoints' + ds.GeomZMid.attrs['units'] = 'm' + + ds['GeomZInter'] = geom_z_inter + ds.GeomZInter.attrs['long_name'] = ( + 'geometric height at layer interfaces' + ) + ds.GeomZInter.attrs['units'] = 'm' + + self._compute_montgomery_and_hpga(ds=ds, dx=dx, p_mid=p_mid) + + ds.layerThickness.attrs['long_name'] = 'pseudo-layer thickness' + ds.layerThickness.attrs['units'] = 'm' + + ds.zMid.attrs['long_name'] = 'pseudo-height at layer midpoints' + ds.zMid.attrs['units'] = 'm' + + nedges = ds_mesh.sizes['nEdges'] + nvertlevels = ds.sizes['nVertLevels'] + + ds['normalVelocity'] = xr.DataArray( + data=np.zeros((1, nedges, nvertlevels), dtype=float), + dims=['Time', 'nEdges', 'nVertLevels'], + attrs={ + 'long_name': 'normal velocity', + 'units': 'm s-1', + }, + ) + ds['fCell'] = xr.zeros_like(ds_mesh.xCell) + ds['fEdge'] = xr.zeros_like(ds_mesh.xEdge) + ds['fVertex'] = xr.zeros_like(ds_mesh.xVertex) + + ds.attrs['nx'] = nx + ds.attrs['ny'] = ny + ds.attrs['dc'] = dc + self.write_model_dataset(ds, 'initial_state.nc') + + def _compute_montgomery_and_hpga( + self, + ds: xr.Dataset, + dx: float, + p_mid: xr.DataArray, + ) -> None: + """Compute Montgomery potential and a 2-column HPGA. + + This mimics the way Omega will compute the horizontal pressure + gradient: simple finite differences between the two columns. + + The along-column HPGA is computed as: + + HPGA = dM/dx - p_edge * d(alpha)/dx + + where M is the Montgomery potential, alpha is the specific volume, + and p_edge is the pressure averaged between the two columns. 
+ + Outputs are added to ``ds``: + - MontgomeryMid (Time, nCells, nVertLevels) + - MontgomeryInter (Time, nCells, nVertLevels, nbnds) + - HPGAMid (Time, nVertLevels) + - HPGAInter (Time, nVertLevels, nbnds) + """ + + if ds.sizes.get('nCells', 0) != 2: + raise ValueError( + 'The two-column HPGA computation requires exactly 2 cells.' + ) + if dx == 0.0: + raise ValueError('dx must be non-zero for finite differences.') + + # Midpoint quantities (alpha is layerwise constant) + alpha_mid = ds.SpecVol + + # Interface quantities: Omega treats alpha as constant within each + # layer, so interface values are represented as bounds for each layer + # (top and bottom), with discontinuities permitted between layers. + z_tilde_top = ds.zInterface.isel(nVertLevelsP1=slice(0, -1)).rename( + {'nVertLevelsP1': 'nVertLevels'} + ) + z_tilde_bot = ds.zInterface.isel(nVertLevelsP1=slice(1, None)).rename( + {'nVertLevelsP1': 'nVertLevels'} + ) + z_top = ds.GeomZInter.isel(nVertLevelsP1=slice(0, -1)).rename( + {'nVertLevelsP1': 'nVertLevels'} + ) + z_bot = ds.GeomZInter.isel(nVertLevelsP1=slice(1, None)).rename( + {'nVertLevelsP1': 'nVertLevels'} + ) + + z_tilde_bnds = xr.concat([z_tilde_top, z_tilde_bot], dim='nbnds') + z_bnds = xr.concat([z_top, z_bot], dim='nbnds') + # put nbnds last for readability/consistency + z_tilde_bnds = z_tilde_bnds.transpose( + 'Time', 'nCells', 'nVertLevels', 'nbnds' + ) + z_bnds = z_bnds.transpose('Time', 'nCells', 'nVertLevels', 'nbnds') + + alpha_bnds = alpha_mid.expand_dims(nbnds=[0, 1]).transpose( + 'Time', 'nCells', 'nVertLevels', 'nbnds' + ) + # Montgomery: M = alpha * p + g * z, with p = -rho0 * g * z_tilde + montgomery_inter = Gravity * ( + z_bnds - RhoSw * alpha_bnds * z_tilde_bnds + ) + montgomery_inter = montgomery_inter.transpose( + 'Time', 'nCells', 'nVertLevels', 'nbnds' + ) + + # Omega convention: Montgomery potential at midpoints is the mean of + # the two adjacent interface values. 
+ montgomery_mid = 0.5 * ( + montgomery_inter.isel(nbnds=0) + montgomery_inter.isel(nbnds=1) + ) + + # 2-column finite differences across the pair + dM_dx_mid = ( + montgomery_mid.isel(nCells=1) - montgomery_mid.isel(nCells=0) + ) / dx + dalpha_dx_mid = ( + alpha_mid.isel(nCells=1) - alpha_mid.isel(nCells=0) + ) / dx + + dsa_dx_mid = ( + ds.salinity.isel(nCells=1) - ds.salinity.isel(nCells=0) + ) / dx + + # Pressure (positive downward), averaged to the edge between columns + p_edge_mid = 0.5 * (p_mid.isel(nCells=0) + p_mid.isel(nCells=1)) + + hpga_mid = -dM_dx_mid + p_edge_mid * dalpha_dx_mid + + ds['MontgomeryMid'] = montgomery_mid + ds.MontgomeryMid.attrs['long_name'] = ( + 'Montgomery potential at layer midpoints' + ) + ds.MontgomeryMid.attrs['units'] = 'm2 s-2' + + ds['MontgomeryInter'] = montgomery_inter + ds.MontgomeryInter.attrs['long_name'] = ( + 'Montgomery potential at layer interfaces (bounds)' + ) + ds.MontgomeryInter.attrs['units'] = 'm2 s-2' + + ds['HPGA'] = hpga_mid + ds.HPGA.attrs = { + 'long_name': ( + 'along-layer pressure gradient acceleration at layer midpoints' + ), + 'units': 'm s-2', + } + + ds['dMdxMid'] = dM_dx_mid + ds.dMdxMid.attrs = { + 'long_name': 'Gradient of Montgomery potential at layer midpoints', + 'units': 'm s-2', + } + + ds['PEdgeMid'] = p_edge_mid + ds.PEdgeMid.attrs = { + 'long_name': 'Pressure at horizontal edge and layer midpoints', + 'units': 'Pa', + } + + ds['dalphadxMid'] = dalpha_dx_mid + ds.dalphadxMid.attrs = { + 'long_name': 'Gradient of specific volume at layer midpoints', + 'units': 'm2 kg-1', + } + + ds['dSAdxMid'] = dsa_dx_mid + ds.dSAdxMid.attrs = { + 'long_name': 'Gradient of absolute salinity at layer midpoints', + 'units': 'g kg-1 m-1', + } + + def _get_geom_ssh_z_bot( + self, x: np.ndarray + ) -> tuple[xr.DataArray, xr.DataArray]: + """ + Get the geometric sea surface height and sea floor height for each + column from the configuration. 
+ """ + config = self.config + geom_ssh = get_array_from_mid_grad(config, 'geom_ssh', x) + geom_z_bot = get_array_from_mid_grad(config, 'geom_z_bot', x) + return ( + xr.DataArray( + data=geom_ssh, + dims=['nCells'], + attrs={ + 'long_name': 'sea surface geometric height', + 'units': 'm', + }, + ), + xr.DataArray( + data=geom_z_bot, + dims=['nCells'], + attrs={ + 'long_name': 'seafloor geometric height', + 'units': 'm', + }, + ), + ) + + def _init_z_tilde_vert_coord( + self, + ds_mesh: xr.Dataset, + pseudo_bottom_depth: xr.DataArray, + x: np.ndarray, + ) -> xr.Dataset: + """ + Initialize variables for a z-tilde vertical coordinate. + """ + config = self.config + + z_tilde_bot = get_array_from_mid_grad(config, 'z_tilde_bot', x) + + ds = ds_mesh.copy() + + ds['bottomDepth'] = pseudo_bottom_depth + ds.bottomDepth.attrs['long_name'] = 'seafloor pseudo-height' + ds.bottomDepth.attrs['units'] = 'm' + # the pseudo-ssh is always zero (like the surface pressure) + ds['ssh'] = xr.zeros_like(pseudo_bottom_depth) + ds.ssh.attrs['long_name'] = 'sea surface pseudo-height' + ds.ssh.attrs['units'] = 'm' + ds_list: list[xr.Dataset] = [] + for icell in range(ds.sizes['nCells']): + # initialize the vertical coordinate for each column separately + # to allow different pseudo-bottom depths + pseudo_bottom_depth = -z_tilde_bot[icell] + # keep `nCells` dimension + ds_cell = ds.isel(nCells=slice(icell, icell + 1)) + local_config = config.copy() + local_config.set( + 'vertical_grid', 'bottom_depth', str(pseudo_bottom_depth) + ) + + init_vertical_coord(local_config, ds_cell) + cell_vars = [ + var + for var in ds_cell.data_vars + if 'nCells' in ds_cell[var].dims + ] + ds_list.append(ds_cell[cell_vars]) + + # copy back only the cell variables + ds_cell_vars = xr.concat(ds_list, dim='nCells') + for var in ds_cell_vars.data_vars: + attrs = ds_cell_vars[var].attrs + ds[var] = ds_cell_vars[var] + ds[var].attrs = attrs + return ds + + def _get_z_tilde_t_s_nodes( + self, x: np.ndarray + ) -> 
tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Get the z-tilde, temperature and salinity node values from the + configuration. + """ + config = self.config + z_tilde_node = get_array_from_mid_grad(config, 'z_tilde', x) + t_node = get_array_from_mid_grad(config, 'temperature', x) + s_node = get_array_from_mid_grad(config, 'salinity', x) + + if ( + z_tilde_node.shape != t_node.shape + or z_tilde_node.shape != s_node.shape + ): + raise ValueError( + 'The number of z_tilde, temperature and salinity ' + 'points must be the same in each column.' + ) + + return z_tilde_node, t_node, s_node + + def _interpolate_t_s( + self, + ds: xr.Dataset, + z_tilde_mid: xr.DataArray, + x: np.ndarray, + ) -> tuple[xr.DataArray, xr.DataArray]: + """ + Compute temperature, salinity, pressure and specific volume given + z-tilde + """ + + z_tilde_node, t_node, s_node = self._get_z_tilde_t_s_nodes(x) + + ncells = ds.sizes['nCells'] + nvertlevels = ds.sizes['nVertLevels'] + + temperature_np = np.zeros((1, ncells, nvertlevels), dtype=float) + salinity_np = np.zeros((1, ncells, nvertlevels), dtype=float) + + if z_tilde_node.shape[0] != ncells: + raise ValueError( + 'The number of z_tilde columns provided must match the ' + 'number of mesh columns.' + ) + if t_node.shape[0] != ncells: + raise ValueError( + 'The number of temperature columns provided must match the ' + 'number of mesh columns.' + ) + if s_node.shape[0] != ncells: + raise ValueError( + 'The number of salinity columns provided must match the ' + 'number of mesh columns.' + ) + + for icell in range(ncells): + z_tilde = z_tilde_node[icell, :] + temperatures = t_node[icell, :] + salinities = s_node[icell, :] + z_tilde_mid_col = z_tilde_mid.isel(Time=0, nCells=icell).values + + if len(z_tilde) < 2: + raise ValueError( + 'At least two z_tilde points are required to ' + 'define piecewise linear initial conditions.' 
+ ) + + t_interp = get_pchip_interpolator( + z_tilde_nodes=z_tilde, + values_nodes=temperatures, + name='temperature', + ) + s_interp = get_pchip_interpolator( + z_tilde_nodes=z_tilde, + values_nodes=salinities, + name='salinity', + ) + valid = np.isfinite(z_tilde_mid_col) + temperature_np[0, icell, :] = np.nan + salinity_np[0, icell, :] = np.nan + if np.any(valid): + temperature_np[0, icell, valid] = t_interp( + z_tilde_mid_col[valid] + ) + salinity_np[0, icell, valid] = s_interp(z_tilde_mid_col[valid]) + + temperature = xr.DataArray( + data=temperature_np, + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'conservative temperature', + 'units': 'degC', + }, + ) + salinity = xr.DataArray( + data=salinity_np, + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'absolute salinity', + 'units': 'g kg-1', + }, + ) + + return temperature, salinity diff --git a/polaris/tasks/ocean/horiz_press_grad/reference.py b/polaris/tasks/ocean/horiz_press_grad/reference.py new file mode 100644 index 0000000000..6d72547c8e --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/reference.py @@ -0,0 +1,952 @@ +from __future__ import annotations + +from fractions import Fraction +from math import gcd, lcm +from typing import Callable, Literal, Sequence + +import gsw +import numpy as np +import xarray as xr + +from polaris.ocean.model import OceanIOStep + +# temporary until we can get this for GCD +from polaris.ocean.vertical.ztilde import Gravity, RhoSw +from polaris.tasks.ocean.horiz_press_grad.column import ( + get_array_from_mid_grad, + get_pchip_interpolator, +) + + +class Reference(OceanIOStep): + r""" + A step for creating a high-fidelity reference solution for two column + test cases. + + The reference solution is computed by first converting from the + Omega pseudo-height coordinate :math:`\tilde z` (``z_tilde``) to true + geometric height ``z`` by numerically integrating the hydrostatic + relation: + + .. 
math:: + + \frac{\partial z}{\partial \tilde z} = + \rho_0\,\nu\left(S_A, \Theta, p\right) + + where :math:`\nu` is the specific volume (``spec_vol``) computed from the + TEOS-10 equation of state, :math:`S_A` is Absolute Salinity, + :math:`\Theta` is Conservative Temperature, :math:`p` is sea pressure + (positive downward), and :math:`\rho_0` is a reference density used in the + definition :math:`\tilde z = -p/(\rho_0 g)`. The conversion therefore + requires an integral of the form: + + .. math:: + + z(\tilde z) = z_b + \int_{\tilde z_b}^{\tilde z} + \rho_0\,\nu\bigl(S_A(\tilde z'),\Theta(\tilde z'),p(\tilde z')\bigr)\; + d\tilde z' , + + with :math:`z_b = -\mathrm{bottom\_depth}` at the pseudo-height + ``z_tilde_b`` at the seafloor, typically the minimum (most negative) value + of the pseudo-height domain for a given water column. + + Then, the horizontal gradient is computed using a 4th-order + finite-difference stencil at the center column (x=0) to obtain the + high-fidelity reference solution for the hydrostatic pressure gradient + error. + """ + + def __init__(self, component, indir): + """ + Create the step + + Parameters + ---------- + component : polaris.Component + The component the step belongs to + + indir : str + The subdirectory that the task belongs to, that this step will + go into a subdirectory of + """ + name = 'reference' + super().__init__(component=component, name=name, indir=indir) + for file in [ + 'reference_solution.nc', + ]: + self.add_output_file(file) + + def run(self): + """ + Run this step of the test case + """ + logger = self.logger + # logger.setLevel(logging.DEBUG) + config = self.config + if config.get('ocean', 'model') != 'omega': + raise ValueError( + 'The horiz_press_grad test case is only supported for the ' + 'Omega ocean model.' 
+ ) + + resolution = config.getfloat('horiz_press_grad', 'reference_horiz_res') + assert resolution is not None, ( + 'The "reference_horiz_res" configuration option must be set in ' + 'the "horiz_press_grad" section.' + ) + + x = resolution * np.array([-1.5, -0.5, 0.0, 0.5, 1.5], dtype=float) + + geom_ssh, geom_z_bot, z_tilde_bot = self._get_ssh_z_bot(x) + + test_vert_res = config.getexpression( + 'horiz_press_grad', 'vert_resolutions' + ) + # Choose a reference vertical resolution based on the greatest common + # spacing among all test vertical resolutions so every test + # resolution is an integer multiple of 2 * vert_res. + vert_res = _get_reference_vert_res(test_vert_res) + logger.info( + f'Reference vertical resolution: vert_res = {vert_res:.12g} m' + ) + z_tilde_bot_mid = config.getfloat( + 'horiz_press_grad', 'z_tilde_bot_mid' + ) + + assert z_tilde_bot_mid is not None, ( + 'The "z_tilde_bot_mid" configuration option must be set in the ' + '"horiz_press_grad" section.' + ) + + vert_levels = int(-z_tilde_bot_mid / vert_res) + + config.set('vertical_grid', 'vert_levels', str(vert_levels)) + + vert_levs_inters = 2 * vert_levels + 1 + z_tilde = np.nan * np.ones((len(x), vert_levs_inters), dtype=float) + z = np.nan * np.ones((len(x), vert_levs_inters), dtype=float) + spec_vol = np.nan * np.ones((len(x), vert_levs_inters), dtype=float) + ct = np.nan * np.ones((len(x), vert_levs_inters), dtype=float) + sa = np.nan * np.ones((len(x), vert_levs_inters), dtype=float) + uniform_layer_mask = np.zeros((len(x), vert_levs_inters), dtype=bool) + + z_tilde_node, temperature_node, salinity_node = ( + self._get_z_tilde_t_s_nodes(x) + ) + + for icol in range(len(x)): + logger.info(f'Computing column {icol}, x = {x[icol]:.3f} km') + ( + z_tilde[icol, :], + z[icol, :], + spec_vol[icol, :], + ct[icol, :], + sa[icol, :], + uniform_layer_mask[icol, :], + ) = self._compute_column( + z_tilde_node=z_tilde_node[icol, :], + temperature_node=temperature_node[icol, :], + 
salinity_node=salinity_node[icol, :], + geom_ssh=geom_ssh.isel(nCells=icol).item(), + geom_z_bot=geom_z_bot.isel(nCells=icol).item(), + z_tilde_bot=z_tilde_bot.isel(nCells=icol).item(), + ) + + valid_grad_mask = np.all(uniform_layer_mask[[1, 2, 3], :], axis=0) + + dx = resolution * 1e3 # m + + # compute Montgomery potential M = alpha * p + g * z + # with p = -rho0 * g * z_tilde (p positive downward) + montgomery = Gravity * (z - RhoSw * spec_vol * z_tilde) + + dx = resolution * 1.0e3 # m + + check_gradient = False + + if check_gradient: + # sanity checks for 4th-order gradient stencil + x_m = dx * np.array([-1.5, -0.5, 0.5, 1.5], dtype=float) + + # exact for polynomials up to degree 3 + poly = 2.5 + 1.2 * x_m - 0.7 * x_m**2 + 0.9 * x_m**3 + _check_gradient( + self.logger, poly, expected=1.2, name='cubic polynomial', dx=dx + ) + + # smooth function check (should be highly accurate) + k = 2.0 * np.pi / (20.0 * dx) + sine = np.sin(k * x_m) + _check_gradient( + self.logger, sine, expected=k, name='sin(kx)', dx=dx + ) + + # the HPGF is grad(M) - p * grad(alpha) + # Here we just compute the gradient at x=0 using a 4th-order + # finite-difference stencil + p0 = -RhoSw * Gravity * z_tilde[2, :] + # indices for -1.5dx, -0.5dx, 0.5dx, 1.5dx + grad_indices = [0, 1, 3, 4] + dM_dx = _compute_4th_order_gradient(montgomery[grad_indices, :], dx) + dalpha_dx = _compute_4th_order_gradient(spec_vol[grad_indices, :], dx) + hpga = -dM_dx + p0 * dalpha_dx + + dsa_dx = _compute_4th_order_gradient(sa[grad_indices, :], dx) + + # cells = [1, 3] # indices for -0.5km and 0.5km + cells = np.arange(len(x)) # use all columns + ds = xr.Dataset() + ds['temperature'] = xr.DataArray( + data=ct[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'conservative temperature', + 'units': 'degC', + }, + ) + ds['salinity'] = xr.DataArray( + data=sa[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'salinity', + 'units': 'g 
kg-1', + }, + ) + + ds['SpecVol'] = xr.DataArray( + data=spec_vol[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'specific volume', + 'units': 'm3 kg-1', + }, + ) + ds['Density'] = 1.0 / ds['SpecVol'] + ds.Density.attrs['long_name'] = 'density' + ds.Density.attrs['units'] = 'kg m-3' + + ds['ZTildeMid'] = xr.DataArray( + data=z_tilde[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + ) + ds.ZTildeMid.attrs['long_name'] = 'pseudo-height at layer midpoints' + ds.ZTildeMid.attrs['units'] = 'm' + + ds['ZTildeInter'] = xr.DataArray( + data=z_tilde[np.newaxis, cells, 0::2], + dims=['Time', 'nCells', 'nVertLevelsP1'], + ) + ds.ZTildeInter.attrs['long_name'] = 'pseudo-height at layer interfaces' + ds.ZTildeInter.attrs['units'] = 'm' + + ds['GeomZMid'] = xr.DataArray( + data=z[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'geometric height at layer midpoints', + 'units': 'm', + }, + ) + + ds['GeomZInter'] = xr.DataArray( + data=z[np.newaxis, cells, 0::2], + dims=['Time', 'nCells', 'nVertLevelsP1'], + attrs={ + 'long_name': 'geometric height at layer interfaces', + 'units': 'm', + }, + ) + + ds['MontgomeryMid'] = xr.DataArray( + data=montgomery[np.newaxis, cells, 1::2], + dims=['Time', 'nCells', 'nVertLevels'], + attrs={ + 'long_name': 'Montgomery potential at layer midpoints', + 'units': 'm2 s-2', + }, + ) + + ds['MontgomeryInter'] = xr.DataArray( + data=montgomery[np.newaxis, cells, 0::2], + dims=['Time', 'nCells', 'nVertLevelsP1'], + attrs={ + 'long_name': 'Montgomery potential at layer interfaces', + 'units': 'm2 s-2', + }, + ) + + ds['HPGAMid'] = xr.DataArray( + data=hpga[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'along-layer pressure gradient acceleration at ' + 'midpoints', + 'units': 'm s-2', + }, + ) + + ds['HPGAInter'] = xr.DataArray( + data=hpga[np.newaxis, 0::2], + dims=['Time', 'nVertLevelsP1'], + attrs={ + 'long_name': 
'along-layer pressure gradient acceleration at ' + 'interfaces', + 'units': 'm s-2', + }, + ) + + ds['dMdxMid'] = xr.DataArray( + data=dM_dx[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'Gradient of Montgomery potential at layer ' + 'midpoints', + 'units': 'm s-2', + }, + ) + + ds['PEdgeMid'] = xr.DataArray( + data=p0[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'Pressure at horizontal edge and layer midpoints', + 'units': 'Pa', + }, + ) + + ds['dalphadxMid'] = xr.DataArray( + data=dalpha_dx[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'Gradient of specific volume at layer midpoints', + 'units': 'm2 kg-1', + }, + ) + + ds['dSAdxMid'] = xr.DataArray( + data=dsa_dx[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'Gradient of absolute salinity at layer ' + 'midpoints', + 'units': 'g kg-1 m-1', + }, + ) + + ds['ValidGradMidMask'] = xr.DataArray( + data=valid_grad_mask[np.newaxis, 1::2], + dims=['Time', 'nVertLevels'], + attrs={ + 'long_name': 'Mask indicating layers with valid gradients at ' + 'midpoints', + 'units': '1', + }, + ) + ds['ValidGradInterMask'] = xr.DataArray( + data=valid_grad_mask[np.newaxis, 0::2], + dims=['Time', 'nVertLevelsP1'], + attrs={ + 'long_name': 'Mask indicating layers with valid gradients at ' + 'interfaces', + 'units': '1', + }, + ) + + self.write_model_dataset(ds, 'reference_solution.nc') + + def _get_ssh_z_bot( + self, x: np.ndarray + ) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray]: + """ + Get the geometric sea surface height and sea floor height, as well as + sea floor pseudo-height for each column from the configuration. 
+ """ + config = self.config + geom_ssh = get_array_from_mid_grad(config, 'geom_ssh', x) + geom_z_bot = get_array_from_mid_grad(config, 'geom_z_bot', x) + z_tilde_bot = get_array_from_mid_grad(config, 'z_tilde_bot', x) + return ( + xr.DataArray(data=geom_ssh, dims=['nCells']), + xr.DataArray(data=geom_z_bot, dims=['nCells']), + xr.DataArray(data=z_tilde_bot, dims=['nCells']), + ) + + def _init_z_tilde_interface( + self, pseudo_bottom_depth: float, z_tilde_bot: float + ) -> tuple[np.ndarray, int, np.ndarray]: + """ + Compute z-tilde vertical interfaces. + """ + section = self.config['vertical_grid'] + vert_levels = section.getint('vert_levels') + z_tilde_interface = np.linspace( + 0.0, z_tilde_bot, vert_levels + 1, dtype=float + ) + # layers where z_tilde is not adjusted for bathymetry + uniform_layer_mask = z_tilde_interface >= -pseudo_bottom_depth + + z_tilde_interface = np.maximum(z_tilde_interface, -pseudo_bottom_depth) + dz = z_tilde_interface[0:-1] - z_tilde_interface[1:] + mask = dz == 0.0 + z_tilde_interface[1:][mask] = np.nan + + # max_layer is the index of the deepest non-nan layer interface + max_layer = np.where(~mask)[0][-1] + 1 + + return z_tilde_interface, max_layer, uniform_layer_mask + + def _get_z_tilde_t_s_nodes( + self, x: np.ndarray + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Get the z-tilde, temperature and salinity node values from the + configuration. + """ + config = self.config + z_tilde_node = get_array_from_mid_grad(config, 'z_tilde', x) + t_node = get_array_from_mid_grad(config, 'temperature', x) + s_node = get_array_from_mid_grad(config, 'salinity', x) + + if ( + z_tilde_node.shape != t_node.shape + or z_tilde_node.shape != s_node.shape + ): + raise ValueError( + 'The number of z_tilde, temperature and salinity ' + 'points must be the same in each column.' 
+ ) + + self.logger.debug('z_tilde nodes:') + self.logger.debug(z_tilde_node) + self.logger.debug('temperature nodes:') + self.logger.debug(t_node) + self.logger.debug('salinity nodes:') + self.logger.debug(s_node) + + return z_tilde_node, t_node, s_node + + def _compute_column( + self, + z_tilde_node: np.ndarray, + temperature_node: np.ndarray, + salinity_node: np.ndarray, + geom_ssh: float, + geom_z_bot: float, + z_tilde_bot: float, + ) -> tuple[ + np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray + ]: + config = self.config + logger = self.logger + section = config['horiz_press_grad'] + method = section.get('reference_quadrature_method') + assert method is not None, ( + 'The "reference_quadrature_method" configuration option must be ' + 'set in the "horiz_press_grad" section.' + ) + water_col_adjust_iter_count = section.getint( + 'water_col_adjust_iter_count' + ) + assert water_col_adjust_iter_count is not None, ( + 'The "water_col_adjust_iter_count" configuration option must be ' + 'set in the "horiz_press_grad" section.' 
+ ) + + goal_geom_water_column_thickness = geom_ssh - geom_z_bot + + # first guess at the pseudo bottom depth is the geometric + # water column thickness + pseudo_bottom_depth = goal_geom_water_column_thickness + + logger.debug( + f'goal_geom_water_column_thickness = ' + f'{goal_geom_water_column_thickness:.12f}' + ) + + for iter in range(water_col_adjust_iter_count): + z_tilde_inter, max_layer, uniform_layer_mask_inter = ( + self._init_z_tilde_interface( + pseudo_bottom_depth=pseudo_bottom_depth, + z_tilde_bot=z_tilde_bot, + ) + ) + vert_levels = len(z_tilde_inter) - 1 + + z_tilde_mid = 0.5 * (z_tilde_inter[0:-1] + z_tilde_inter[1:]) + + # z_tilde has both interfaces and midpoints + z_tilde = np.zeros(2 * vert_levels + 1, dtype=float) + z_tilde[0::2] = z_tilde_inter + z_tilde[1::2] = z_tilde_mid + + uniform_layer_mask_mid = ( + uniform_layer_mask_inter[0:-1] & uniform_layer_mask_inter[1:] + ) + uniform_layer_mask = np.zeros(2 * vert_levels + 1, dtype=bool) + uniform_layer_mask[0::2] = uniform_layer_mask_inter + uniform_layer_mask[1::2] = uniform_layer_mask_mid + + valid = slice(0, 2 * max_layer + 1) + + z_tilde_valid = z_tilde[valid] + logger.debug(f'z_tilde_valid = {z_tilde_valid}') + logger.debug(f'z_tilde invalid = {z_tilde[2 * max_layer + 1 :]}') + + z = np.nan * np.ones_like(z_tilde) + spec_vol = np.nan * np.ones_like(z_tilde) + ct = np.nan * np.ones_like(z_tilde) + sa = np.nan * np.ones_like(z_tilde) + + ( + z[valid], + spec_vol[valid], + ct[valid], + sa[valid], + ) = _integrate_geometric_height( + z_tilde_interfaces=z_tilde_valid, + z_tilde_nodes=z_tilde_node, + sa_nodes=salinity_node, + ct_nodes=temperature_node, + bottom_depth=-geom_z_bot, + method=method, + ) + + geom_water_column_thickness = z[0] - z[2 * max_layer] + + scaling_factor = ( + goal_geom_water_column_thickness / geom_water_column_thickness + ) + logger.info( + f' Iteration {iter}: ' + f'scaling factor = {scaling_factor:.12f}, ' + f'scaling factor - 1 = {scaling_factor - 1.0:.12g}, ' + 
f'pseudo bottom depth = {pseudo_bottom_depth:.12f}, ' + f'max layer = {max_layer}' + ) + pseudo_bottom_depth *= scaling_factor + logger.info('') + + return z_tilde, z, spec_vol, ct, sa, uniform_layer_mask + + +def _integrate_geometric_height( + z_tilde_interfaces: Sequence[float] | np.ndarray, + z_tilde_nodes: Sequence[float] | np.ndarray, + sa_nodes: Sequence[float] | np.ndarray, + ct_nodes: Sequence[float] | np.ndarray, + bottom_depth: float, + method: Literal[ + 'midpoint', 'trapezoid', 'simpson', 'gauss2', 'gauss4', 'adaptive' + ] = 'gauss4', + subdivisions: int = 2, + rel_tol: float = 5e-8, + abs_tol: float = 5e-5, + max_recurs_depth: int = 12, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """ + Integrate the hydrostatic relation to obtain geometric height. + + Integrates upward from the seafloor using + ``dz/dz_tilde = rho0 * spec_vol(SA, CT, p)`` to obtain geometric + heights ``z`` at requested pseudo-heights ``z_tilde_interfaces``. + + The salinity and temperature profiles are supplied as *piecewise + linear* functions of pseudo-height via collocation nodes and their + values. Outside the node range, profiles are held constant at the + end values (``numpy.interp`` behavior), permitting targets to extend + above or below the collocation range. + + Methods + ------- + The integral over each interface interval can be evaluated with one of + the following schemes (set with ``method``): + + - 'midpoint': composite midpoint rule with ``subdivisions`` panels. + - 'trapezoid': composite trapezoidal rule with ``subdivisions`` panels. + - 'simpson': composite Simpson's rule; requires an even number of + panels. If ``subdivisions`` is odd, one is added internally. + - 'gauss2': 2-point Gauss-Legendre per panel (higher accuracy than + midpoint/trapezoid at similar cost). + - 'gauss4': 4-point Gauss-Legendre per panel (default; high accuracy + for smooth integrands). 
+ - 'adaptive': adaptive recursive Simpson integration controlled by + ``rel_tol``, ``abs_tol`` and ``max_recurs_depth``. + + Parameters + ---------- + z_tilde_interfaces : sequence of float + Monotonic non-increasing layer-interface pseudo-heights ordered + from sea surface to seafloor. The first value corresponds to the + sea surface (typically near 0) and the last to the seafloor + (most negative). Values may extend outside the node range. + z_tilde_nodes : sequence of float + Monotonic non-increasing collocation nodes for SA and CT. + sa_nodes, ct_nodes : sequence of float + Absolute Salinity (g/kg) and Conservative Temperature (degC) at + ``z_tilde_nodes``. + bottom_depth : float + Positive depth (m); geometric height at seafloor is ``-bottom_depth``. + method : str, optional + Quadrature method ('midpoint','trapezoid','simpson','gauss2', + 'gauss4','adaptive'). Default 'gauss4'. + subdivisions : int, optional + Subdivisions per interval for fixed-step methods (>=1). Ignored + for 'adaptive'. + rel_tol, abs_tol : float, optional + Relative/absolute tolerances for adaptive Simpson. + max_recurs_depth : int, optional + Max recursion depth for adaptive Simpson. + + Returns + ------- + z : ndarray + Geometric heights at ``z_tilde_interfaces``. + spec_vol : ndarray + Specific volume at targets. + ct : ndarray + Conservative temperature at targets. + sa : ndarray + Absolute salinity at targets. + """ + + z_tilde_interfaces = np.asarray(z_tilde_interfaces, dtype=float) + z_tilde_nodes = np.asarray(z_tilde_nodes, dtype=float) + sa_nodes = np.asarray(sa_nodes, dtype=float) + ct_nodes = np.asarray(ct_nodes, dtype=float) + + if not ( + z_tilde_nodes.ndim == sa_nodes.ndim == ct_nodes.ndim == 1 + and z_tilde_interfaces.ndim == 1 + ): + raise ValueError('All inputs must be one-dimensional.') + if len(z_tilde_nodes) != len(sa_nodes) or len(z_tilde_nodes) != len( + ct_nodes + ): + raise ValueError( + 'Lengths of z_tilde_nodes, sa_nodes, ct_nodes differ.' 
+ ) + if len(z_tilde_nodes) < 2: + raise ValueError('Need at least two collocation nodes.') + if not np.all(np.diff(z_tilde_nodes) <= 0): + raise ValueError('z_tilde_nodes must be strictly non-increasing.') + if not np.all(np.diff(z_tilde_interfaces) <= 0): + raise ValueError('z_tilde_interfaces must be non-increasing.') + if subdivisions < 1: + raise ValueError('subdivisions must be >= 1.') + + sa_interp = get_pchip_interpolator( + z_tilde_nodes=z_tilde_nodes, + values_nodes=sa_nodes, + name='salinity', + ) + ct_interp = get_pchip_interpolator( + z_tilde_nodes=z_tilde_nodes, + values_nodes=ct_nodes, + name='temperature', + ) + + def spec_vol_ct_sa_at( + z_tilde: np.ndarray, + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + sa = sa_interp(z_tilde) + ct = ct_interp(z_tilde) + p_pa = -RhoSw * Gravity * z_tilde + # gsw expects pressure in dbar + spec_vol = gsw.specvol(sa, ct, p_pa * 1.0e-4) + return spec_vol, ct, sa + + def integrand(z_tilde: np.ndarray) -> np.ndarray: + spec_vol, _, _ = spec_vol_ct_sa_at(z_tilde) + return RhoSw * spec_vol + + # fill interface heights: anchor bottom, integrate upward (reverse) + n_interfaces = len(z_tilde_interfaces) + if n_interfaces < 2: + raise ValueError('Need at least two interfaces (surface and bottom).') + z = np.empty_like(z_tilde_interfaces) + z[-1] = -bottom_depth + for i in range(n_interfaces - 1, 0, -1): + a = z_tilde_interfaces[i - 1] # shallower + b = z_tilde_interfaces[i] # deeper + if a == b: + z[i - 1] = z[i] + continue + if method == 'adaptive': + inc = _adaptive_simpson( + integrand, a, b, rel_tol, abs_tol, max_recurs_depth + ) + else: + nsub = subdivisions + if method == 'simpson' and nsub % 2 == 1: + nsub += 1 + inc = _fixed_quadrature(integrand, a, b, nsub, method) + z[i - 1] = z[i] - inc + + spec_vol, ct, sa = spec_vol_ct_sa_at(z_tilde_interfaces) + return z, spec_vol, ct, sa + + +def _get_reference_vert_res( + test_vert_res: Sequence[float] | np.ndarray, +) -> float: + """ + Compute the reference vertical 
resolution. + + The returned value ``vert_res`` is chosen so that each test vertical + resolution is an integer multiple of ``2 * vert_res``. + """ + test_vert_res = np.asarray(test_vert_res, dtype=float) + if test_vert_res.size == 0: + raise ValueError('At least one test vertical resolution is required.') + + flat = test_vert_res.ravel() + if not np.all(np.isfinite(flat)): + raise ValueError('All test vertical resolutions must be finite.') + if np.any(flat <= 0.0): + raise ValueError('All test vertical resolutions must be positive.') + + fractions = [Fraction(str(value)) for value in flat] + + common_spacing = fractions[0] + for value in fractions[1:]: + common_spacing = _fraction_gcd(common_spacing, value) + + if common_spacing <= 0: + raise ValueError( + 'Could not determine a positive common spacing from ' + 'test vertical resolutions.' + ) + + vert_res = float(common_spacing) / 2.0 + + # Defensive check against accidental precision/pathological inputs. + multipliers = flat / (2.0 * vert_res) + if not np.allclose( + multipliers, np.round(multipliers), rtol=0.0, atol=1.0e-12 + ): + raise ValueError( + 'Test vertical resolutions are not integer multiples of ' + '2 * reference vertical resolution.' 
+ ) + + return vert_res + + +def _fraction_gcd(a: Fraction, b: Fraction) -> Fraction: + return Fraction( + gcd(a.numerator, b.numerator), lcm(a.denominator, b.denominator) + ) + + +def _fixed_quadrature( + integrand: Callable[[np.ndarray], np.ndarray], + a: float, + b: float, + nsub: int, + method: str, +) -> float: + """Composite fixed-step quadrature over [a,b].""" + h = (b - a) / nsub + total = 0.0 + if method == 'midpoint': + mids = a + (np.arange(nsub) + 0.5) * h + total = np.sum(integrand(mids)) * h + elif method == 'trapezoid': + x = a + np.arange(nsub + 1) * h + fx = integrand(x) + total = h * (0.5 * fx[0] + fx[1:-1].sum() + 0.5 * fx[-1]) + elif method == 'simpson': + if nsub % 2 != 0: + raise ValueError('Simpson requires even nsub.') + x = a + np.arange(nsub + 1) * h + fx = integrand(x) + total = ( + h + / 3.0 + * ( + fx[0] + + fx[-1] + + 4.0 * fx[1:-1:2].sum() + + 2.0 * fx[2:-2:2].sum() + ) + ) + elif method in {'gauss2', 'gauss4'}: + total = _gauss_composite(integrand, a, b, nsub, method) + else: # pragma: no cover - defensive + raise ValueError(f'Unknown quadrature method: {method}') + return float(total) + + +def _gauss_composite( + integrand: Callable[[np.ndarray], np.ndarray], + a: float, + b: float, + nsub: int, + method: str, +) -> float: + """Composite Gauss-Legendre quadrature (2- or 4-point).""" + h = (b - a) / nsub + total = 0.0 + if method == 'gauss2': + xi = np.array([-1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)]) + wi = np.array([1.0, 1.0]) + else: # gauss4 + xi = np.array( + [ + -0.8611363115940526, + -0.3399810435848563, + 0.3399810435848563, + 0.8611363115940526, + ] + ) + wi = np.array( + [ + 0.34785484513745385, + 0.6521451548625461, + 0.6521451548625461, + 0.34785484513745385, + ] + ) + for k in range(nsub): + a_k = a + k * h + b_k = a_k + h + mid = 0.5 * (a_k + b_k) + half = 0.5 * h + xk = mid + half * xi + fx = integrand(xk) + total += half * np.sum(wi * fx) + return float(total) + + +def _adaptive_simpson( + integrand: 
Callable[[np.ndarray], np.ndarray], + a: float, + b: float, + rel_tol: float, + abs_tol: float, + max_depth: int, +) -> float: + """Adaptive Simpson integration over [a,b].""" + fa = integrand(np.array([a]))[0] + fb = integrand(np.array([b]))[0] + m = 0.5 * (a + b) + fm = integrand(np.array([m]))[0] + whole = _simpson_basic(fa, fm, fb, a, b) + return _adaptive_simpson_recursive( + integrand, a, b, fa, fm, fb, whole, rel_tol, abs_tol, max_depth, 0 + ) + + +def _simpson_basic( + fa: float, fm: float, fb: float, a: float, b: float +) -> float: + """Single Simpson panel.""" + return (b - a) / 6.0 * (fa + 4.0 * fm + fb) + + +def _adaptive_simpson_recursive( + integrand: Callable[[np.ndarray], np.ndarray], + a: float, + b: float, + fa: float, + fm: float, + fb: float, + whole: float, + rel_tol: float, + abs_tol: float, + max_depth: int, + depth: int, +) -> float: + m = 0.5 * (a + b) + lm = 0.5 * (a + m) + rm = 0.5 * (m + b) + flm = integrand(np.array([lm]))[0] + frm = integrand(np.array([rm]))[0] + left = _simpson_basic(fa, flm, fm, a, m) + right = _simpson_basic(fm, frm, fb, m, b) + S2 = left + right + err = S2 - whole + tol = max(abs_tol, rel_tol * max(abs(S2), 1e-15)) + if depth >= max_depth: + return S2 + if abs(err) < 15.0 * tol: + return S2 + err / 15.0 # Richardson extrapolation + return _adaptive_simpson_recursive( + integrand, + a, + m, + fa, + flm, + fm, + left, + rel_tol, + abs_tol, + max_depth, + depth + 1, + ) + _adaptive_simpson_recursive( + integrand, + m, + b, + fm, + frm, + fb, + right, + rel_tol, + abs_tol, + max_depth, + depth + 1, + ) + + +def _check_gradient( + logger, + values: np.ndarray, + expected: float, + name: str, + dx: float | None = None, + rel_tol: float = 1.0e-12, + abs_tol: float = 5.0e-8, +) -> None: + """Check the 4th-order gradient stencil against an analytic value.""" + if dx is None: + raise ValueError('dx must be provided for gradient checks.') + calc = _compute_4th_order_gradient(values[:, np.newaxis], dx)[0] + err = calc - 
expected + logger.info( + f'4th-order gradient check {name}: ' + f'calc={calc:.6e}, expected={expected:.6e}, ' + f'err={err:.3e}' + ) + tol = max(abs_tol, rel_tol * max(1.0, abs(expected))) + if not np.isfinite(calc) or abs(err) > tol: + raise ValueError( + f'4th-order gradient check failed for {name}: ' + f'calc={calc}, expected={expected}, err={err}' + ) + + +def _compute_4th_order_gradient(f: np.ndarray, dx: float) -> np.ndarray: + """ + Compute a 4th-order finite-difference gradient of f with respect to x + at x=0, assuming values at x = dx * [-1.5, -0.5, 0.5, 1.5]. + + The stencil is: + f'(0) ≈ [f(-1.5dx) - 27 f(-0.5dx) + 27 f(0.5dx) - f(1.5dx)] + / (24 dx) + + Here we assume f[0,:], f[1,:], f[2,:], f[3,:] correspond to + x = -1.5dx, -0.5dx, 0.5dx, 1.5dx respectively. + """ + assert f.shape[0] == 4, ( + 'Input array must have exactly 4 entries in its first dimension ' + 'for the 4th-order gradient.' + ) + + # gradient at x = 0 using the non-uniform 4-point stencil + df_dx = (f[0, :] - 27.0 * f[1, :] + 27.0 * f[2, :] - f[3, :]) / (24.0 * dx) + + # mask any locations where inputs are NaN + nan_mask = np.any(np.isnan(f), axis=0) + df_dx[nan_mask] = np.nan + + return df_dx diff --git a/polaris/tasks/ocean/horiz_press_grad/salinity_gradient.cfg b/polaris/tasks/ocean/horiz_press_grad/salinity_gradient.cfg new file mode 100644 index 0000000000..541d1ab80d --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/salinity_gradient.cfg @@ -0,0 +1,8 @@ +# config options for two column testcases +[horiz_press_grad] + +# salinities for piecewise linear initial conditions at the midpoint between +# the two columns and their gradients +salinity_mid = [35.6, 35.4, 35.0, 34.8, 34.75] +# g/kg / km +salinity_grad = [0.8, 0.5, 0.3, 0.2, 0.1] diff --git a/polaris/tasks/ocean/horiz_press_grad/task.py b/polaris/tasks/ocean/horiz_press_grad/task.py new file mode 100644 index 0000000000..407daa8b9d --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/task.py @@ -0,0 +1,127 @@ 
+import os + +from polaris import Task +from polaris.tasks.ocean.horiz_press_grad.analysis import Analysis +from polaris.tasks.ocean.horiz_press_grad.forward import Forward +from polaris.tasks.ocean.horiz_press_grad.init import Init +from polaris.tasks.ocean.horiz_press_grad.reference import Reference + + +class HorizPressGradTask(Task): + """ + The two-column test case tests convergence of the TEOS-10 pressure-gradient + computation in Omega at various horizontal and vertical resolutions. The + test uses fixed horizontal gradients in various properties (e.g. salinity + and pseudo-height) between two adjacent ocean columns, as set by config + options. + + The test includes a quasi-analytic solution to horizontal + pressure-gradient acceleration (HPGA) used for verification. It also + includes a set of Omega two-column initial conditions at various + resolutions. + + The test also includes single-time-step forward model runs at each + resolution that output Omega's version of the HPGA, and an analysis step + that compares these runs with both the high-fidelity reference solution + and the Python-computed HPGA from the initial conditions. + """ + + def __init__(self, component, name): + """ + Create the test case + + Parameters + ---------- + component : polaris.tasks.ocean.Ocean + The ocean component that this task belongs to + + name : str + The name of the test case, which must have a corresponding + .cfg config file in the horiz_press_grad package that + specifies which properties vary between the columns. 
+ """ + subdir = os.path.join('horiz_press_grad', name) + super().__init__(component=component, name=name, subdir=subdir) + + self.config.add_from_package( + 'polaris.tasks.ocean.horiz_press_grad', 'horiz_press_grad.cfg' + ) + self.config.add_from_package( + 'polaris.tasks.ocean.horiz_press_grad', + f'{name}.cfg', + ) + + self._setup_steps() + + def configure(self): + """ + Set config options for the test case + """ + super().configure() + + # set up the steps again in case a user has provided new resolutions + self._setup_steps() + + def _setup_steps(self): + """ + setup steps given resolutions + """ + section = self.config['horiz_press_grad'] + horiz_resolutions = section.getexpression('horiz_resolutions') + vert_resolutions = section.getexpression('vert_resolutions') + + assert horiz_resolutions is not None, ( + 'The "horiz_resolutions" configuration option must be set in the ' + '"horiz_press_grad" section.' + ) + assert vert_resolutions is not None, ( + 'The "vert_resolutions" configuration option must be set in the ' + '"horiz_press_grad" section.' + ) + assert len(horiz_resolutions) == len(vert_resolutions), ( + 'The "horiz_resolutions" and "vert_resolutions" configuration ' + 'options must have the same length.' 
+ ) + + # start fresh with no steps + for step in list(self.steps.values()): + self.remove_step(step) + + reference_step = Reference(component=self.component, indir=self.subdir) + self.add_step(reference_step) + + init_steps = dict() + forward_steps = dict() + + for horiz_res, vert_res in zip( + horiz_resolutions, vert_resolutions, strict=True + ): + init_step = Init( + component=self.component, + horiz_res=horiz_res, + vert_res=vert_res, + indir=self.subdir, + ) + self.add_step(init_step) + init_steps[horiz_res] = init_step + + for horiz_res in horiz_resolutions: + forward_step = Forward( + component=self.component, + horiz_res=horiz_res, + indir=self.subdir, + ) + self.add_step(forward_step) + forward_steps[horiz_res] = forward_step + + self.add_step( + Analysis( + component=self.component, + indir=self.subdir, + dependencies={ + 'reference': reference_step, + 'init': init_steps, + 'forward': forward_steps, + }, + ) + ) diff --git a/polaris/tasks/ocean/horiz_press_grad/temperature_gradient.cfg b/polaris/tasks/ocean/horiz_press_grad/temperature_gradient.cfg new file mode 100644 index 0000000000..ec64c02a6b --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/temperature_gradient.cfg @@ -0,0 +1,8 @@ +# config options for two column testcases +[horiz_press_grad] + +# temperatures for piecewise linear initial conditions at the midpoint between +# the two columns and their gradients +temperature_mid = [22.0, 20.0, 14.0, 8.0, 5.0] +# K / km +temperature_grad = [3.0, 1.0, 0.3, 0.2, 0.1] diff --git a/polaris/tasks/ocean/horiz_press_grad/ztilde_gradient.cfg b/polaris/tasks/ocean/horiz_press_grad/ztilde_gradient.cfg new file mode 100644 index 0000000000..29845b4276 --- /dev/null +++ b/polaris/tasks/ocean/horiz_press_grad/ztilde_gradient.cfg @@ -0,0 +1,9 @@ +# config options for two column testcases +[horiz_press_grad] + +# Pseudo-height of the bottom of the ocean (with some buffer compared with +# geometric sea floor height) at the midpoint between the two columns +# 
and its gradient +z_tilde_bot_mid = -576.0 +# m / km +z_tilde_bot_grad = 0.5 diff --git a/polaris/tasks/ocean/single_column/__init__.py b/polaris/tasks/ocean/single_column/__init__.py index 9999db60f4..5c84a5c574 100644 --- a/polaris/tasks/ocean/single_column/__init__.py +++ b/polaris/tasks/ocean/single_column/__init__.py @@ -8,6 +8,8 @@ def add_single_column_tasks(component): """ Add tasks for various single-column tests + Parameters + ---------- component : polaris.tasks.ocean.Ocean the ocean component that the tasks will be added to """