From 40238fab0c2f98e20553472ba1303c54ebb86c3d Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Fri, 8 Nov 2024 16:39:02 +0100 Subject: [PATCH 01/31] Add Catalyst Plugin as a submodule. --- .gitmodules | 3 +++ CMakeLists.txt | 23 +++++++++++++++++++++++ plugins/catalyst | 1 + vendor/catalyst-2.0.0 | 1 + 4 files changed, 28 insertions(+) create mode 160000 plugins/catalyst create mode 160000 vendor/catalyst-2.0.0 diff --git a/.gitmodules b/.gitmodules index 31608b609..f1281c31d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "vendor/run-clang-format"] path = vendor/run-clang-format url = https://github.com/Sarcasm/run-clang-format.git +[submodule "vendor/catalyst-2.0.0"] + path = vendor/catalyst-2.0.0 + url = https://gitlab.kitware.com/paraview/catalyst diff --git a/CMakeLists.txt b/CMakeLists.txt index 0b077743a..204173e6d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,7 @@ option(BUILD_TRACE_PLUGIN "Build Trace plugin" ON) option(BUILD_USER_CODE_PLUGIN "Build User-code plugin" ON) option(BUILD_JSON_PLUGIN "Build JSON plugin" OFF) option(ENABLE_BENCHMARKING "Activate benchmarks in the test suite" OFF) +option(BUILD_CATALYST_PLUGIN "Build Catalyst plugin" ON) @@ -419,6 +420,21 @@ if("${BUILD_PYTHON}") ) endif() +## Catalyst +if("${BUILD_CATALYST_PLUGIN}") + sbuild_add_dependency(catalyst "${USE_DEFAULT}" + EMBEDDED_PATH "vendor/catalyst-2.0.0" + CMAKE_CACHE_ARGS + "-DBUILD_TESTING:BOOL=OFF" + "-DCATALYST_BUILD_TESTING:BOOL=OFF" + "-DCATALYST_USE_MPI:BOOL=ON" + "-DCATALYST_WRAP_FORTRAN:BOOL=OFF" + "-DCATALYST_WRAP_PYTHON:BOOL=OFF" + VERSION 2.0.0 + ) +endif() + + ## JSON @@ -536,3 +552,10 @@ sbuild_add_module(PDI_API_TESTS INSTALL_COMMAND "" SUBSTEPS test ) + +sbuild_add_module(CATALYST_PLUGIN + ENABLE_BUILD_FLAG BUILD_CATALYST_PLUGIN + SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/plugins/catalyst" + DEPENDS PDI catalyst + SUBSTEPS test +) diff --git a/plugins/catalyst b/plugins/catalyst new file mode 160000 index 
000000000..e5c0e77e9 --- /dev/null +++ b/plugins/catalyst @@ -0,0 +1 @@ +Subproject commit e5c0e77e984b1612c459905fc12d5111088212cc diff --git a/vendor/catalyst-2.0.0 b/vendor/catalyst-2.0.0 new file mode 160000 index 000000000..ed6151a29 --- /dev/null +++ b/vendor/catalyst-2.0.0 @@ -0,0 +1 @@ +Subproject commit ed6151a298c6bcc888353e2bdf92a40e6ed8de30 From 39184ce5f042988aad64dc2da298befd9b4eec3e Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Fri, 8 Nov 2024 16:45:17 +0100 Subject: [PATCH 02/31] Update Changelog. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27e19259a..58ef0c66e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -268,6 +268,7 @@ and this project adheres to * NetCDF plugin now support the size_of attribute, fixes [#446](https://gitlab.maisondelasimulation.fr/pdidev/pdi/-/issues/446) * Deisa plugin for in-situ analysis using Dask +* Add Catalyst plugin from Kitware #### Changed * Update the version of dependencies according to our policy: oldest supported From 167db35430967fc8dd697c7f2c122a84602eac2a Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Tue, 28 Jan 2025 17:31:05 +0100 Subject: [PATCH 03/31] Remove folder. 
--- plugins/catalyst | 1 - 1 file changed, 1 deletion(-) delete mode 160000 plugins/catalyst diff --git a/plugins/catalyst b/plugins/catalyst deleted file mode 160000 index e5c0e77e9..000000000 --- a/plugins/catalyst +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e5c0e77e984b1612c459905fc12d5111088212cc From 6155b93dfbd6ea9ba5bb827fd3c4b7c2626f6f71 Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Tue, 28 Jan 2025 17:33:10 +0100 Subject: [PATCH 04/31] Add plugin catalyst from https://gitlab.kitware.com/keu-public/pdi-catalyst-plugin --- plugins/catalyst/.clang-format | 24 + plugins/catalyst/.gitignore | 1 + plugins/catalyst/CMakeLists.txt | 22 + plugins/catalyst/LICENSE | 176 ++++++ plugins/catalyst/NOTICE | 13 + plugins/catalyst/README.md | 53 ++ plugins/catalyst/pdi_catalyst_plugin.cxx | 499 ++++++++++++++++++ plugins/catalyst/pdi_catalyst_plugin.h | 51 ++ plugins/catalyst/test/Attributes.cxx | 46 ++ plugins/catalyst/test/Attributes.h | 27 + plugins/catalyst/test/CMakeLists.txt | 25 + plugins/catalyst/test/Grid.cxx | 101 ++++ plugins/catalyst/test/Grid.h | 22 + plugins/catalyst/test/PDIAdaptor.cxx | 87 +++ plugins/catalyst/test/PDIAdaptor.h | 16 + plugins/catalyst/test/catalyst_pipeline.py | 17 + .../test/catalyst_pipeline_with_rendering.py | 81 +++ plugins/catalyst/test/main.cxx | 53 ++ plugins/catalyst/test/pdi.yml.in | 129 +++++ .../test/references/execute_reference.json | 70 +++ .../references/execute_reference_rank0.json | 70 +++ .../references/execute_reference_rank1.json | 70 +++ .../references/execute_reference_rank2.json | 70 +++ .../references/execute_reference_rank3.json | 70 +++ .../test/references/finalize_reference.json | 1 + .../test/references/initialize_reference.json | 10 + plugins/catalyst/test/run_test.py | 46 ++ plugins/catalyst/test/run_test_mpi.py | 51 ++ 28 files changed, 1901 insertions(+) create mode 100644 plugins/catalyst/.clang-format create mode 100644 plugins/catalyst/.gitignore create mode 100644 plugins/catalyst/CMakeLists.txt 
create mode 100644 plugins/catalyst/LICENSE create mode 100644 plugins/catalyst/NOTICE create mode 100644 plugins/catalyst/README.md create mode 100644 plugins/catalyst/pdi_catalyst_plugin.cxx create mode 100644 plugins/catalyst/pdi_catalyst_plugin.h create mode 100644 plugins/catalyst/test/Attributes.cxx create mode 100644 plugins/catalyst/test/Attributes.h create mode 100644 plugins/catalyst/test/CMakeLists.txt create mode 100644 plugins/catalyst/test/Grid.cxx create mode 100644 plugins/catalyst/test/Grid.h create mode 100644 plugins/catalyst/test/PDIAdaptor.cxx create mode 100644 plugins/catalyst/test/PDIAdaptor.h create mode 100644 plugins/catalyst/test/catalyst_pipeline.py create mode 100644 plugins/catalyst/test/catalyst_pipeline_with_rendering.py create mode 100644 plugins/catalyst/test/main.cxx create mode 100644 plugins/catalyst/test/pdi.yml.in create mode 100644 plugins/catalyst/test/references/execute_reference.json create mode 100644 plugins/catalyst/test/references/execute_reference_rank0.json create mode 100644 plugins/catalyst/test/references/execute_reference_rank1.json create mode 100644 plugins/catalyst/test/references/execute_reference_rank2.json create mode 100644 plugins/catalyst/test/references/execute_reference_rank3.json create mode 100644 plugins/catalyst/test/references/finalize_reference.json create mode 100644 plugins/catalyst/test/references/initialize_reference.json create mode 100644 plugins/catalyst/test/run_test.py create mode 100644 plugins/catalyst/test/run_test_mpi.py diff --git a/plugins/catalyst/.clang-format b/plugins/catalyst/.clang-format new file mode 100644 index 000000000..6035c4080 --- /dev/null +++ b/plugins/catalyst/.clang-format @@ -0,0 +1,24 @@ +# Note: if you change any of the settings here, please reformat the entire +# codebase as part of the same commit, that will prevent subsequent commits +# from being flagged as being improperly formatted. + +--- +# This configuration requires clang-format 8.0 or higher. 
+BasedOnStyle: Mozilla +AlignAfterOpenBracket: DontAlign +AlignOperands: false +AlwaysBreakAfterReturnType: None +AlwaysBreakAfterDefinitionReturnType: None +BreakBeforeBraces: Allman +BinPackArguments: true +BinPackParameters: true +ColumnLimit: 100 +SpaceAfterTemplateKeyword: true +Standard: Cpp11 +StatementMacros: +- vtkAbstractTypeMacro +- vtkTypeMacro +- vtkBaseTypeMacro +- vtkAbstractTemplateTypeMacro +- vtkAbstractTypeMacroWithNewInstanceType +... diff --git a/plugins/catalyst/.gitignore b/plugins/catalyst/.gitignore new file mode 100644 index 000000000..40776c217 --- /dev/null +++ b/plugins/catalyst/.gitignore @@ -0,0 +1 @@ +test/__pycache__/ diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt new file mode 100644 index 000000000..1ad9d2703 --- /dev/null +++ b/plugins/catalyst/CMakeLists.txt @@ -0,0 +1,22 @@ +cmake_minimum_required(VERSION 3.13) +project(pdi_catalyst_plugin LANGUAGES C CXX) + +find_package(PDI REQUIRED COMPONENTS plugins) +find_package(Catalyst REQUIRED) +if(NOT ${CATALYST_USE_MPI}) + message(FATAL_ERROR "No MPI support in your Catalyst library, please activate MPI in your Catalyst build.") +endif() + +add_library(pdi_catalyst_plugin MODULE pdi_catalyst_plugin.h pdi_catalyst_plugin.cxx) +target_link_libraries(pdi_catalyst_plugin PDI::PDI_plugins catalyst::catalyst) + +# installation +set(INSTALL_PDIPLUGINDIR "${PDI_DEFAULT_PLUGINDIR}" CACHE PATH "PDI plugins (${PDI_DEFAULT_PLUGINDIR})") +install(TARGETS pdi_catalyst_plugin + LIBRARY DESTINATION "${INSTALL_PDIPLUGINDIR}" +) + +if(BUILD_TESTING) + enable_testing() + add_subdirectory(test) +endif() diff --git a/plugins/catalyst/LICENSE b/plugins/catalyst/LICENSE new file mode 100644 index 000000000..d9a10c0d8 --- /dev/null +++ b/plugins/catalyst/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/plugins/catalyst/NOTICE b/plugins/catalyst/NOTICE new file mode 100644 index 000000000..81ea1e139 --- /dev/null +++ b/plugins/catalyst/NOTICE @@ -0,0 +1,13 @@ +Copyright 2024 Kitware, SAS + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/plugins/catalyst/README.md b/plugins/catalyst/README.md new file mode 100644 index 000000000..3b8d722fb --- /dev/null +++ b/plugins/catalyst/README.md @@ -0,0 +1,53 @@ +PDI Catalyst Plugin +=================== + +This PDI plugin pushes PDI shared data to the Catalyst 2 API. The goal is to leverage the numerous Catalyst implementations like [Catalyst-ParaView](https://gitlab.kitware.com/paraview/paraview) or [Catalyst-ADIOS2](https://gitlab.kitware.com/paraview/adioscatalyst), helping massive data analysis and visualization at exascale. + +# Build Instructions + + - Build and Install [PDI](https://pdi.dev/master/index.html) + - Build and Install [Catalyst](https://gitlab.kitware.com/paraview/catalyst), with MPI support. + - Configure with CMake with variables: + * `PDI_DIR` points to `pdi/install/folder/share/pdi/cmake` + * `paraconf_DIR` points to `pdi/install/folder/share/paraconf/cmake` + * `Catalyst_DIR` points to `catalyst/install/folder/lib/cmake/catalyst-2.0` + * optional: `BUILD_TESTING=ON` to build the example test + * in case you used vendored versions of libraries during your PDI build, instead of system libraries, you may have to define additional PDI dependency locations.
For example `spdlog_DIR` CMake variable for the spdlog library. + - Build with `make` or `ninja` + +# Running the Test + +The test executable expects the config yaml file as its argument. + +To use the Catalyst-ParaView implementation, you should also set the following environment variables: + - `CATALYST_IMPLEMENTATION_NAME=paraview` + - `CATALYST_IMPLEMENTATION_PATHS=path/to/paraview/install/lib/catalyst` + +and likely add the catalyst lib folder to `LD_LIBRARY_PATH` if the catalyst library is installed in a non-standard location. + +# Design Considerations + +*This is a work-in-progress. This paragraph is subject to change.* + +PDI describes data through a [Specification Tree](https://pdi.dev/master/Concepts.html#Specification_tree), written in the YAML format and provided to PDI at initialization. Catalyst describes data with [Conduit](https://llnl-conduit.readthedocs.io/en/latest/index.html) nodes and [Mesh Blueprint](https://llnl-conduit.readthedocs.io/en/latest/blueprint_mesh.html#) protocol, provided at execution. + +Both protocols are very similar, because they are just hierarchical dictionaries with metadata about the shared memory. +However, Catalyst requires additional semantics about the meaning of the data, to map the memory chunk to a mesh description (structured mesh, unstructured mesh, image data, AMR, etc.). +The current approach is to add these semantics to the PDI Specification Tree under the `catalyst` key. See the [example file](test/pdi.yml.in) for the actual implementation. + +PDI is very flexible about the timing of the data sharing using an advanced event mechanism, whereas Catalyst needs all data at the same point in time. +So, the user of this plugin should set an event name referenced by the `PDI_execute_event_name` key in the yaml config, in order to trigger the call to `catalyst_execute`. Data should have been shared either before the event or during the event using the `PDI_multi_expose` function.
+ +Internally, `catalyst_initialize` is called by `PDI_Init` and `catalyst_finalize` is called by `PDI_finalize`. + +In the specification tree, the `PDI_data` key indicates that the conduit node data should be set as an external pointer to data from the PDI data store. There are several subkeys to describe this data, like `name`, `offset`, `stride`, `multiply` to try to match every possible memory layout case. + +# License + +This repository is under the Apache 2.0 license, see the NOTICE and LICENSE files. + +The test case is a modification of the Catalyst2 CxxFullExample code from the ParaView source code, licensed under BSD-3-Clause. + +Developed by Kitware SAS (Kitware Europe), motivated by the [NumPEx](https://numpex.org/) program. + +Reach us at https://www.kitware.com/contact/ diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx new file mode 100644 index 000000000..1971966ad --- /dev/null +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -0,0 +1,499 @@ +#include "pdi_catalyst_plugin.h" + +#include "catalyst.hpp" + +#include +#include + +catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) + : Plugin{ ctx } + , SpecTree(spec_tree) +{ + ctx.callbacks().add_init_callback([this]() { this->ProcessPDIInit(); }); + ctx.callbacks().add_data_callback( + [this](const std::string& data_name, PDI::Ref ref) { this->ProcessData(data_name, ref); }); + ctx.callbacks().add_event_callback( + [this](const std::string& event_name) { this->ProcessEvent(event_name); }); +} + +catalyst_plugin::~catalyst_plugin() +{ + RunCatalystFinalize(); +} + +void catalyst_plugin::ProcessPDIInit() +{ + this->RunCatalystInitialize(); + this->PDIExecuteEventName = this->ReadPDIExecuteEventName(); +} + +void catalyst_plugin::ProcessData(const std::string& data_name, PDI::Ref ref) +{ + context().logger().debug("User has shared a data named {}", data_name); + auto it = this->CurrentPDIData.find(data_name); + if (it != 
this->CurrentPDIData.end()) + { + context().logger().warn( + "Data named '{}' already recorded, the previous value will overwritten.", data_name); + it->second = ref.copy(); + } + else + { + this->CurrentPDIData.emplace(data_name, ref); + } +} + +void catalyst_plugin::ProcessEvent(const std::string& event_name) +{ + if (event_name == this->PDIExecuteEventName) + { + RunCatalystExecute(); + } +} + +void catalyst_plugin::RunCatalystInitialize() +{ + conduit_cpp::Node node; + auto scripts_node = node["catalyst/scripts"]; + auto scripts_spec = PC_get(this->SpecTree, ".scripts"); + int script_number = 0; + PC_len(scripts_spec, &script_number); + for (int i = 0; i < script_number; ++i) + { + auto key = PC_get(scripts_spec, "{%d}", i); + auto value = PC_get(scripts_spec, "<%d>", i); + scripts_node[PDI::to_string(key)] = PDI::to_string(value); + } + + context().logger().debug("catalyst_initialize call..."); + auto result = catalyst_initialize(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) + { + context().logger().error("catalyst_initialize failure"); + } +} + +void catalyst_plugin::RunCatalystExecute() +{ + conduit_cpp::Node node; + + auto execute_spec = PC_get(this->SpecTree, ".execute"); + + // walk the spec tree and create corresponding catalyst nodes. 
+ struct SpecTreeNode + { + PC_tree_t tree; + std::string name; + conduit_node* parentNode; + }; + + std::stack remainingTreeAndParentNode; + remainingTreeAndParentNode.push({ execute_spec, "catalyst", conduit_cpp::c_node(&node) }); + while (!remainingTreeAndParentNode.empty()) + { + auto current = remainingTreeAndParentNode.top(); + remainingTreeAndParentNode.pop(); + + auto current_node = conduit_cpp::cpp_node(current.parentNode)[current.name]; + + switch (current.tree.node->type) + { + case YAML_NO_NODE: + context().logger().error("Unsupported Empty YAML Node for variable {}", current.name); + break; + case YAML_SCALAR_NODE: + switch (current.tree.node->data.scalar.style) + { + case YAML_PLAIN_SCALAR_STYLE: + // TODO: handle float/double type. + current_node.set_int64(PDI::to_long(current.tree)); + break; + case YAML_SINGLE_QUOTED_SCALAR_STYLE: + case YAML_DOUBLE_QUOTED_SCALAR_STYLE: + current_node.set_string(PDI::to_string(current.tree)); + break; + case YAML_LITERAL_SCALAR_STYLE: + case YAML_FOLDED_SCALAR_STYLE: + case YAML_ANY_SCALAR_STYLE: + context().logger().error("Unsupported YAML scalar style for variable {}", current.name); + break; + } + break; + case YAML_SEQUENCE_NODE: + context().logger().error("Unsupported Sequence YAML Node for variable {}", current.name); + break; + case YAML_MAPPING_NODE: + int data_tree_size = PDI::len(current.tree); + // Check for dynamic PDI Data. + if (data_tree_size == 1) + { + auto key = PC_get(current.tree, "{%d}", 0); + if (PDI::to_string(key) == "PDI_data") + { + auto value = PC_get(current.tree, "<%d>", 0); + this->FillNodeWithPDIData(conduit_cpp::c_node(¤t_node), value); + break; + } + } + // reverse order to get the correct order when poping the stack. 
+ for (int i = data_tree_size - 1; i >= 0; --i) + { + auto key = PC_get(current.tree, "{%d}", i); + auto value = PC_get(current.tree, "<%d>", i); + // std::cout << "Mapping Node: " << PDI::to_string(key) << std::endl; + remainingTreeAndParentNode.push( + { value, PDI::to_string(key), conduit_cpp::c_node(¤t_node) }); + } + break; + } + } + + if (context().logger().level() == spdlog::level::debug || + context().logger().level() == spdlog::level::trace) + { + node.print(); + } + context().logger().debug("catalyst_execute call..."); + auto result = catalyst_execute(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) + { + context().logger().error("catalyst_execute failure"); + } + + this->CurrentPDIData.clear(); +} + +void catalyst_plugin::RunCatalystFinalize() +{ + context().logger().debug("catalyst_finalize call..."); + conduit_cpp::Node node; + auto result = catalyst_finalize(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) + { + context().logger().error("catalyst_finalize failure"); + } +} + +void catalyst_plugin::FillNodeWithPDIData(conduit_node* node, PC_tree_t tree) +{ + auto name_spec = PC_get(tree, ".name"); + if (PC_status(name_spec)) + { + context().logger().error("No \"name\" child in PDI_data spec."); + return; + } + + std::string name = PDI::to_string(name_spec); + auto it = this->CurrentPDIData.find(name); + if (it == this->CurrentPDIData.end()) + { + context().logger().error("Can't find the PDI_data named: {}", name); + return; + } + auto ref = it->second; + PDI::Ref_r ref_r{ ref }; + if (!ref_r) + { + context().logger().error("The PDIData named \"{}\" is not readable.", name); + return; + } + + auto data_type = ref_r.type(); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) + { + FillNodeWithScalarPDIData(node, name, *scalar_datatype, ref_r); + } + else if (auto array_datatype = std::dynamic_pointer_cast(data_type)) + { + FillNodeWithArrayPDIData(node, name, tree, *array_datatype, ref_r); + } + else + { + 
context().logger().error("Unsupported datatype for variable: {}", name); + } +} + +void catalyst_plugin::FillNodeWithScalarPDIData(conduit_node* node, const std::string& name, + const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r) +{ + PDI::Scalar_kind scalar_kind = scalar_datatype.kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) + { + auto buffer_size = scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_int8)) + { + catalyst_conduit_node_set_int8(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_int16)) + { + catalyst_conduit_node_set_int16(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_int32)) + { + catalyst_conduit_node_set_int32(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_int64)) + { + catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); + } + else + { + context().logger().error( + "Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + } + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) + { + auto buffer_size = scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_uint8)) + { + catalyst_conduit_node_set_uint8(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_uint16)) + { + catalyst_conduit_node_set_uint16(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_uint32)) + { + catalyst_conduit_node_set_uint32(node, *static_cast(ref_r.get())); + } + else if (buffer_size == sizeof(conduit_uint64)) + { + catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); + } + else + { + context().logger().error( + "Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + } + } + else if (scalar_kind == PDI::Scalar_kind::FLOAT) + { + auto buffer_size = scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_float32)) + { + catalyst_conduit_node_set_float32(node, *static_cast(ref_r.get())); + } + else if 
(buffer_size == sizeof(conduit_float64)) + { + catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); + } + else + { + context().logger().error( + "Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + } + } + else + { + context().logger().error("Unknown Scalar Type for variable {}", name); + } +} + +void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::string& name, + PC_tree_t& tree, const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r) +{ + PDI::Datatype_sptr type = array_datatype.subtype(); + while (auto&& array_type = std::dynamic_pointer_cast(type)) + { + type = array_type->subtype(); + } + auto scalar_datatype = std::dynamic_pointer_cast(type); + if (!scalar_datatype) + { + context().logger().error("Array subtype of variable {} should be scalar type.", name); + return; + } + + conduit_index_t num_elements = 0; + auto size_spec = PC_get(tree, ".size"); + if (PC_status(size_spec) == PC_OK) + { + num_elements = GetLongValueFromSpecNode(size_spec, name); + } + + conduit_index_t offset = 0; + auto offset_spec = PC_get(tree, ".offset"); + if (PC_status(offset_spec) == PC_OK) + { + offset = GetLongValueFromSpecNode(offset_spec, name); + } + + conduit_index_t stride = 1; + auto stride_spec = PC_get(tree, ".stride"); + if (PC_status(stride_spec) == PC_OK) + { + stride = GetLongValueFromSpecNode(stride_spec, name); + } + + conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; + + PDI::Scalar_kind scalar_kind = scalar_datatype->kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) + { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_int8)) + { + conduit_index_t element_bytes = 1; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int8_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_int16)) + { + 
conduit_index_t element_bytes = 2; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int16_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_int32)) + { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int32_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_int64)) + { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int64_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else + { + context().logger().error( + "Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + } + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) + { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_uint8)) + { + conduit_index_t element_bytes = 1; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint8_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_uint16)) + { + conduit_index_t element_bytes = 2; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint16_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_uint32)) + { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint32_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * 
element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_uint64)) + { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint64_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else + { + context().logger().error( + "Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + } + } + else if (scalar_kind == PDI::Scalar_kind::FLOAT) + { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_float32)) + { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_float32_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else if (buffer_size == sizeof(conduit_float64)) + { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_float64_ptr_detailed(node, pointer, num_elements, + offset * element_bytes, stride * element_bytes, element_bytes, endianness); + } + else + { + context().logger().error( + "Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + } + } + else + { + context().logger().error("Unknown Scalar Type for variable {}", name); + } +} + +long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::string& name) +{ + if (spec.node->type == YAML_SCALAR_NODE) + { + return PDI::to_long(spec); + } + else if (spec.node->type == YAML_MAPPING_NODE) + { + auto pdi_data_spec = PC_get(spec, ".PDI_data"); + if (PC_status(pdi_data_spec)) + { + context().logger().error("Unsupported mapping under the variable: {}", name); + return 0; + } + auto name_spec = PC_get(pdi_data_spec, ".name"); + if (PC_status(name_spec)) + { + context().logger().error("No \"name\" child in PDI_data spec."); + return 
0; + } + + std::string variable_name = PDI::to_string(name_spec); + auto it = this->CurrentPDIData.find(variable_name); + if (it == this->CurrentPDIData.end()) + { + context().logger().error("Can't find the PDI_data named: {}", variable_name); + } + auto ref = it->second; + PDI::Ref_r ref_r{ ref }; + if (!ref_r) + { + context().logger().error("The PDIData named \"{}\" is not readable.", variable_name); + return 0; + } + + auto scalar_datatype = std::dynamic_pointer_cast(ref_r.type()); + if (!scalar_datatype) + { + context().logger().error( + "PDI Data subtype of variable {} should be scalar type.", variable_name); + return 0; + } + auto value = ref_r.scalar_value(); + + long multiply = 1; + auto multiply_spec = PC_get(pdi_data_spec, ".multiply"); + if (!PC_status(multiply_spec)) + { + multiply = PDI::to_long(multiply_spec); + } + return value * multiply; + } + return 0; +} + +std::string catalyst_plugin::ReadPDIExecuteEventName() +{ + std::string eventName; + auto execute_spec = PC_get(this->SpecTree, ".PDI_execute_event_name"); + if (PC_status(execute_spec) == PC_OK) + { + eventName = PDI::to_string(execute_spec); + } + return eventName; +} diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h new file mode 100644 index 000000000..a6300305a --- /dev/null +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -0,0 +1,51 @@ +#ifndef CATALYST_PLUGIN_H +#define CATALYST_PLUGIN_H + +#include +#include +#include + +struct conduit_node_impl; +typedef struct conduit_node_impl conduit_node; + +/** + * @brief Translates PDI calls to Catalyst calls. + * + * The PDI Catalyst Plugin is an adapter to call Catalyst API (initialize, execute, finalize) + * from PDI API calls (PDI_init, PDI_multi_expose, PDI_finalize). + * + * It leverages the specification tree to copy only pointer to data. The conduit node structure of + * the catalyst_execute call is defined in the spec tree, and dynamic data are referenced with the + * special keyword "PDI_data". 
+ * + */ +class catalyst_plugin : public PDI::Plugin +{ +public: + catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree); + ~catalyst_plugin(); + +private: + void ProcessPDIInit(); + void ProcessData(const std::string& data_name, PDI::Ref ref); + void ProcessEvent(const std::string& event_name); + + void RunCatalystInitialize(); + void RunCatalystExecute(); + void RunCatalystFinalize(); + void FillNodeWithPDIData(conduit_node* node, PC_tree_t tree); + void FillNodeWithScalarPDIData(conduit_node* node, const std::string& name, + const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); + void FillNodeWithArrayPDIData(conduit_node* node, const std::string& name, PC_tree_t& tree, + const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r); + long GetLongValueFromSpecNode(PC_tree_t& spec, const std::string& name); + std::string ReadPDIExecuteEventName(); + + PC_tree_t SpecTree; + std::unordered_map CurrentPDIData; + std::string PDIExecuteEventName; +}; + +PDI_PLUGIN(catalyst) + +#endif // CATALYST_PLUGIN_H diff --git a/plugins/catalyst/test/Attributes.cxx b/plugins/catalyst/test/Attributes.cxx new file mode 100644 index 000000000..3d027a423 --- /dev/null +++ b/plugins/catalyst/test/Attributes.cxx @@ -0,0 +1,46 @@ +#include "Attributes.h" + +#include "Grid.h" + +Attributes::Attributes(Grid* grid) +{ + this->GridPtr = grid; +} + +Attributes::~Attributes() +{ + this->GridPtr = nullptr; +} + +void Attributes::UpdateFields(double time) +{ + size_t numPoints = this->GridPtr->GetNumberOfPoints(); + this->Velocity.resize(numPoints * 3); + for (size_t pt = 0; pt < numPoints; pt++) + { + const double* coord = this->GridPtr->GetPoint(pt); + this->Velocity[pt] = coord[1] * time; + } + std::fill(this->Velocity.begin() + numPoints, this->Velocity.end(), 0.); + size_t numCells = this->GridPtr->GetNumberOfCells(); + this->Pressure.resize(numCells); + std::fill(this->Pressure.begin(), this->Pressure.end(), 1.f); +} + +double* Attributes::GetVelocityArray() +{ + if 
(this->Velocity.empty()) + { + return nullptr; + } + return &this->Velocity[0]; +} + +float* Attributes::GetPressureArray() +{ + if (this->Pressure.empty()) + { + return nullptr; + } + return &this->Pressure[0]; +} diff --git a/plugins/catalyst/test/Attributes.h b/plugins/catalyst/test/Attributes.h new file mode 100644 index 000000000..7dafc58e7 --- /dev/null +++ b/plugins/catalyst/test/Attributes.h @@ -0,0 +1,27 @@ +#ifndef ATTRIBUTES_H +#define ATTRIBUTES_H + +#include +class Grid; + +class Attributes +{ + // A class for generating and storing point and cell fields. + // Velocity is stored at the points and pressure is stored + // for the cells. The current velocity profile is for a + // shearing flow with U(y,t) = y*t, V = 0 and W = 0. + // Pressure is constant through the domain. +public: + Attributes(Grid* grid); + ~Attributes(); + void UpdateFields(double time); + double* GetVelocityArray(); + float* GetPressureArray(); + +private: + std::vector Velocity; + std::vector Pressure; + Grid* GridPtr; +}; + +#endif // ATTRIBUTES_H diff --git a/plugins/catalyst/test/CMakeLists.txt b/plugins/catalyst/test/CMakeLists.txt new file mode 100644 index 000000000..48a1cffe3 --- /dev/null +++ b/plugins/catalyst/test/CMakeLists.txt @@ -0,0 +1,25 @@ + +add_executable(TestPDICatalyst + Grid.cxx + Grid.h + Attributes.h + Attributes.cxx + main.cxx + PDIAdaptor.h + PDIAdaptor.cxx) + + +find_package(MPI COMPONENTS C CXX REQUIRED) +target_link_libraries(TestPDICatalyst + PRIVATE + MPI::MPI_C + MPI::MPI_CXX + paraconf::paraconf + PDI::pdi) + +set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/test) +configure_file(pdi.yml.in pdi.yml) + +find_package(Python3 COMPONENTS Interpreter) +add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test/run_test.py" "${CMAKE_BINARY_DIR}/test/" "${CMAKE_SOURCE_DIR}/test/") +add_test(NAME TestPDICatalystMPI COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test/run_test_mpi.py" "${CMAKE_BINARY_DIR}/test/" 
"${CMAKE_SOURCE_DIR}/test/" "${MPIEXEC_EXECUTABLE}") diff --git a/plugins/catalyst/test/Grid.cxx b/plugins/catalyst/test/Grid.cxx new file mode 100644 index 000000000..706998473 --- /dev/null +++ b/plugins/catalyst/test/Grid.cxx @@ -0,0 +1,101 @@ +#include "Grid.h" + +#include +#include +#include +#include + +Grid::Grid(const unsigned int numPoints[3], const double spacing[3]) +{ + if (numPoints[0] == 0 || numPoints[1] == 0 || numPoints[2] == 0) + { + throw std::runtime_error("Must have a non-zero amount of points in each direction."); + } + // in parallel, we do a simple partitioning in the x-direction. + int mpiSize = 1; + int mpiRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); + MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + + unsigned int startXPoint = mpiRank * numPoints[0] / mpiSize; + unsigned int endXPoint = (mpiRank + 1) * numPoints[0] / mpiSize; + if (mpiSize != mpiRank + 1) + { + endXPoint++; + } + + // create the points -- slowest in the x and fastest in the z directions + double coord[3] = { 0, 0, 0 }; + for (unsigned int i = startXPoint; i < endXPoint; i++) + { + coord[0] = i * spacing[0]; + for (unsigned int j = 0; j < numPoints[1]; j++) + { + coord[1] = j * spacing[1]; + for (unsigned int k = 0; k < numPoints[2]; k++) + { + coord[2] = k * spacing[2]; + // add the coordinate to the end of the vector + std::copy(coord, coord + 3, std::back_inserter(this->Points)); + } + } + } + // create the hex cells + unsigned int cellPoints[8]; + unsigned int numXPoints = endXPoint - startXPoint; + for (unsigned int i = 0; i < numXPoints - 1; i++) + { + for (unsigned int j = 0; j < numPoints[1] - 1; j++) + { + for (unsigned int k = 0; k < numPoints[2] - 1; k++) + { + cellPoints[0] = i * numPoints[1] * numPoints[2] + j * numPoints[2] + k; + cellPoints[1] = (i + 1) * numPoints[1] * numPoints[2] + j * numPoints[2] + k; + cellPoints[2] = (i + 1) * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k; + cellPoints[3] = i * numPoints[1] * numPoints[2] + (j + 1) * 
numPoints[2] + k; + cellPoints[4] = i * numPoints[1] * numPoints[2] + j * numPoints[2] + k + 1; + cellPoints[5] = (i + 1) * numPoints[1] * numPoints[2] + j * numPoints[2] + k + 1; + cellPoints[6] = (i + 1) * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k + 1; + cellPoints[7] = i * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k + 1; + std::copy(cellPoints, cellPoints + 8, std::back_inserter(this->Cells)); + } + } + } +} + +size_t Grid::GetNumberOfPoints() const +{ + return this->Points.size() / 3; +} + +size_t Grid::GetNumberOfCells() const +{ + return this->Cells.size() / 8; +} + +const double* Grid::GetPointsArray() const +{ + if (this->Points.empty()) + { + return nullptr; + } + return this->Points.data(); +} + +const double* Grid::GetPoint(size_t pointId) const +{ + if (pointId >= this->Points.size()) + { + return nullptr; + } + return &(this->Points[pointId * 3]); +} + +const unsigned int* Grid::GetCellPoints(size_t cellId) const +{ + if (cellId >= this->Cells.size()) + { + return nullptr; + } + return &(this->Cells[cellId * 8]); +} diff --git a/plugins/catalyst/test/Grid.h b/plugins/catalyst/test/Grid.h new file mode 100644 index 000000000..83ee2919f --- /dev/null +++ b/plugins/catalyst/test/Grid.h @@ -0,0 +1,22 @@ +#ifndef GRID_H +#define GRID_H + +#include +#include + +class Grid +{ +public: + Grid(const unsigned int numPoints[3], const double spacing[3]); + size_t GetNumberOfPoints() const; + size_t GetNumberOfCells() const; + const double* GetPointsArray() const; + const double* GetPoint(size_t pointId) const; + const unsigned int* GetCellPoints(size_t cellId) const; + +private: + std::vector Points; + std::vector Cells; +}; + +#endif diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/test/PDIAdaptor.cxx new file mode 100644 index 000000000..cc45967d3 --- /dev/null +++ b/plugins/catalyst/test/PDIAdaptor.cxx @@ -0,0 +1,87 @@ +#include "PDIAdaptor.h" + +#include "Attributes.h" +#include "Grid.h" + +#include +#include + 
+namespace PDIAdaptor +{ + +bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) +{ + PC_tree_t conf = PC_parse_path(pdi_yaml_config_file_path.c_str()); + auto status = PDI_init(PC_get(conf, "")); + if (status != PDI_status_t::PDI_OK) + { + return false; + } + + auto points_array_size = grid.GetNumberOfPoints() * 3; + status = PDI_expose("points_array_size", &points_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) + { + return false; + } + + auto number_of_cells = grid.GetNumberOfCells(); + status = PDI_expose("cell_points_size", &number_of_cells, PDI_OUT); + if (status != PDI_status_t::PDI_OK) + { + return false; + } + + auto velocity_array_size = grid.GetNumberOfPoints() * 3; + status = PDI_expose("velocity_array_size", &velocity_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) + { + return false; + } + + auto pressure_array_size = grid.GetNumberOfCells(); + status = PDI_expose("pressure_array_size", &pressure_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) + { + return false; + } + + return true; +} + +bool Execute(int cycle, double time, Grid& grid, Attributes& attribs) +{ + auto number_of_points = grid.GetNumberOfPoints(); + auto number_of_cells = grid.GetNumberOfCells(); + + auto status = PDI_multi_expose( + // + "catalyst_execute", + // + "cycle", &cycle, PDI_OUT, + // + "time", &time, PDI_OUT, + // + "points_array", grid.GetPointsArray(), PDI_OUT, + // + "number_of_points", &number_of_points, PDI_OUT, + // + "cell_points", grid.GetCellPoints(0), PDI_OUT, + // + "number_of_cells", &number_of_cells, PDI_OUT, + // + "velocity_array", attribs.GetVelocityArray(), PDI_OUT, + // + "pressure_array", attribs.GetPressureArray(), PDI_OUT, + // + NULL); + + return status == PDI_status_t::PDI_OK; +} + +bool Finalize() +{ + auto status = PDI_finalize(); + return status == PDI_status_t::PDI_OK; +} +} diff --git a/plugins/catalyst/test/PDIAdaptor.h b/plugins/catalyst/test/PDIAdaptor.h new file mode 100644 index 
000000000..a00dbfbfc --- /dev/null +++ b/plugins/catalyst/test/PDIAdaptor.h @@ -0,0 +1,16 @@ +#ifndef PDI_ADAPTOR_H +#define PDI_ADAPTOR_H + +#include + +class Grid; +class Attributes; + +namespace PDIAdaptor +{ +bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid); +bool Execute(int cycle, double time, Grid& grid, Attributes& attribs); +bool Finalize(); +} + +#endif diff --git a/plugins/catalyst/test/catalyst_pipeline.py b/plugins/catalyst/test/catalyst_pipeline.py new file mode 100644 index 000000000..7451835fb --- /dev/null +++ b/plugins/catalyst/test/catalyst_pipeline.py @@ -0,0 +1,17 @@ +from paraview.simple import * + +# Greeting to ensure that ctest knows this script is being imported +print("executing catalyst_pipeline") + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. +producer = TrivialProducer(registrationName="grid") + +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) + print("velocity-magnitude-range:", producer.PointData["velocity"].GetRange(-1)) + print("pressure-range:", producer.CellData["pressure"].GetRange(0)) diff --git a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py new file mode 100644 index 000000000..12640fde8 --- /dev/null +++ b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py @@ -0,0 +1,81 @@ +# script-version: 2.0 +from paraview.simple import * +from paraview import catalyst +import time + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. 
+producer = TrivialProducer(registrationName="grid") + +# ---------------------------------------------------------------- +# setup views used in the visualization +# ---------------------------------------------------------------- + +# Create a new 'Render View' +renderView1 = CreateView('RenderView') +renderView1.ViewSize = [1600,800] +renderView1.CameraPosition = [157.90070691620653, 64.91180236667495, 167.90421495515105] +renderView1.CameraFocalPoint = [19.452526958533134, 28.491610229010647, 10.883993417012459] +renderView1.CameraViewUp = [0.07934883419275315, 0.953396338566962, -0.2910999555468221] +renderView1.CameraFocalDisk = 1.0 +renderView1.CameraParallelScale = 54.99504523136608 + +# get color transfer function/color map for 'velocity' +velocityLUT = GetColorTransferFunction('velocity') +velocityLUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, 29.205000000000002, 0.865003, 0.865003, 0.865003, 58.410000000000004, 0.705882, 0.0156863, 0.14902] +velocityLUT.ScalarRangeInitialized = 1.0 + +# show data from grid +gridDisplay = Show(producer, renderView1, 'UnstructuredGridRepresentation') + +gridDisplay.Representation = 'Surface' +gridDisplay.ColorArrayName = ['POINTS', 'velocity'] +gridDisplay.LookupTable = velocityLUT + +# get color legend/bar for velocityLUT in view renderView1 +velocityLUTColorBar = GetScalarBar(velocityLUT, renderView1) +velocityLUTColorBar.Title = 'velocity' +velocityLUTColorBar.ComponentTitle = 'Magnitude' + +# set color bar visibility +velocityLUTColorBar.Visibility = 1 + +# show color legend +gridDisplay.SetScalarBarVisibility(renderView1, True) + +# ---------------------------------------------------------------- +# setup extractors +# ---------------------------------------------------------------- + +SetActiveView(renderView1) +# create extractor +pNG1 = CreateExtractor('PNG', renderView1, registrationName='PNG1') +# trace defaults for the extractor. 
+pNG1.Trigger = 'TimeStep' + +# init the 'PNG' selected for 'Writer' +pNG1.Writer.FileName = 'screenshot_{timestep:06d}.png' +pNG1.Writer.ImageResolution = [1600,800] +pNG1.Writer.Format = 'PNG' + +# ------------------------------------------------------------------------------ +# Catalyst options +options = catalyst.Options() +options.EnableCatalystLive = 1 + + +# Greeting to ensure that ctest knows this script is being imported +print("executing catalyst_pipeline") +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) + print("velocity-magnitude-range:", producer.PointData["velocity"].GetRange(-1)) + print("pressure-range:", producer.CellData["pressure"].GetRange(0)) + # In a real simulation sleep is not needed. We use it here to slow down the + # "simulation" and make sure ParaView client can catch up with the produced + # results instead of having all of them flashing at once. 
+ if options.EnableCatalystLive: + time.sleep(1) diff --git a/plugins/catalyst/test/main.cxx b/plugins/catalyst/test/main.cxx new file mode 100644 index 000000000..6330e2a65 --- /dev/null +++ b/plugins/catalyst/test/main.cxx @@ -0,0 +1,53 @@ +#include "Attributes.h" +#include "Grid.h" +#include "PDIAdaptor.h" + +#include +#include +#include + +int main(int argc, char* argv[]) +{ + MPI_Init(&argc, &argv); + unsigned int numPoints[3] = { 70, 60, 44 }; + double spacing[3] = { 1, 1.1, 1.3 }; + Grid grid(numPoints, spacing); + Attributes attributes(&grid); + + if (argc < 2) + { + std::cerr << "expecting the pdi yaml config as argument" << std::endl; + return EXIT_FAILURE; + } + auto code = PDIAdaptor::Initialize(std::string(argv[1]), grid); + if (!code) + { + std::cerr << "PDIAdaptor::Initialize failure" << std::endl; + return EXIT_FAILURE; + } + + unsigned int numberOfTimeSteps = 10; + for (unsigned int timeStep = 0; timeStep < numberOfTimeSteps; timeStep++) + { + // use a time step length of 0.1 + double time = timeStep * 0.1; + attributes.UpdateFields(time); + + code = PDIAdaptor::Execute(timeStep, time, grid, attributes); + if (!code) + { + std::cerr << "PDIAdaptor::Execute failure" << std::endl; + return EXIT_FAILURE; + } + } + + code = PDIAdaptor::Finalize(); + if (!code) + { + std::cerr << "PDIAdaptor::Finalize failure" << std::endl; + return EXIT_FAILURE; + } + + MPI_Finalize(); + return EXIT_SUCCESS; +} diff --git a/plugins/catalyst/test/pdi.yml.in b/plugins/catalyst/test/pdi.yml.in new file mode 100644 index 000000000..d96507436 --- /dev/null +++ b/plugins/catalyst/test/pdi.yml.in @@ -0,0 +1,129 @@ +logging: + level: "info" +metadata: + points_array_size: size_t + cell_points_size: size_t + velocity_array_size: size_t + pressure_array_size: size_t +data: + cycle: int + time: double + number_of_points: size_t + number_of_cells: size_t + points_array: + type: array + subtype: double + size: $points_array_size + cell_points: + type: array + subtype: uint32 + 
size: $cell_points_size + velocity_array: + type: array + subtype: double + size: $velocity_array_size + pressure_array: + type: array + subtype: float + size: $pressure_array_size +plugins: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + PDI_execute_event_name: "catalyst_execute" + execute: + state: + timestep: + PDI_data: + name: "cycle" + time: + PDI_data: + name: "time" + multiblock: 1 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "explicit" + values: + x: + PDI_data: + name: "points_array" + size: + PDI_data: + name: "number_of_points" + offset: 0 + stride: 3 + y: + PDI_data: + name: "points_array" + size: + PDI_data: + name: "number_of_points" + offset: 1 + stride: 3 + z: + PDI_data: + name: "points_array" + size: + PDI_data: + name: "number_of_points" + offset: 2 + stride: 3 + topologies: + my_mesh: + type: "unstructured" + coordset: "my_coords" + elements: + shape: "hex" + connectivity: + PDI_data: + name: "cell_points" + size: + PDI_data: + name: "number_of_cells" + multiply: 8 + fields: + velocity: + association: "vertex" + topology: "my_mesh" + volume_dependent: "false" + values: + x: + PDI_data: + name: "velocity_array" + size: + PDI_data: + name: "number_of_points" + offset: 0 + y: + PDI_data: + name: "velocity_array" + size: + PDI_data: + name: "number_of_points" + offset: + PDI_data: + name: "number_of_points" + z: + PDI_data: + name: "velocity_array" + size: + PDI_data: + name: "number_of_points" + offset: + PDI_data: + name: "number_of_points" + multiply: 2 + pressure: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data: + name: "pressure_array" + size: + PDI_data: + name: "number_of_cells" diff --git a/plugins/catalyst/test/references/execute_reference.json b/plugins/catalyst/test/references/execute_reference.json new file mode 100644 index 000000000..9754b475a --- /dev/null +++ 
b/plugins/catalyst/test/references/execute_reference.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "little"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "little"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 184800,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 184800,"offset": 1478434,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 184800,"offset": 2956834,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 4435234,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 4435247,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 4435257,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "connectivity": {"dtype":"uint32","number_of_elements": 1400424,"offset": 4435261,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 10036957,"stride": 1,"element_bytes": 1,"endianness": 
"little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 10036964,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 10036972,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 184800,"offset": 10036978,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 184800,"offset": 11515378,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 184800,"offset": 12993778,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 14472178,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 14472186,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 14472194,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": {"dtype":"float32","number_of_elements": 175053,"offset": 14472200,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/references/execute_reference_rank0.json b/plugins/catalyst/test/references/execute_reference_rank0.json new file mode 100644 index 000000000..0c6da72c5 --- /dev/null +++ b/plugins/catalyst/test/references/execute_reference_rank0.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "little"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 
8,"endianness": "little"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": 
{"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/references/execute_reference_rank1.json b/plugins/catalyst/test/references/execute_reference_rank1.json new file mode 100644 index 000000000..e22927616 --- /dev/null +++ b/plugins/catalyst/test/references/execute_reference_rank1.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "little"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "little"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 50160,"offset": 34,"stride": 
8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 50160,"offset": 401314,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 50160,"offset": 802594,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1203874,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1203887,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1203897,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "connectivity": {"dtype":"uint32","number_of_elements": 365328,"offset": 1203901,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2665213,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2665220,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2665228,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 50160,"offset": 2665234,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 50160,"offset": 3066514,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 50160,"offset": 3467794,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3869074,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 
3869082,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3869090,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": {"dtype":"float32","number_of_elements": 45666,"offset": 3869096,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/references/execute_reference_rank2.json b/plugins/catalyst/test/references/execute_reference_rank2.json new file mode 100644 index 000000000..0c6da72c5 --- /dev/null +++ b/plugins/catalyst/test/references/execute_reference_rank2.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "little"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "little"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "little"}, + 
"coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/references/execute_reference_rank3.json 
b/plugins/catalyst/test/references/execute_reference_rank3.json new file mode 100644 index 000000000..0c6da72c5 --- /dev/null +++ b/plugins/catalyst/test/references/execute_reference_rank3.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "little"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "little"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + }, + "fields": + { + "velocity": + { + 
"association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "little"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "little"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": "little"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "little"} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/references/finalize_reference.json b/plugins/catalyst/test/references/finalize_reference.json new file mode 100644 index 000000000..4d10ad555 --- /dev/null +++ b/plugins/catalyst/test/references/finalize_reference.json @@ -0,0 +1 @@ +{"dtype":"empty"} \ No newline at end of file diff --git a/plugins/catalyst/test/references/initialize_reference.json b/plugins/catalyst/test/references/initialize_reference.json new file mode 100644 index 000000000..0457a7e38 --- /dev/null +++ 
b/plugins/catalyst/test/references/initialize_reference.json @@ -0,0 +1,10 @@ + +{ + "catalyst": + { + "scripts": + { + "script1": {"dtype":"char8_str","number_of_elements": 110,"offset": 0,"stride": 1,"element_bytes": 1,"endianness": "little"} + } + } +} \ No newline at end of file diff --git a/plugins/catalyst/test/run_test.py b/plugins/catalyst/test/run_test.py new file mode 100644 index 000000000..73facf916 --- /dev/null +++ b/plugins/catalyst/test/run_test.py @@ -0,0 +1,46 @@ +import subprocess +import sys +import os +import filecmp +import json + +binary_folder = sys.argv[1] +source_folder = sys.argv[2] + +env = os.environ.copy() +env["CATALYST_DATA_DUMP_DIRECTORY"] = binary_folder +env["PDI_PLUGIN_PATH"] = binary_folder + '/..' +result = subprocess.run([binary_folder + "/TestPDICatalyst", binary_folder + "/pdi.yml"], env=env) + +if(result.returncode != 0): + exit(result.returncode) + +# Check the initialize json dump +reference_initialize_json = source_folder + "/references/initialize_reference.json" +actual_initialize_json = binary_folder + "initialize_params.conduit_bin.1.0_json" +with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) + if ref_json.items() != actual_json.items(): + # Ignore the length of the script path which depends on platform. 
+ actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] + if ref_json.items() != actual_json.items(): + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') + exit(1) + +# Check the execute json dump +reference_execute_json = source_folder + "/references/execute_reference.json" +for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.1.0_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') + exit(1) + +# Check the finalize json dump +reference_finalize_json = source_folder + "/references/finalize_reference.json" +actual_finalize_json = binary_folder + "finalize_params.conduit_bin.1.0_json" +if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') + exit(1) + diff --git a/plugins/catalyst/test/run_test_mpi.py b/plugins/catalyst/test/run_test_mpi.py new file mode 100644 index 000000000..48392459a --- /dev/null +++ b/plugins/catalyst/test/run_test_mpi.py @@ -0,0 +1,51 @@ +import subprocess +import sys +import os +import filecmp +import json + +binary_folder = sys.argv[1] +source_folder = sys.argv[2] +mpi_exec = sys.argv[3] + +env = os.environ.copy() +env["CATALYST_DATA_DUMP_DIRECTORY"] = binary_folder +env["PDI_PLUGIN_PATH"] = binary_folder + '/..' +result = subprocess.run([mpi_exec, "-np", "4", binary_folder + "/TestPDICatalyst", binary_folder + "/pdi.yml"], env=env) + +if(result.returncode != 0): + exit(result.returncode) + +# Check the initialize json dump for each rank. 
+reference_initialize_json = source_folder + "/references/initialize_reference.json" +for rank in range(4): + actual_initialize_json = binary_folder + f"initialize_params.conduit_bin.4.{rank}_json" + with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) + if ref_json.items() != actual_json.items(): + # Ignore the length of the script path which depends on platform. + actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] + if ref_json.items() != actual_json.items(): + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}"') + exit(1) + + +# Check the execute json dump for each rank. +for rank in range(4): + reference_execute_json = source_folder + f"/references/execute_reference_rank{rank}.json" + for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}"') + exit(1) + +# Check the finalize json dump for each rank. +reference_finalize_json = source_folder + "/references/finalize_reference.json" +for rank in range(4): + actual_finalize_json = binary_folder + f"finalize_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}"') + exit(1) + From 8102376b410c2f96b2d0d88b59cfabacb8cf94ab Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Fri, 31 Jan 2025 13:34:39 +0100 Subject: [PATCH 05/31] Add francois as author. 
--- AUTHORS | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/AUTHORS b/AUTHORS index 402523718..b167fab30 100644 --- a/AUTHORS +++ b/AUTHORS @@ -25,6 +25,9 @@ Benoit Martin - CEA (bmartin@cea.fr) * support for const data in `PDI_share`, `PDI_expose` and `PDI_multi_expose` * Initial implementation of the cmake test +François Mazen - Kitware (francois.mazen@kitware.com) +* Catalyst plugin + François-Xavier Mordant - CEA (francois-xavier.mordant@cea.fr) * Fixed CMake issues, internal API enhancement * Bug fix, JSON plugin @@ -113,3 +116,4 @@ Yushan Wang - CEA (yushan.wang@cea.fr) * Maintainer (Sept. 2023 - ...) * enable HDF5 subfiling * Add native compression support in Decl'NetCDF + From c81978b541d13e262df5ac369f0939d828fced30 Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Fri, 31 Jan 2025 16:34:11 +0100 Subject: [PATCH 06/31] Build catalyst plugin with BUILD_UNSTABLE option only. --- CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 204173e6d..0304edc97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -76,8 +76,7 @@ option(BUILD_SHARED_LIBS "Build shared libraries rather than static ones" option(BUILD_TRACE_PLUGIN "Build Trace plugin" ON) option(BUILD_USER_CODE_PLUGIN "Build User-code plugin" ON) option(BUILD_JSON_PLUGIN "Build JSON plugin" OFF) -option(ENABLE_BENCHMARKING "Activate benchmarks in the test suite" OFF) -option(BUILD_CATALYST_PLUGIN "Build Catalyst plugin" ON) +option(BUILD_CATALYST_PLUGIN "Build Catalyst plugin" "${BUILD_UNSTABLE}") From d4dadc9d85d98bcef271fe769bac6fc1e73811ad Mon Sep 17 00:00:00 2001 From: Francois Mazen Date: Fri, 31 Jan 2025 16:40:39 +0100 Subject: [PATCH 07/31] Update copyright --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0304edc97..03e0c9017 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,6 @@ #============================================================================= # 
Copyright (C) 2015-2026 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# Copyright (C) 2024-2025 Kitware SAS # # All rights reserved. # From de4193fc02f4151fd0a9f3e86dfa75dfb773c805 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Wed, 30 Jul 2025 11:31:40 +0200 Subject: [PATCH 08/31] fix bug of cell_points_size --- plugins/catalyst/test/PDIAdaptor.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/test/PDIAdaptor.cxx index cc45967d3..72bf8196f 100644 --- a/plugins/catalyst/test/PDIAdaptor.cxx +++ b/plugins/catalyst/test/PDIAdaptor.cxx @@ -25,8 +25,8 @@ bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) return false; } - auto number_of_cells = grid.GetNumberOfCells(); - status = PDI_expose("cell_points_size", &number_of_cells, PDI_OUT); + auto cell_points_size = grid.GetNumberOfCells() * 8; + status = PDI_expose("cell_points_size", &cell_points_size, PDI_OUT); if (status != PDI_status_t::PDI_OK) { return false; From fe23780c6a58908f68a4bdeb5c1bf06992e86cde Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 26 Aug 2025 12:02:35 +0200 Subject: [PATCH 09/31] PDI_data remove --- plugins/catalyst/pdi_catalyst_plugin.cxx | 166 +++++++++++------- plugins/catalyst/pdi_catalyst_plugin.h | 2 +- plugins/catalyst/test/Attributes.cxx | 14 +- plugins/catalyst/test/PDIAdaptor.cxx | 27 ++- .../test/catalyst_pipeline_with_rendering.py | 60 ++++++- plugins/catalyst/test/pdi.yml.in | 110 +++++------- 6 files changed, 249 insertions(+), 130 deletions(-) diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 1971966ad..ee69b9a73 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -45,6 +45,7 @@ void catalyst_plugin::ProcessData(const std::string& data_name, PDI::Ref ref) void catalyst_plugin::ProcessEvent(const std::string& event_name) { + 
context().logger().trace("####### call catalyst_plugin::ProcessEvent ######"); if (event_name == this->PDIExecuteEventName) { RunCatalystExecute(); @@ -95,7 +96,6 @@ void catalyst_plugin::RunCatalystExecute() remainingTreeAndParentNode.pop(); auto current_node = conduit_cpp::cpp_node(current.parentNode)[current.name]; - switch (current.tree.node->type) { case YAML_NO_NODE: @@ -105,10 +105,58 @@ void catalyst_plugin::RunCatalystExecute() switch (current.tree.node->data.scalar.style) { case YAML_PLAIN_SCALAR_STYLE: - // TODO: handle float/double type. - current_node.set_int64(PDI::to_long(current.tree)); + // handle integer or float/double type that doesn't depend on PDI store + { + PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + + auto data_type = spec_ref.type()->evaluate(context()); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) + { + PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) + { + current_node.set_int64(data_expression.to_long(context())); + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) + { + context().logger().error("The expression {} is defined as unsigned integer.", PDI::to_string(current.tree)); + // context().logger().trace("The expression {} is defined with unsigned integer. 
It is tranformed to signed integer.", PDI::to_string(current.tree)); + // current_node.set_int64(data_expression.to_long(context())); + } + else if (scalar_kind == PDI::Scalar_kind::FLOAT) + { + current_node.set_float64(data_expression.to_double(context())); + } + else + { + context().logger().error("Unknown Scalar Type for variable {}", PDI::to_string(current.tree)); + } + } + else + { + context().logger().error("Unsupported datatype for variable: {}", PDI::to_string(current.tree)); + } + } break; case YAML_SINGLE_QUOTED_SCALAR_STYLE: + // handle integer or float/double type that depend on scalar PDI data + { + std::string data_name{PDI::to_string(current.tree)}; + PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + auto data_type = spec_ref.type()->evaluate(context()); + + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) + { + FillNodeWithScalarPDIData(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + } + else + { + context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); + } + } + break; case YAML_DOUBLE_QUOTED_SCALAR_STYLE: current_node.set_string(PDI::to_string(current.tree)); break; @@ -124,26 +172,33 @@ void catalyst_plugin::RunCatalystExecute() break; case YAML_MAPPING_NODE: int data_tree_size = PDI::len(current.tree); - // Check for dynamic PDI Data. 
- if (data_tree_size == 1) + // Check for dynamic PDI Data array + bool pdi_data_array=false; + for (int i = data_tree_size - 1; i >= 0; --i) { - auto key = PC_get(current.tree, "{%d}", 0); - if (PDI::to_string(key) == "PDI_data") + auto key = PC_get(current.tree, "{%d}", i); + if (PDI::to_string(key) == "PDI_data_array") { - auto value = PC_get(current.tree, "<%d>", 0); - this->FillNodeWithPDIData(conduit_cpp::c_node(¤t_node), value); - break; + this->FillNodeWithPDIDataArray(conduit_cpp::c_node(¤t_node), current.tree); + pdi_data_array=true; + break; // break the loop } } + if (pdi_data_array) + { + break; // break the case + } + // reverse order to get the correct order when poping the stack. for (int i = data_tree_size - 1; i >= 0; --i) { auto key = PC_get(current.tree, "{%d}", i); auto value = PC_get(current.tree, "<%d>", i); - // std::cout << "Mapping Node: " << PDI::to_string(key) << std::endl; + std::cout << "Mapping Node: key="<< PDI::to_string(key) << std::endl; remainingTreeAndParentNode.push( { value, PDI::to_string(key), conduit_cpp::c_node(¤t_node) }); } + std::cout << "End YAML_MAPPING_NODE" << std::endl; break; } } @@ -160,6 +215,7 @@ void catalyst_plugin::RunCatalystExecute() context().logger().error("catalyst_execute failure"); } + // clear CurrentPDIData at each iteration this->CurrentPDIData.clear(); } @@ -174,16 +230,16 @@ void catalyst_plugin::RunCatalystFinalize() } } -void catalyst_plugin::FillNodeWithPDIData(conduit_node* node, PC_tree_t tree) +void catalyst_plugin::FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tree) { - auto name_spec = PC_get(tree, ".name"); + auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { context().logger().error("No \"name\" child in PDI_data spec."); return; } - std::string name = PDI::to_string(name_spec); + std::string name = PDI::to_string(name_spec); // Jacques: Perhaps we need an expression if the users add an index for example "my_name${index}". 
auto it = this->CurrentPDIData.find(name); if (it == this->CurrentPDIData.end()) { @@ -192,6 +248,7 @@ void catalyst_plugin::FillNodeWithPDIData(conduit_node* node, PC_tree_t tree) } auto ref = it->second; PDI::Ref_r ref_r{ ref }; + if (!ref_r) { context().logger().error("The PDIData named \"{}\" is not readable.", name); @@ -199,17 +256,13 @@ void catalyst_plugin::FillNodeWithPDIData(conduit_node* node, PC_tree_t tree) } auto data_type = ref_r.type(); - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) - { - FillNodeWithScalarPDIData(node, name, *scalar_datatype, ref_r); - } - else if (auto array_datatype = std::dynamic_pointer_cast(data_type)) + if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { FillNodeWithArrayPDIData(node, name, tree, *array_datatype, ref_r); } else { - context().logger().error("Unsupported datatype for variable: {}", name); + context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); } } @@ -294,6 +347,7 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st PC_tree_t& tree, const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r) { PDI::Datatype_sptr type = array_datatype.subtype(); + // Jacques: Pourquoi une boucle While ?? Infini ?? while (auto&& array_type = std::dynamic_pointer_cast(type)) { type = array_type->subtype(); @@ -305,12 +359,17 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st return; } + // Jacques: il faut toujours que le .size soit defini ==> faire un test. 
conduit_index_t num_elements = 0; auto size_spec = PC_get(tree, ".size"); if (PC_status(size_spec) == PC_OK) { num_elements = GetLongValueFromSpecNode(size_spec, name); } + else + { + context().logger().error("Unknown the number of elements for variable{} passed to catalyst.", name); + } conduit_index_t offset = 0; auto offset_spec = PC_get(tree, ".offset"); @@ -436,53 +495,40 @@ long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::strin { if (spec.node->type == YAML_SCALAR_NODE) { - return PDI::to_long(spec); - } - else if (spec.node->type == YAML_MAPPING_NODE) - { - auto pdi_data_spec = PC_get(spec, ".PDI_data"); - if (PC_status(pdi_data_spec)) + PDI::Expression data_expression{PDI::to_string(spec)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + if (!spec_ref) { - context().logger().error("Unsupported mapping under the variable: {}", name); + context().logger().error("The PDIData named \"{}\" is not readable.", name); return 0; } - auto name_spec = PC_get(pdi_data_spec, ".name"); - if (PC_status(name_spec)) - { - context().logger().error("No \"name\" child in PDI_data spec."); - return 0; - } - - std::string variable_name = PDI::to_string(name_spec); - auto it = this->CurrentPDIData.find(variable_name); - if (it == this->CurrentPDIData.end()) + auto data_type = spec_ref.type()->evaluate(context()); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - context().logger().error("Can't find the PDI_data named: {}", variable_name); - } - auto ref = it->second; - PDI::Ref_r ref_r{ ref }; - if (!ref_r) - { - context().logger().error("The PDIData named \"{}\" is not readable.", variable_name); - return 0; - } - - auto scalar_datatype = std::dynamic_pointer_cast(ref_r.type()); - if (!scalar_datatype) - { - context().logger().error( - "PDI Data subtype of variable {} should be scalar type.", variable_name); - return 0; + PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); + if (scalar_kind == 
PDI::Scalar_kind::SIGNED) + { + return data_expression.to_long(context()); + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) + { + // Jacques: auto value = ref_r.scalar_value();?? a utiliser en fonction du buffersize ?? + return data_expression.to_long(context()); + } + else + { + context().logger().error("Unknown Scalar Type for variable {}. The type must be an integer", PDI::to_string(spec)); + } } - auto value = ref_r.scalar_value(); - - long multiply = 1; - auto multiply_spec = PC_get(pdi_data_spec, ".multiply"); - if (!PC_status(multiply_spec)) + else { - multiply = PDI::to_long(multiply_spec); + context().logger().error("The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); } - return value * multiply; + return 0; + } + else + { + context().logger().error("Supported only YAML_SCALAR_NODE for variable {}", name); } return 0; } diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index a6300305a..43d2df9a5 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -33,7 +33,7 @@ class catalyst_plugin : public PDI::Plugin void RunCatalystInitialize(); void RunCatalystExecute(); void RunCatalystFinalize(); - void FillNodeWithPDIData(conduit_node* node, PC_tree_t tree); + void FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tree); void FillNodeWithScalarPDIData(conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); void FillNodeWithArrayPDIData(conduit_node* node, const std::string& name, PC_tree_t& tree, diff --git a/plugins/catalyst/test/Attributes.cxx b/plugins/catalyst/test/Attributes.cxx index 3d027a423..31d009ad9 100644 --- a/plugins/catalyst/test/Attributes.cxx +++ b/plugins/catalyst/test/Attributes.cxx @@ -22,9 +22,21 @@ void Attributes::UpdateFields(double time) this->Velocity[pt] = coord[1] * time; } std::fill(this->Velocity.begin() + numPoints, this->Velocity.end(), 0.); + 
size_t numCells = this->GridPtr->GetNumberOfCells(); this->Pressure.resize(numCells); - std::fill(this->Pressure.begin(), this->Pressure.end(), 1.f); + + double tmp_var=(numCells * time*0.5); + size_t first_cells; + if( tmp_var < 0 ){ + first_cells=0; + } + else { + first_cells= (size_t) tmp_var; + } + + std::fill(this->Pressure.begin(), this->Pressure.end(), -1.f); + std::fill(this->Pressure.begin()+first_cells, this->Pressure.end(), 1.f); } double* Attributes::GetVelocityArray() diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/test/PDIAdaptor.cxx index 72bf8196f..e5733d7b5 100644 --- a/plugins/catalyst/test/PDIAdaptor.cxx +++ b/plugins/catalyst/test/PDIAdaptor.cxx @@ -5,6 +5,7 @@ #include #include +#include namespace PDIAdaptor { @@ -25,8 +26,8 @@ bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) return false; } - auto cell_points_size = grid.GetNumberOfCells() * 8; - status = PDI_expose("cell_points_size", &cell_points_size, PDI_OUT); + auto number_of_cells = grid.GetNumberOfCells() * 8; + status = PDI_expose("cell_points_size", &number_of_cells, PDI_OUT); if (status != PDI_status_t::PDI_OK) { return false; @@ -54,6 +55,22 @@ bool Execute(int cycle, double time, Grid& grid, Attributes& attribs) auto number_of_points = grid.GetNumberOfPoints(); auto number_of_cells = grid.GetNumberOfCells(); + std::cout << "#### begin false_catalyst_execute ####" << std::endl; + + auto status_false = PDI_multi_expose( + // + "false_catalyst_execute", + // + "number_of_cells", &number_of_cells, PDI_OUT, + // + "cell_points", grid.GetCellPoints(0), PDI_OUT, + // + "velocity_array", attribs.GetVelocityArray(), PDI_OUT, + // + "pressure_array", attribs.GetPressureArray(), PDI_OUT, + // + NULL); + auto status = PDI_multi_expose( // "catalyst_execute", @@ -62,14 +79,14 @@ bool Execute(int cycle, double time, Grid& grid, Attributes& attribs) // "time", &time, PDI_OUT, // - "points_array", grid.GetPointsArray(), PDI_OUT, - // 
"number_of_points", &number_of_points, PDI_OUT, // - "cell_points", grid.GetCellPoints(0), PDI_OUT, + "points_array", grid.GetPointsArray(), PDI_OUT, // "number_of_cells", &number_of_cells, PDI_OUT, // + "cell_points", grid.GetCellPoints(0), PDI_OUT, + // "velocity_array", attribs.GetVelocityArray(), PDI_OUT, // "pressure_array", attribs.GetPressureArray(), PDI_OUT, diff --git a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py index 12640fde8..cacf33810 100644 --- a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py +++ b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py @@ -43,6 +43,7 @@ # show color legend gridDisplay.SetScalarBarVisibility(renderView1, True) + # ---------------------------------------------------------------- # setup extractors # ---------------------------------------------------------------- @@ -58,10 +59,67 @@ pNG1.Writer.ImageResolution = [1600,800] pNG1.Writer.Format = 'PNG' +# # ######## render view pressure + +# # Create a new 'Render View' +# renderView2 = CreateView('RenderView') +# renderView2.ViewSize = [1600,800] +# renderView2.CameraPosition = [157.90070691620653, 64.91180236667495, 167.90421495515105] +# renderView2.CameraFocalPoint = [19.452526958533134, 28.491610229010647, 10.883993417012459] +# renderView2.CameraViewUp = [0.07934883419275315, 0.953396338566962, -0.2910999555468221] +# renderView2.CameraFocalDisk = 1.0 +# renderView2.CameraParallelScale = 54.99504523136608 + +# # get color transfer function/color map for 'velocity' +# pressureLUT = GetColorTransferFunction('pressure') +# pressureLUT.RGBPoints = [-2.0, 0.231373, 0.298039, 0.752941, +# 0.0, 0.865003, 0.865003, 0.865003, +# 2.0, 0.705882, 0.0156863, 0.14902] +# pressureLUT.ScalarRangeInitialized = 1.0 + +# # show data from grid +# gridDisplay222 = Show(producer, renderView2, 'UnstructuredGridRepresentation') ## pourquoi pas uniform possible +# #gridDisplay222 = Show(producer, 
renderView2, 'StructuredGridRepresentation') ## pourquoi pas uniform possible + + +# gridDisplay222.Representation = 'Surface' +# gridDisplay222.ColorArrayName = ['CELLS', 'pressure'] +# gridDisplay222.LookupTable = pressureLUT + +# # get color legend/bar for pressureLUT in view renderView2 +# pressureLUTColorBar = GetScalarBar(pressureLUT, renderView2) +# pressureLUTColorBar.Title = 'pressure' +# pressureLUTColorBar.ComponentTitle = 'Magnitude' + +# # set color bar visibility +# pressureLUTColorBar.Visibility = 1 + +# # show color legend +# gridDisplay222.SetScalarBarVisibility(renderView2, True) + + +# # # ---------------------------------------------------------------- +# # # setup extractors +# # # ---------------------------------------------------------------- + +# SetActiveView(renderView2) +# # create extractor +# pNG2= CreateExtractor('PNG', renderView2, registrationName='PNG2') +# # trace defaults for the extractor. +# pNG2.Trigger = 'TimeStep' + +# # init the 'PNG' selected for 'Writer' +# pNG2.Writer.FileName = 'pressure_screenshot_{timestep:06d}.png' +# pNG2.Writer.ImageResolution = [1600,800] +# pNG2.Writer.Format = 'PNG' + + # ------------------------------------------------------------------------------ # Catalyst options options = catalyst.Options() -options.EnableCatalystLive = 1 +## 0: no client, generate the images +## 1: interactif +options.EnableCatalystLive = 0 # Greeting to ensure that ctest knows this script is being imported diff --git a/plugins/catalyst/test/pdi.yml.in b/plugins/catalyst/test/pdi.yml.in index d96507436..0b8d1f327 100644 --- a/plugins/catalyst/test/pdi.yml.in +++ b/plugins/catalyst/test/pdi.yml.in @@ -1,5 +1,5 @@ logging: - level: "info" + level: "debug" metadata: points_array_size: size_t cell_points_size: size_t @@ -27,18 +27,18 @@ data: subtype: float size: $pressure_array_size plugins: + trace: +# decl_hdf5: +# file: test_writing_.h5 +# write: [number_of_points,points_array] catalyst: scripts: script1: 
"@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" PDI_execute_event_name: "catalyst_execute" execute: state: - timestep: - PDI_data: - name: "cycle" - time: - PDI_data: - name: "time" + timestep: '$cycle' + time: '$time' multiblock: 1 channels: grid: @@ -49,29 +49,20 @@ plugins: type: "explicit" values: x: - PDI_data: - name: "points_array" - size: - PDI_data: - name: "number_of_points" - offset: 0 - stride: 3 + PDI_data_array: "points_array" + size: '$number_of_points' + offset: 0 + stride: 3 y: - PDI_data: - name: "points_array" - size: - PDI_data: - name: "number_of_points" - offset: 1 - stride: 3 + PDI_data_array: "points_array" + size: '$number_of_points' + offset: 1 + stride: 3 z: - PDI_data: - name: "points_array" - size: - PDI_data: - name: "number_of_points" - offset: 2 - stride: 3 + PDI_data_array: "points_array" + size: '$number_of_points' + offset: 2 + stride: 3 topologies: my_mesh: type: "unstructured" @@ -79,12 +70,8 @@ plugins: elements: shape: "hex" connectivity: - PDI_data: - name: "cell_points" - size: - PDI_data: - name: "number_of_cells" - multiply: 8 + PDI_data_array: "cell_points" + size: '8*$number_of_cells' fields: velocity: association: "vertex" @@ -92,38 +79,37 @@ plugins: volume_dependent: "false" values: x: - PDI_data: - name: "velocity_array" - size: - PDI_data: - name: "number_of_points" - offset: 0 + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: 0 y: - PDI_data: - name: "velocity_array" - size: - PDI_data: - name: "number_of_points" - offset: - PDI_data: - name: "number_of_points" + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: '$number_of_points' z: - PDI_data: - name: "velocity_array" - size: - PDI_data: - name: "number_of_points" - offset: - PDI_data: - name: "number_of_points" - multiply: 2 + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: '2*$number_of_points' + association: "vertex" + topology: "my_mesh" + volume_dependent: "false" + values: + 
x: + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: 0 + y: + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: '$number_of_points' + z: + PDI_data_array: "velocity_array" + size: '$number_of_points' + offset: '2*$number_of_points' pressure: association: "element" topology: "my_mesh" volume_dependent: "false" values: - PDI_data: - name: "pressure_array" - size: - PDI_data: - name: "number_of_cells" + PDI_data_array: "pressure_array" + size: '$number_of_cells' From 6abc52ff89704762aa1858356bdd19d108deebc0 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Mon, 15 Sep 2025 12:28:12 +0200 Subject: [PATCH 10/31] remove unecessary line --- plugins/catalyst/test/PDIAdaptor.cxx | 16 ---------------- plugins/catalyst/test/pdi.yml.in | 19 ------------------- 2 files changed, 35 deletions(-) diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/test/PDIAdaptor.cxx index e5733d7b5..f50ae6361 100644 --- a/plugins/catalyst/test/PDIAdaptor.cxx +++ b/plugins/catalyst/test/PDIAdaptor.cxx @@ -55,22 +55,6 @@ bool Execute(int cycle, double time, Grid& grid, Attributes& attribs) auto number_of_points = grid.GetNumberOfPoints(); auto number_of_cells = grid.GetNumberOfCells(); - std::cout << "#### begin false_catalyst_execute ####" << std::endl; - - auto status_false = PDI_multi_expose( - // - "false_catalyst_execute", - // - "number_of_cells", &number_of_cells, PDI_OUT, - // - "cell_points", grid.GetCellPoints(0), PDI_OUT, - // - "velocity_array", attribs.GetVelocityArray(), PDI_OUT, - // - "pressure_array", attribs.GetPressureArray(), PDI_OUT, - // - NULL); - auto status = PDI_multi_expose( // "catalyst_execute", diff --git a/plugins/catalyst/test/pdi.yml.in b/plugins/catalyst/test/pdi.yml.in index 0b8d1f327..20106801e 100644 --- a/plugins/catalyst/test/pdi.yml.in +++ b/plugins/catalyst/test/pdi.yml.in @@ -28,9 +28,6 @@ data: size: $pressure_array_size plugins: trace: -# decl_hdf5: -# file: test_writing_.h5 -# write: 
[number_of_points,points_array] catalyst: scripts: script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" @@ -90,22 +87,6 @@ plugins: PDI_data_array: "velocity_array" size: '$number_of_points' offset: '2*$number_of_points' - association: "vertex" - topology: "my_mesh" - volume_dependent: "false" - values: - x: - PDI_data_array: "velocity_array" - size: '$number_of_points' - offset: 0 - y: - PDI_data_array: "velocity_array" - size: '$number_of_points' - offset: '$number_of_points' - z: - PDI_data_array: "velocity_array" - size: '$number_of_points' - offset: '2*$number_of_points' pressure: association: "element" topology: "my_mesh" From 714bc67eb05ea22318b998548225d1de6fc1b226 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Mon, 15 Sep 2025 14:04:52 +0200 Subject: [PATCH 11/31] adding example for the tutorial --- .../CMakeLists.txt | 41 +++ .../catalyst_pipeline.py | 20 ++ .../catalyst_pipeline_with_rendering.py | 99 ++++++ .../config_init.yml | 72 ++++ .../main.c | 315 ++++++++++++++++++ .../run_test.py | 18 + .../run_test_mpi.py | 18 + 7 files changed, 583 insertions(+) create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/run_test.py create mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/run_test_mpi.py diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt new file mode 100644 index 000000000..40873b73d --- 
/dev/null +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt @@ -0,0 +1,41 @@ +#============================================================================= +# Copyright (C) 2015-2023 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+#============================================================================= + +cmake_minimum_required(VERSION 3.16) +#project(pdi_init LANGUAGES C) + +#find_package(spdlog) +find_package(MPI REQUIRED COMPONENTS C) +#find_package(paraconf 1.0.0 REQUIRED COMPONENTS C) +#find_package(PDI 1.9.0 REQUIRED COMPONENTS C) + +set(CMAKE_C_STANDARD 99) + +add_executable(tuto_main_ghost_attributes main.c) +target_link_libraries(tuto_main_ghost_attributes m MPI::MPI_C paraconf::paraconf PDI::pdi) + +set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes) +configure_file(config_init.yml config.yml) + +find_package(Python3 COMPONENTS Interpreter) +add_test(NAME Test_tuto_september_2025_ghost_attributes COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/run_test.py" "${CMAKE_BINARY_DIR}/test_tuto_september_2025_ghost_attributes/" "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/") +add_test(NAME Test_tuto_september_2025_ghost_attributes_MPI COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/run_test_mpi.py" "${CMAKE_BINARY_DIR}/test_tuto_september_2025_ghost_attributes/" "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/" "${MPIEXEC_EXECUTABLE}") diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py new file mode 100644 index 000000000..d815a4ec9 --- /dev/null +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py @@ -0,0 +1,20 @@ +from paraview.simple import * + +# Greeting to ensure that ctest knows this script is being imported +print("executing catalyst_pipeline") + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. 
+producer = TrivialProducer(registrationName="grid") + +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) +# print("velocity-magnitude-range:", producer.PointData["velocity"].GetRange(-1)) + print("temperature-range:", producer.CellData["temperature"].GetRange(0)) +# make sure that the ghost array has been renamed to vtkGhostType + print("temperature-range22:", producer.CellData["vtkGhostType"].GetRange(0)) + diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py new file mode 100644 index 000000000..1c482e679 --- /dev/null +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py @@ -0,0 +1,99 @@ +# script-version: 2.0 +from paraview.simple import * +from paraview import catalyst +import time + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. 
+producer = TrivialProducer(registrationName="grid") + +# ---------------------------------------------------------------- +# setup views used in the visualization +# ---------------------------------------------------------------- + +jj_ny = 60 + 2 +jj_nx = 12 + 2 + +div_jj_nx = jj_nx/2 +div_jj_ny = jj_ny/2 + +jj_pos = 1.2*jj_nx*900/120 +jj_focal_point = 2*jj_nx*240/120 + +# ######## render view temperature + +# Create a new 'Render View' +renderView1 = CreateView('RenderView') +renderView1.ViewSize = [1000,1600] +renderView1.CameraPosition = [div_jj_nx, div_jj_ny, jj_pos] +renderView1.CameraFocalPoint = [div_jj_nx, div_jj_ny, jj_focal_point] +renderView1.CameraParallelScale = 100 + +# get color transfer function/color map for 'temperature' +temperatureLUT = GetColorTransferFunction('temperature') +## RGB: first line: min value, last line: max value +temperatureLUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, + 100.0, 0.865003, 0.865003, 0.865003, + 200.0, 0.705882, 0.0156863, 0.14902] +temperatureLUT.ScalarRangeInitialized = 1.0 + +# show data from grid +# gridDisplay222 = Show(producer, renderView1, 'UnstructuredGridRepresentation') +gridDisplay222 = Show(producer, renderView1, 'StructuredGridRepresentation') + +gridDisplay222.Representation = 'Surface With Edges' +gridDisplay222.ColorArrayName = ['CELLS', 'temperature'] +gridDisplay222.LookupTable = temperatureLUT + +# get color legend/bar for temperatureLUT in view renderView1 +temperatureLUTColorBar = GetScalarBar(temperatureLUT, renderView1) +temperatureLUTColorBar.Title = 'temperature' +temperatureLUTColorBar.ComponentTitle = 'Magnitude' + +# set color bar visibility +temperatureLUTColorBar.Visibility = 1 + +# show color legend +gridDisplay222.SetScalarBarVisibility(renderView1, True) + + +# # ---------------------------------------------------------------- +# # setup extractors +# # ---------------------------------------------------------------- + +SetActiveView(renderView1) +# create extractor +pNG2= 
CreateExtractor('PNG', renderView1, registrationName='PNG2') +# trace defaults for the extractor. +pNG2.Trigger = 'TimeStep' + +# init the 'PNG' selected for 'Writer' +pNG2.Writer.FileName = 'temperature_screenshot_{timestep:06d}.png' +pNG2.Writer.ImageResolution = [1000,1600] +pNG2.Writer.Format = 'PNG' + + +# ------------------------------------------------------------------------------ +# Catalyst options +options = catalyst.Options() +## 0: pas de client, generration des images +## 1: interactif +options.EnableCatalystLive = 0 + + +# Greeting to ensure that ctest knows this script is being imported +print("#############################################################") +print("executing catalyst_pipeline") +print("#############################################################") +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) + print("temperature-range:", producer.CellData["temperature"].GetRange(0)) + # In a real simulation sleep is not needed. We use it here to slow down the + # "simulation" and make sure ParaView client can catch up with the produced + # results instead of having all of them flashing at once. 
+ if options.EnableCatalystLive: + time.sleep(1) diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml new file mode 100644 index 000000000..9f4852724 --- /dev/null +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml @@ -0,0 +1,72 @@ +# the alpha parameter +alpha: 0.125 +# global data-size (excluding the number of ghost layers for boundary conditions) +global_size: { height: 60, width: 12 } +# degree of parallelism (number of blocks in each dimension) +parallelism: { height: 2, width: 2 } + +pdi: + metadata: + local_size: {type: array, subtype: int, size: 2} + rank: int + pcoord: {type: array, subtype: int, size: 2} + psize: {type: array, subtype: int, size: 2} + max_iter: int + data: + iteration: int + temp: {type: array, subtype: double, size: ['$local_size[0]', '$local_size[1]']} + total_number_of_points: size_t + cycle: int + time: double + ghosts: { type: array, subtype: uint8_t, size: '$local_size[0]*$local_size[1]' } + coords_x: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } + coords_y: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } + plugins: + trace: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + PDI_execute_event_name: "catalyst_execute" + execute: + state: + timestep: '$cycle' + time: '$time' + multiblock: 1 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "explicit" + values: + x: + PDI_data_array: "coords_x" + size: '($local_size[0]+1)*($local_size[1]+1)' + y: + PDI_data_array: "coords_y" + size: '($local_size[0]+1)*($local_size[1]+1)' + topologies: + my_mesh: + type: "structured" + coordset: "my_coords" + elements: + dims: + i: '$local_size[1]' + j: '$local_size[0]' + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" 
+ values: + PDI_data_array: "temp" + size: '$local_size[0]*$local_size[1]' + vtkGhostType: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "ghosts" + size: '$local_size[0]*$local_size[1]' \ No newline at end of file diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c new file mode 100644 index 000000000..092da6686 --- /dev/null +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c @@ -0,0 +1,315 @@ +/******************************************************************************* + * Copyright (C) 2015-2025 Commissariat a l'energie atomique et aux energies + * alternatives (CEA) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ ******************************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include + +// size of the local data as [HEIGHT, WIDTH] including the number of ghost +// layers for communications or boundary conditions +int dsize[2]; + +// 2D size of the process grid as [HEIGHT, WIDTH] +int psize[2]; + +// 2D rank of the local process in the process grid as [YY, XX] +int pcoord[2]; + +// the alpha coefficient used in the computation +double alpha; + +double L = 1.0; +// definition of the source +// the source corresponds to a disk of an uniform value +// source1: center=(0.4,0.4), radius=0.2 and value=100 +double source1[4] = {0.4, 0.4, 0.2, 100}; +// source2: center=(0.8,0.7), radius=0.1 and value=200 +double source2[4] = {0.7, 0.8, 0.1, 200}; +// the order of the coordinates of the center (XX,YY) is inverted in the vector + +/** Initialize all the data to 0, with the exception of each cells + * whose center (cpos_x,cpos_y) is inside of the disks + * defined by source1 or source2 + * \param[out] dat the local data to initialize + */ +void init(double dat[dsize[0]][dsize[1]]) { + for (int yy = 0; yy < dsize[0]; ++yy) + for (int xx = 0; xx < dsize[1]; ++xx) + dat[yy][xx] = 0; + double dy = L / ((dsize[0] - 2) * psize[0]); + double dx = L / ((dsize[1] - 2) * psize[1]); + + double cpos_x, cpos_y; + double square_dist1, square_dist2; + for (int yy = 0; yy < dsize[0]; ++yy) { + cpos_y = (yy + pcoord[0] * (dsize[0] - 2)) * dy - 0.5 * dy; + for (int xx = 0; xx < dsize[1]; ++xx) { + cpos_x = (xx + pcoord[1] * (dsize[1] - 2)) * dx - 0.5 * dx; + square_dist1 = (cpos_y - source1[0]) * (cpos_y - source1[0]) + + (cpos_x - source1[1]) * (cpos_x - source1[1]); + if (square_dist1 <= source1[2] * source1[2]) { + dat[yy][xx] = source1[3]; + } + square_dist2 = (cpos_y - source2[0]) * (cpos_y - source2[0]) + + (cpos_x - source2[1]) * (cpos_x - source2[1]); + if (square_dist2 <= source2[2] * source2[2]) { + 
dat[yy][xx] = source2[3]; + } + } + } +} + +/** Compute the values at the next time-step based on the values at the current + * time-step + * \param[in] cur the local data at the current time-step + * \param[out] next the local data at the next time-step + */ +void iter(double cur[dsize[0]][dsize[1]], double next[dsize[0]][dsize[1]]) { + int xx, yy; + for (yy = 1; yy < dsize[0] - 1; ++yy) { + for (xx = 1; xx < dsize[1] - 1; ++xx) { + next[yy][xx] = (1. - 4. * alpha) * cur[yy][xx] + + alpha * (cur[yy][xx - 1] + cur[yy][xx + 1] + + cur[yy - 1][xx] + cur[yy + 1][xx]); + } + } +} + +/** Exchange ghost values with neighbours + * \param[in] cart_comm the MPI communicator with all processes organized in a + * 2D Cartesian grid + * \param[in] cur the local data at the current time-step + * whose ghosts need exchanging + */ +void exchange(MPI_Comm cart_comm, double cur[dsize[0]][dsize[1]]) { + MPI_Status status; + int rank_source, rank_dest; + static MPI_Datatype column, row; + static int initialized = 0; + + if (!initialized) { + MPI_Type_vector(dsize[0] - 2, 1, dsize[1], MPI_DOUBLE, &column); + MPI_Type_commit(&column); + MPI_Type_contiguous(dsize[1] - 2, MPI_DOUBLE, &row); + MPI_Type_commit(&row); + initialized = 1; + } + + // send down + MPI_Cart_shift(cart_comm, 0, 1, &rank_source, &rank_dest); + MPI_Sendrecv(&cur[dsize[0] - 2][1], 1, row, rank_dest, 100, // send row before ghost + &cur[0][1], 1, row, rank_source, 100, // receive 1st row (ghost) + cart_comm, &status); + + // send up + MPI_Cart_shift(cart_comm, 0, -1, &rank_source, &rank_dest); + MPI_Sendrecv(&cur[1][1], 1, row, rank_dest, 100, // send row after ghost + &cur[dsize[0] - 1][1], 1, row, rank_source, 100, // receive last row (ghost) + cart_comm, &status); + + // send to the right + MPI_Cart_shift(cart_comm, 1, 1, &rank_source, &rank_dest); + MPI_Sendrecv(&cur[1][dsize[1] - 2], 1, column, rank_dest, 100, // send column before ghost + &cur[1][0], 1, column, rank_source, 100, // receive 1st column (ghost) + 
cart_comm, &status); + + // send to the left + MPI_Cart_shift(cart_comm, 1, -1, &rank_source, &rank_dest); + MPI_Sendrecv(&cur[1][1], 1, column, rank_dest, 100, // send column after ghost + &cur[1][dsize[1] - 1], 1, column, rank_source, 100, // receive last column (ghost) + cart_comm, &status); +} + +void create_mesh_and_ghost_type( double coords_x[dsize[0]+1][dsize[1]+1], double coords_y[dsize[0]+1][dsize[1]+1], + uint8_t ghosts_type_cells[dsize[0]][dsize[1]]){ + + // catalyst variables + int cells_ghost=1; + + size_t number_of_points[2]; + number_of_points[0] = dsize[0]+1; + number_of_points[1] = dsize[1]+1; + size_t total_number_of_points = number_of_points[0]*number_of_points[1]; + + // the first axis correspond to the y-coordinate. + for(int ix=0; ix Date: Mon, 15 Sep 2025 15:28:07 +0200 Subject: [PATCH 12/31] change data name for ghosts --- plugins/catalyst/CMakeLists.txt | 1 + .../config_init.yml | 5 ++--- .../test_tuto_september_2025_ghost_attributes/main.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 1ad9d2703..6ac3823ad 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -19,4 +19,5 @@ install(TARGETS pdi_catalyst_plugin if(BUILD_TESTING) enable_testing() add_subdirectory(test) + add_subdirectory(test_tuto_september_2025_ghost_attributes) endif() diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml index 9f4852724..b3dab65b3 100644 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml @@ -15,10 +15,9 @@ pdi: data: iteration: int temp: {type: array, subtype: double, size: ['$local_size[0]', '$local_size[1]']} - total_number_of_points: size_t cycle: int time: double - ghosts: { type: array, subtype: uint8_t, 
size: '$local_size[0]*$local_size[1]' } + mask_ghosts: { type: array, subtype: uint8_t, size: '$local_size[0]*$local_size[1]' } coords_x: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } coords_y: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } plugins: @@ -68,5 +67,5 @@ pdi: topology: "my_mesh" volume_dependent: "false" values: - PDI_data_array: "ghosts" + PDI_data_array: "mask_ghosts" size: '$local_size[0]*$local_size[1]' \ No newline at end of file diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c index 092da6686..cb3458c8e 100644 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c +++ b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c @@ -273,7 +273,7 @@ int main(int argc, char *argv[]) { PDI_multi_expose("catalyst_execute", "cycle", &ii, PDI_OUT, "time", &time, PDI_OUT, "temp", cur, PDI_OUT, - "ghosts", ghost_type_cells, PDI_OUT, + "mask_ghosts", ghost_type_cells, PDI_OUT, "coords_x", coords_x, PDI_OUT, "coords_y", coords_y, PDI_OUT, NULL); @@ -294,7 +294,7 @@ int main(int argc, char *argv[]) { PDI_multi_expose("catalyst_execute", "cycle", &ii, PDI_OUT, "time", &time, PDI_OUT, "temp", cur, PDI_OUT, - "ghosts", ghost_type_cells, PDI_OUT, + "mask_ghosts", ghost_type_cells, PDI_OUT, "coords_x", coords_x, PDI_OUT, "coords_y", coords_y, PDI_OUT, NULL); From c598d9a5499f8eae2b9d0d892db19a442afb31f9 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Wed, 19 Nov 2025 10:47:58 +0100 Subject: [PATCH 13/31] Remove example of the tututorial, fix bug of Catalyst_DIR, clean the output --- plugins/catalyst/CMakeLists.txt | 3 +- plugins/catalyst/pdi_catalyst_plugin.cxx | 81 +++-- plugins/catalyst/test/Attributes.cxx | 14 +- plugins/catalyst/test/PDIAdaptor.cxx | 2 +- .../CMakeLists.txt | 41 --- .../catalyst_pipeline.py | 20 -- .../catalyst_pipeline_with_rendering.py | 99 ------ 
.../config_init.yml | 71 ---- .../main.c | 315 ------------------ .../run_test.py | 18 - .../run_test_mpi.py | 18 - 11 files changed, 56 insertions(+), 626 deletions(-) delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/run_test.py delete mode 100644 plugins/catalyst/test_tuto_september_2025_ghost_attributes/run_test_mpi.py diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 6ac3823ad..998ebb674 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.13) project(pdi_catalyst_plugin LANGUAGES C CXX) find_package(PDI REQUIRED COMPONENTS plugins) -find_package(Catalyst REQUIRED) +find_package(catalyst REQUIRED) if(NOT ${CATALYST_USE_MPI}) message(FATAL_ERROR "No MPI support in your Catalyst library, please activate MPI in your Catalyst build.") endif() @@ -19,5 +19,4 @@ install(TARGETS pdi_catalyst_plugin if(BUILD_TESTING) enable_testing() add_subdirectory(test) - add_subdirectory(test_tuto_september_2025_ghost_attributes) endif() diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index ee69b9a73..fe6fe84f3 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -45,7 +45,6 @@ void catalyst_plugin::ProcessData(const std::string& data_name, PDI::Ref ref) void catalyst_plugin::ProcessEvent(const std::string& event_name) { - context().logger().trace("####### call 
catalyst_plugin::ProcessEvent ######"); if (event_name == this->PDIExecuteEventName) { RunCatalystExecute(); @@ -107,11 +106,12 @@ void catalyst_plugin::RunCatalystExecute() case YAML_PLAIN_SCALAR_STYLE: // handle integer or float/double type that doesn't depend on PDI store { - PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Expression data_expression{ PDI::to_string(current.tree) }; PDI::Ref_r spec_ref = data_expression.to_ref(context()); auto data_type = spec_ref.type()->evaluate(context()); - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) + if (auto scalar_datatype = + std::dynamic_pointer_cast(data_type)) { PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); if (scalar_kind == PDI::Scalar_kind::SIGNED) @@ -120,8 +120,10 @@ void catalyst_plugin::RunCatalystExecute() } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { - context().logger().error("The expression {} is defined as unsigned integer.", PDI::to_string(current.tree)); - // context().logger().trace("The expression {} is defined with unsigned integer. It is tranformed to signed integer.", PDI::to_string(current.tree)); + context().logger().error("The expression {} is defined as unsigned integer.", + PDI::to_string(current.tree)); + // context().logger().trace("The expression {} is defined with unsigned integer. 
+ // It is tranformed to signed integer.", PDI::to_string(current.tree)); // current_node.set_int64(data_expression.to_long(context())); } else if (scalar_kind == PDI::Scalar_kind::FLOAT) @@ -130,30 +132,35 @@ void catalyst_plugin::RunCatalystExecute() } else { - context().logger().error("Unknown Scalar Type for variable {}", PDI::to_string(current.tree)); + context().logger().error( + "Unknown Scalar Type for variable {}", PDI::to_string(current.tree)); } } else { - context().logger().error("Unsupported datatype for variable: {}", PDI::to_string(current.tree)); + context().logger().error( + "Unsupported datatype for variable: {}", PDI::to_string(current.tree)); } } break; case YAML_SINGLE_QUOTED_SCALAR_STYLE: // handle integer or float/double type that depend on scalar PDI data { - std::string data_name{PDI::to_string(current.tree)}; - PDI::Expression data_expression{PDI::to_string(current.tree)}; + std::string data_name{ PDI::to_string(current.tree) }; + PDI::Expression data_expression{ PDI::to_string(current.tree) }; PDI::Ref_r spec_ref = data_expression.to_ref(context()); auto data_type = spec_ref.type()->evaluate(context()); - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) + if (auto scalar_datatype = + std::dynamic_pointer_cast(data_type)) { - FillNodeWithScalarPDIData(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + FillNodeWithScalarPDIData( + conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); } else { - context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); + context().logger().error( + "Unsupported datatype for variable: {}. 
It should be scalar type.", data_name); } } break; @@ -173,14 +180,14 @@ void catalyst_plugin::RunCatalystExecute() case YAML_MAPPING_NODE: int data_tree_size = PDI::len(current.tree); // Check for dynamic PDI Data array - bool pdi_data_array=false; + bool pdi_data_array = false; for (int i = data_tree_size - 1; i >= 0; --i) { auto key = PC_get(current.tree, "{%d}", i); if (PDI::to_string(key) == "PDI_data_array") { this->FillNodeWithPDIDataArray(conduit_cpp::c_node(¤t_node), current.tree); - pdi_data_array=true; + pdi_data_array = true; break; // break the loop } } @@ -194,11 +201,9 @@ void catalyst_plugin::RunCatalystExecute() { auto key = PC_get(current.tree, "{%d}", i); auto value = PC_get(current.tree, "<%d>", i); - std::cout << "Mapping Node: key="<< PDI::to_string(key) << std::endl; remainingTreeAndParentNode.push( { value, PDI::to_string(key), conduit_cpp::c_node(¤t_node) }); } - std::cout << "End YAML_MAPPING_NODE" << std::endl; break; } } @@ -239,7 +244,8 @@ void catalyst_plugin::FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tre return; } - std::string name = PDI::to_string(name_spec); // Jacques: Perhaps we need an expression if the users add an index for example "my_name${index}". + std::string name = PDI::to_string(name_spec); // Jacques: Perhaps we need an expression if the + // users add an index for example "my_name${index}". auto it = this->CurrentPDIData.find(name); if (it == this->CurrentPDIData.end()) { @@ -262,7 +268,8 @@ void catalyst_plugin::FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tre } else { - context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); + context().logger().error( + "Unsupported datatype for variable: {}. 
The type should be array type.", name); } } @@ -368,7 +375,8 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st } else { - context().logger().error("Unknown the number of elements for variable{} passed to catalyst.", name); + context().logger().error( + "Unknown the number of elements for variable{} passed to catalyst.", name); } conduit_index_t offset = 0; @@ -495,7 +503,7 @@ long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::strin { if (spec.node->type == YAML_SCALAR_NODE) { - PDI::Expression data_expression{PDI::to_string(spec)}; + PDI::Expression data_expression{ PDI::to_string(spec) }; PDI::Ref_r spec_ref = data_expression.to_ref(context()); if (!spec_ref) { @@ -505,24 +513,27 @@ long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::strin auto data_type = spec_ref.type()->evaluate(context()); if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) - { - return data_expression.to_long(context()); - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) - { - // Jacques: auto value = ref_r.scalar_value();?? a utiliser en fonction du buffersize ?? - return data_expression.to_long(context()); - } - else - { - context().logger().error("Unknown Scalar Type for variable {}. The type must be an integer", PDI::to_string(spec)); - } + PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) + { + return data_expression.to_long(context()); + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) + { + // Jacques: auto value = ref_r.scalar_value();?? a utiliser en fonction du buffersize + // ?? + return data_expression.to_long(context()); + } + else + { + context().logger().error( + "Unknown Scalar Type for variable {}. 
The type must be an integer", PDI::to_string(spec)); + } } else { - context().logger().error("The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); + context().logger().error( + "The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); } return 0; } diff --git a/plugins/catalyst/test/Attributes.cxx b/plugins/catalyst/test/Attributes.cxx index 31d009ad9..ba837a5cf 100644 --- a/plugins/catalyst/test/Attributes.cxx +++ b/plugins/catalyst/test/Attributes.cxx @@ -26,17 +26,19 @@ void Attributes::UpdateFields(double time) size_t numCells = this->GridPtr->GetNumberOfCells(); this->Pressure.resize(numCells); - double tmp_var=(numCells * time*0.5); + double tmp_var = (numCells * time * 0.5); size_t first_cells; - if( tmp_var < 0 ){ - first_cells=0; + if (tmp_var < 0) + { + first_cells = 0; } - else { - first_cells= (size_t) tmp_var; + else + { + first_cells = (size_t)tmp_var; } std::fill(this->Pressure.begin(), this->Pressure.end(), -1.f); - std::fill(this->Pressure.begin()+first_cells, this->Pressure.end(), 1.f); + std::fill(this->Pressure.begin() + first_cells, this->Pressure.end(), 1.f); } double* Attributes::GetVelocityArray() diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/test/PDIAdaptor.cxx index f50ae6361..de383212e 100644 --- a/plugins/catalyst/test/PDIAdaptor.cxx +++ b/plugins/catalyst/test/PDIAdaptor.cxx @@ -3,9 +3,9 @@ #include "Attributes.h" #include "Grid.h" +#include #include #include -#include namespace PDIAdaptor { diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt deleted file mode 100644 index 40873b73d..000000000 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/CMakeLists.txt +++ /dev/null @@ -1,41 +0,0 @@ -#============================================================================= -# Copyright (C) 2015-2023 Commissariat a l'energie atomique et aux 
energies alternatives (CEA) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-#============================================================================= - -cmake_minimum_required(VERSION 3.16) -#project(pdi_init LANGUAGES C) - -#find_package(spdlog) -find_package(MPI REQUIRED COMPONENTS C) -#find_package(paraconf 1.0.0 REQUIRED COMPONENTS C) -#find_package(PDI 1.9.0 REQUIRED COMPONENTS C) - -set(CMAKE_C_STANDARD 99) - -add_executable(tuto_main_ghost_attributes main.c) -target_link_libraries(tuto_main_ghost_attributes m MPI::MPI_C paraconf::paraconf PDI::pdi) - -set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes) -configure_file(config_init.yml config.yml) - -find_package(Python3 COMPONENTS Interpreter) -add_test(NAME Test_tuto_september_2025_ghost_attributes COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/run_test.py" "${CMAKE_BINARY_DIR}/test_tuto_september_2025_ghost_attributes/" "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/") -add_test(NAME Test_tuto_september_2025_ghost_attributes_MPI COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/run_test_mpi.py" "${CMAKE_BINARY_DIR}/test_tuto_september_2025_ghost_attributes/" "${CMAKE_SOURCE_DIR}/test_tuto_september_2025_ghost_attributes/" "${MPIEXEC_EXECUTABLE}") diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py deleted file mode 100644 index d815a4ec9..000000000 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline.py +++ /dev/null @@ -1,20 +0,0 @@ -from paraview.simple import * - -# Greeting to ensure that ctest knows this script is being imported -print("executing catalyst_pipeline") - -# registrationName must match the channel name used in the -# 'CatalystAdaptor'. 
-producer = TrivialProducer(registrationName="grid") - -def catalyst_execute(info): - global producer - producer.UpdatePipeline() - print("-----------------------------------") - print("executing (cycle={}, time={})".format(info.cycle, info.time)) - print("bounds:", producer.GetDataInformation().GetBounds()) -# print("velocity-magnitude-range:", producer.PointData["velocity"].GetRange(-1)) - print("temperature-range:", producer.CellData["temperature"].GetRange(0)) -# make sure that the ghost array has been renamed to vtkGhostType - print("temperature-range22:", producer.CellData["vtkGhostType"].GetRange(0)) - diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py deleted file mode 100644 index 1c482e679..000000000 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/catalyst_pipeline_with_rendering.py +++ /dev/null @@ -1,99 +0,0 @@ -# script-version: 2.0 -from paraview.simple import * -from paraview import catalyst -import time - -# registrationName must match the channel name used in the -# 'CatalystAdaptor'. 
-producer = TrivialProducer(registrationName="grid") - -# ---------------------------------------------------------------- -# setup views used in the visualization -# ---------------------------------------------------------------- - -jj_ny = 60 + 2 -jj_nx = 12 + 2 - -div_jj_nx = jj_nx/2 -div_jj_ny = jj_ny/2 - -jj_pos = 1.2*jj_nx*900/120 -jj_focal_point = 2*jj_nx*240/120 - -# ######## render view temperature - -# Create a new 'Render View' -renderView1 = CreateView('RenderView') -renderView1.ViewSize = [1000,1600] -renderView1.CameraPosition = [div_jj_nx, div_jj_ny, jj_pos] -renderView1.CameraFocalPoint = [div_jj_nx, div_jj_ny, jj_focal_point] -renderView1.CameraParallelScale = 100 - -# get color transfer function/color map for 'temperature' -temperatureLUT = GetColorTransferFunction('temperature') -## RGB: first line: min value, last line: max value -temperatureLUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, - 100.0, 0.865003, 0.865003, 0.865003, - 200.0, 0.705882, 0.0156863, 0.14902] -temperatureLUT.ScalarRangeInitialized = 1.0 - -# show data from grid -# gridDisplay222 = Show(producer, renderView1, 'UnstructuredGridRepresentation') -gridDisplay222 = Show(producer, renderView1, 'StructuredGridRepresentation') - -gridDisplay222.Representation = 'Surface With Edges' -gridDisplay222.ColorArrayName = ['CELLS', 'temperature'] -gridDisplay222.LookupTable = temperatureLUT - -# get color legend/bar for temperatureLUT in view renderView1 -temperatureLUTColorBar = GetScalarBar(temperatureLUT, renderView1) -temperatureLUTColorBar.Title = 'temperature' -temperatureLUTColorBar.ComponentTitle = 'Magnitude' - -# set color bar visibility -temperatureLUTColorBar.Visibility = 1 - -# show color legend -gridDisplay222.SetScalarBarVisibility(renderView1, True) - - -# # ---------------------------------------------------------------- -# # setup extractors -# # ---------------------------------------------------------------- - -SetActiveView(renderView1) -# create extractor -pNG2= 
CreateExtractor('PNG', renderView1, registrationName='PNG2') -# trace defaults for the extractor. -pNG2.Trigger = 'TimeStep' - -# init the 'PNG' selected for 'Writer' -pNG2.Writer.FileName = 'temperature_screenshot_{timestep:06d}.png' -pNG2.Writer.ImageResolution = [1000,1600] -pNG2.Writer.Format = 'PNG' - - -# ------------------------------------------------------------------------------ -# Catalyst options -options = catalyst.Options() -## 0: pas de client, generration des images -## 1: interactif -options.EnableCatalystLive = 0 - - -# Greeting to ensure that ctest knows this script is being imported -print("#############################################################") -print("executing catalyst_pipeline") -print("#############################################################") -def catalyst_execute(info): - global producer - producer.UpdatePipeline() - print("-----------------------------------") - print("executing (cycle={}, time={})".format(info.cycle, info.time)) - print("bounds:", producer.GetDataInformation().GetBounds()) - print("temperature-range:", producer.CellData["temperature"].GetRange(0)) - # In a real simulation sleep is not needed. We use it here to slow down the - # "simulation" and make sure ParaView client can catch up with the produced - # results instead of having all of them flashing at once. 
- if options.EnableCatalystLive: - time.sleep(1) diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml deleted file mode 100644 index b3dab65b3..000000000 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/config_init.yml +++ /dev/null @@ -1,71 +0,0 @@ -# the alpha parameter -alpha: 0.125 -# global data-size (excluding the number of ghost layers for boundary conditions) -global_size: { height: 60, width: 12 } -# degree of parallelism (number of blocks in each dimension) -parallelism: { height: 2, width: 2 } - -pdi: - metadata: - local_size: {type: array, subtype: int, size: 2} - rank: int - pcoord: {type: array, subtype: int, size: 2} - psize: {type: array, subtype: int, size: 2} - max_iter: int - data: - iteration: int - temp: {type: array, subtype: double, size: ['$local_size[0]', '$local_size[1]']} - cycle: int - time: double - mask_ghosts: { type: array, subtype: uint8_t, size: '$local_size[0]*$local_size[1]' } - coords_x: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } - coords_y: { type: array, subtype: double, size: '($local_size[0]+1)*($local_size[1]+1)' } - plugins: - trace: - mpi: - catalyst: - scripts: - script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" - PDI_execute_event_name: "catalyst_execute" - execute: - state: - timestep: '$cycle' - time: '$time' - multiblock: 1 - channels: - grid: - type: "mesh" - data: - coordsets: - my_coords: - type: "explicit" - values: - x: - PDI_data_array: "coords_x" - size: '($local_size[0]+1)*($local_size[1]+1)' - y: - PDI_data_array: "coords_y" - size: '($local_size[0]+1)*($local_size[1]+1)' - topologies: - my_mesh: - type: "structured" - coordset: "my_coords" - elements: - dims: - i: '$local_size[1]' - j: '$local_size[0]' - fields: - temperature: - association: "element" - topology: "my_mesh" - volume_dependent: "false" - values: - 
PDI_data_array: "temp" - size: '$local_size[0]*$local_size[1]' - vtkGhostType: - association: "element" - topology: "my_mesh" - volume_dependent: "false" - values: - PDI_data_array: "mask_ghosts" - size: '$local_size[0]*$local_size[1]' \ No newline at end of file diff --git a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c b/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c deleted file mode 100644 index cb3458c8e..000000000 --- a/plugins/catalyst/test_tuto_september_2025_ghost_attributes/main.c +++ /dev/null @@ -1,315 +0,0 @@ -/******************************************************************************* - * Copyright (C) 2015-2025 Commissariat a l'energie atomique et aux energies - * alternatives (CEA) - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- ******************************************************************************/ - -#include - -#include -#include -#include -#include -#include - -#include -#include - -// size of the local data as [HEIGHT, WIDTH] including the number of ghost -// layers for communications or boundary conditions -int dsize[2]; - -// 2D size of the process grid as [HEIGHT, WIDTH] -int psize[2]; - -// 2D rank of the local process in the process grid as [YY, XX] -int pcoord[2]; - -// the alpha coefficient used in the computation -double alpha; - -double L = 1.0; -// definition of the source -// the source corresponds to a disk of an uniform value -// source1: center=(0.4,0.4), radius=0.2 and value=100 -double source1[4] = {0.4, 0.4, 0.2, 100}; -// source2: center=(0.8,0.7), radius=0.1 and value=200 -double source2[4] = {0.7, 0.8, 0.1, 200}; -// the order of the coordinates of the center (XX,YY) is inverted in the vector - -/** Initialize all the data to 0, with the exception of each cells - * whose center (cpos_x,cpos_y) is inside of the disks - * defined by source1 or source2 - * \param[out] dat the local data to initialize - */ -void init(double dat[dsize[0]][dsize[1]]) { - for (int yy = 0; yy < dsize[0]; ++yy) - for (int xx = 0; xx < dsize[1]; ++xx) - dat[yy][xx] = 0; - double dy = L / ((dsize[0] - 2) * psize[0]); - double dx = L / ((dsize[1] - 2) * psize[1]); - - double cpos_x, cpos_y; - double square_dist1, square_dist2; - for (int yy = 0; yy < dsize[0]; ++yy) { - cpos_y = (yy + pcoord[0] * (dsize[0] - 2)) * dy - 0.5 * dy; - for (int xx = 0; xx < dsize[1]; ++xx) { - cpos_x = (xx + pcoord[1] * (dsize[1] - 2)) * dx - 0.5 * dx; - square_dist1 = (cpos_y - source1[0]) * (cpos_y - source1[0]) + - (cpos_x - source1[1]) * (cpos_x - source1[1]); - if (square_dist1 <= source1[2] * source1[2]) { - dat[yy][xx] = source1[3]; - } - square_dist2 = (cpos_y - source2[0]) * (cpos_y - source2[0]) + - (cpos_x - source2[1]) * (cpos_x - source2[1]); - if (square_dist2 <= source2[2] * source2[2]) { - 
dat[yy][xx] = source2[3]; - } - } - } -} - -/** Compute the values at the next time-step based on the values at the current - * time-step - * \param[in] cur the local data at the current time-step - * \param[out] next the local data at the next time-step - */ -void iter(double cur[dsize[0]][dsize[1]], double next[dsize[0]][dsize[1]]) { - int xx, yy; - for (yy = 1; yy < dsize[0] - 1; ++yy) { - for (xx = 1; xx < dsize[1] - 1; ++xx) { - next[yy][xx] = (1. - 4. * alpha) * cur[yy][xx] + - alpha * (cur[yy][xx - 1] + cur[yy][xx + 1] + - cur[yy - 1][xx] + cur[yy + 1][xx]); - } - } -} - -/** Exchange ghost values with neighbours - * \param[in] cart_comm the MPI communicator with all processes organized in a - * 2D Cartesian grid - * \param[in] cur the local data at the current time-step - * whose ghosts need exchanging - */ -void exchange(MPI_Comm cart_comm, double cur[dsize[0]][dsize[1]]) { - MPI_Status status; - int rank_source, rank_dest; - static MPI_Datatype column, row; - static int initialized = 0; - - if (!initialized) { - MPI_Type_vector(dsize[0] - 2, 1, dsize[1], MPI_DOUBLE, &column); - MPI_Type_commit(&column); - MPI_Type_contiguous(dsize[1] - 2, MPI_DOUBLE, &row); - MPI_Type_commit(&row); - initialized = 1; - } - - // send down - MPI_Cart_shift(cart_comm, 0, 1, &rank_source, &rank_dest); - MPI_Sendrecv(&cur[dsize[0] - 2][1], 1, row, rank_dest, 100, // send row before ghost - &cur[0][1], 1, row, rank_source, 100, // receive 1st row (ghost) - cart_comm, &status); - - // send up - MPI_Cart_shift(cart_comm, 0, -1, &rank_source, &rank_dest); - MPI_Sendrecv(&cur[1][1], 1, row, rank_dest, 100, // send row after ghost - &cur[dsize[0] - 1][1], 1, row, rank_source, 100, // receive last row (ghost) - cart_comm, &status); - - // send to the right - MPI_Cart_shift(cart_comm, 1, 1, &rank_source, &rank_dest); - MPI_Sendrecv(&cur[1][dsize[1] - 2], 1, column, rank_dest, 100, // send column before ghost - &cur[1][0], 1, column, rank_source, 100, // receive 1st column (ghost) - 
cart_comm, &status); - - // send to the left - MPI_Cart_shift(cart_comm, 1, -1, &rank_source, &rank_dest); - MPI_Sendrecv(&cur[1][1], 1, column, rank_dest, 100, // send column after ghost - &cur[1][dsize[1] - 1], 1, column, rank_source, 100, // receive last column (ghost) - cart_comm, &status); -} - -void create_mesh_and_ghost_type( double coords_x[dsize[0]+1][dsize[1]+1], double coords_y[dsize[0]+1][dsize[1]+1], - uint8_t ghosts_type_cells[dsize[0]][dsize[1]]){ - - // catalyst variables - int cells_ghost=1; - - size_t number_of_points[2]; - number_of_points[0] = dsize[0]+1; - number_of_points[1] = dsize[1]+1; - size_t total_number_of_points = number_of_points[0]*number_of_points[1]; - - // the first axis correspond to the y-coordinate. - for(int ix=0; ix Date: Wed, 19 Nov 2025 12:05:44 +0100 Subject: [PATCH 14/31] remove comment for pressure plot --- .../test/catalyst_pipeline_with_rendering.py | 55 ------------------- 1 file changed, 55 deletions(-) diff --git a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py index cacf33810..a67be29f4 100644 --- a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py +++ b/plugins/catalyst/test/catalyst_pipeline_with_rendering.py @@ -59,61 +59,6 @@ pNG1.Writer.ImageResolution = [1600,800] pNG1.Writer.Format = 'PNG' -# # ######## render view pressure - -# # Create a new 'Render View' -# renderView2 = CreateView('RenderView') -# renderView2.ViewSize = [1600,800] -# renderView2.CameraPosition = [157.90070691620653, 64.91180236667495, 167.90421495515105] -# renderView2.CameraFocalPoint = [19.452526958533134, 28.491610229010647, 10.883993417012459] -# renderView2.CameraViewUp = [0.07934883419275315, 0.953396338566962, -0.2910999555468221] -# renderView2.CameraFocalDisk = 1.0 -# renderView2.CameraParallelScale = 54.99504523136608 - -# # get color transfer function/color map for 'velocity' -# pressureLUT = GetColorTransferFunction('pressure') -# 
pressureLUT.RGBPoints = [-2.0, 0.231373, 0.298039, 0.752941, -# 0.0, 0.865003, 0.865003, 0.865003, -# 2.0, 0.705882, 0.0156863, 0.14902] -# pressureLUT.ScalarRangeInitialized = 1.0 - -# # show data from grid -# gridDisplay222 = Show(producer, renderView2, 'UnstructuredGridRepresentation') ## pourquoi pas uniform possible -# #gridDisplay222 = Show(producer, renderView2, 'StructuredGridRepresentation') ## pourquoi pas uniform possible - - -# gridDisplay222.Representation = 'Surface' -# gridDisplay222.ColorArrayName = ['CELLS', 'pressure'] -# gridDisplay222.LookupTable = pressureLUT - -# # get color legend/bar for pressureLUT in view renderView2 -# pressureLUTColorBar = GetScalarBar(pressureLUT, renderView2) -# pressureLUTColorBar.Title = 'pressure' -# pressureLUTColorBar.ComponentTitle = 'Magnitude' - -# # set color bar visibility -# pressureLUTColorBar.Visibility = 1 - -# # show color legend -# gridDisplay222.SetScalarBarVisibility(renderView2, True) - - -# # # ---------------------------------------------------------------- -# # # setup extractors -# # # ---------------------------------------------------------------- - -# SetActiveView(renderView2) -# # create extractor -# pNG2= CreateExtractor('PNG', renderView2, registrationName='PNG2') -# # trace defaults for the extractor. 
-# pNG2.Trigger = 'TimeStep' - -# # init the 'PNG' selected for 'Writer' -# pNG2.Writer.FileName = 'pressure_screenshot_{timestep:06d}.png' -# pNG2.Writer.ImageResolution = [1600,800] -# pNG2.Writer.Format = 'PNG' - - # ------------------------------------------------------------------------------ # Catalyst options options = catalyst.Options() From d150b394092e96849f9a020fcfa0af29b7839e03 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Fri, 21 Nov 2025 15:00:16 +0100 Subject: [PATCH 15/31] Fix #482, fix suggestion --- AUTHORS | 6 +++--- CHANGELOG.md | 4 ++-- plugins/catalyst/CMakeLists.txt | 14 +++++++++----- plugins/catalyst/pdi_catalyst_plugin.cxx | 3 +-- plugins/catalyst/{test => tests}/Attributes.cxx | 0 plugins/catalyst/{test => tests}/Attributes.h | 0 plugins/catalyst/{test => tests}/CMakeLists.txt | 15 ++++++++++----- plugins/catalyst/{test => tests}/Grid.cxx | 0 plugins/catalyst/{test => tests}/Grid.h | 0 plugins/catalyst/{test => tests}/PDIAdaptor.cxx | 0 plugins/catalyst/{test => tests}/PDIAdaptor.h | 0 .../catalyst/{test => tests}/catalyst_pipeline.py | 0 .../catalyst_pipeline_with_rendering.py | 2 +- plugins/catalyst/{test => tests}/main.cxx | 0 plugins/catalyst/{test => tests}/pdi.yml.in | 2 +- .../references/execute_reference.json | 0 .../references/execute_reference_rank0.json | 0 .../references/execute_reference_rank1.json | 0 .../references/execute_reference_rank2.json | 0 .../references/execute_reference_rank3.json | 0 .../references/finalize_reference.json | 0 .../references/initialize_reference.json | 0 plugins/catalyst/{test => tests}/run_test.py | 0 plugins/catalyst/{test => tests}/run_test_mpi.py | 0 24 files changed, 27 insertions(+), 19 deletions(-) rename plugins/catalyst/{test => tests}/Attributes.cxx (100%) rename plugins/catalyst/{test => tests}/Attributes.h (100%) rename plugins/catalyst/{test => tests}/CMakeLists.txt (50%) rename plugins/catalyst/{test => tests}/Grid.cxx (100%) rename plugins/catalyst/{test => tests}/Grid.h 
(100%) rename plugins/catalyst/{test => tests}/PDIAdaptor.cxx (100%) rename plugins/catalyst/{test => tests}/PDIAdaptor.h (100%) rename plugins/catalyst/{test => tests}/catalyst_pipeline.py (100%) rename plugins/catalyst/{test => tests}/catalyst_pipeline_with_rendering.py (98%) rename plugins/catalyst/{test => tests}/main.cxx (100%) rename plugins/catalyst/{test => tests}/pdi.yml.in (98%) rename plugins/catalyst/{test => tests}/references/execute_reference.json (100%) rename plugins/catalyst/{test => tests}/references/execute_reference_rank0.json (100%) rename plugins/catalyst/{test => tests}/references/execute_reference_rank1.json (100%) rename plugins/catalyst/{test => tests}/references/execute_reference_rank2.json (100%) rename plugins/catalyst/{test => tests}/references/execute_reference_rank3.json (100%) rename plugins/catalyst/{test => tests}/references/finalize_reference.json (100%) rename plugins/catalyst/{test => tests}/references/initialize_reference.json (100%) rename plugins/catalyst/{test => tests}/run_test.py (100%) rename plugins/catalyst/{test => tests}/run_test_mpi.py (100%) diff --git a/AUTHORS b/AUTHORS index b167fab30..0a7b26582 100644 --- a/AUTHORS +++ b/AUTHORS @@ -28,6 +28,9 @@ Benoit Martin - CEA (bmartin@cea.fr) François Mazen - Kitware (francois.mazen@kitware.com) * Catalyst plugin +François Mazen - Kitware (francois.mazen@kitware.com) +* Catalyst plugin creation + François-Xavier Mordant - CEA (francois-xavier.mordant@cea.fr) * Fixed CMake issues, internal API enhancement * Bug fix, JSON plugin @@ -114,6 +117,3 @@ Benedikt Steinbusch - FZJ (b.steinbusch@fz-juelich.de) Yushan Wang - CEA (yushan.wang@cea.fr) * Maintainer (Sept. 2023 - ...) 
-* enable HDF5 subfiling -* Add native compression support in Decl'NetCDF - diff --git a/CHANGELOG.md b/CHANGELOG.md index 58ef0c66e..2b6ccb24d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,8 @@ and this project adheres to ## [Unreleased] -### For users +### Added +* Add Catalyst plugin from Kitware [#496](https://github.com/pdidev/pdi/pull/496) #### Added @@ -268,7 +269,6 @@ and this project adheres to * NetCDF plugin now support the size_of attribute, fixes [#446](https://gitlab.maisondelasimulation.fr/pdidev/pdi/-/issues/446) * Deisa plugin for in-situ analysis using Dask -* Add Catalyst plugin from Kitware #### Changed * Update the version of dependencies according to our policy: oldest supported diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 998ebb674..435926efc 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -1,22 +1,26 @@ -cmake_minimum_required(VERSION 3.13) +cmake_minimum_required(VERSION 3.16...3.29) project(pdi_catalyst_plugin LANGUAGES C CXX) +# PDI find_package(PDI REQUIRED COMPONENTS plugins) +# Catalyst find_package(catalyst REQUIRED) if(NOT ${CATALYST_USE_MPI}) - message(FATAL_ERROR "No MPI support in your Catalyst library, please activate MPI in your Catalyst build.") + message(WARNING "No MPI support in your Catalyst library, please activate MPI in your Catalyst build if you want to use in parallel.") endif() -add_library(pdi_catalyst_plugin MODULE pdi_catalyst_plugin.h pdi_catalyst_plugin.cxx) +# The Plugin +add_library(pdi_catalyst_plugin MODULE pdi_catalyst_plugin.cxx) target_link_libraries(pdi_catalyst_plugin PDI::PDI_plugins catalyst::catalyst) -# installation +# Installation set(INSTALL_PDIPLUGINDIR "${PDI_DEFAULT_PLUGINDIR}" CACHE PATH "PDI plugins (${PDI_DEFAULT_PLUGINDIR})") install(TARGETS pdi_catalyst_plugin LIBRARY DESTINATION "${INSTALL_PDIPLUGINDIR}" ) +# Tests if(BUILD_TESTING) enable_testing() - add_subdirectory(test) + add_subdirectory(tests) 
endif() diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index fe6fe84f3..f4192e07b 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -2,7 +2,6 @@ #include "catalyst.hpp" -#include #include catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) @@ -547,7 +546,7 @@ long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::strin std::string catalyst_plugin::ReadPDIExecuteEventName() { std::string eventName; - auto execute_spec = PC_get(this->SpecTree, ".PDI_execute_event_name"); + auto execute_spec = PC_get(this->SpecTree, ".on_event"); if (PC_status(execute_spec) == PC_OK) { eventName = PDI::to_string(execute_spec); diff --git a/plugins/catalyst/test/Attributes.cxx b/plugins/catalyst/tests/Attributes.cxx similarity index 100% rename from plugins/catalyst/test/Attributes.cxx rename to plugins/catalyst/tests/Attributes.cxx diff --git a/plugins/catalyst/test/Attributes.h b/plugins/catalyst/tests/Attributes.h similarity index 100% rename from plugins/catalyst/test/Attributes.h rename to plugins/catalyst/tests/Attributes.h diff --git a/plugins/catalyst/test/CMakeLists.txt b/plugins/catalyst/tests/CMakeLists.txt similarity index 50% rename from plugins/catalyst/test/CMakeLists.txt rename to plugins/catalyst/tests/CMakeLists.txt index 48a1cffe3..01b9013e2 100644 --- a/plugins/catalyst/test/CMakeLists.txt +++ b/plugins/catalyst/tests/CMakeLists.txt @@ -1,4 +1,4 @@ - +# Creation of executable add_executable(TestPDICatalyst Grid.cxx Grid.h @@ -8,8 +8,9 @@ add_executable(TestPDICatalyst PDIAdaptor.h PDIAdaptor.cxx) - +# MPI find_package(MPI COMPONENTS C CXX REQUIRED) + target_link_libraries(TestPDICatalyst PRIVATE MPI::MPI_C @@ -17,9 +18,13 @@ target_link_libraries(TestPDICatalyst paraconf::paraconf PDI::pdi) -set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/test) +set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests) configure_file(pdi.yml.in 
pdi.yml) find_package(Python3 COMPONENTS Interpreter) -add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test/run_test.py" "${CMAKE_BINARY_DIR}/test/" "${CMAKE_SOURCE_DIR}/test/") -add_test(NAME TestPDICatalystMPI COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/test/run_test_mpi.py" "${CMAKE_BINARY_DIR}/test/" "${CMAKE_SOURCE_DIR}/test/" "${MPIEXEC_EXECUTABLE}") +add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/") + +# test with MPI +if(${CATALYST_USE_MPI}) + add_test(NAME TestPDICatalystMPI COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test_mpi.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/" "${MPIEXEC_EXECUTABLE}") +endif() diff --git a/plugins/catalyst/test/Grid.cxx b/plugins/catalyst/tests/Grid.cxx similarity index 100% rename from plugins/catalyst/test/Grid.cxx rename to plugins/catalyst/tests/Grid.cxx diff --git a/plugins/catalyst/test/Grid.h b/plugins/catalyst/tests/Grid.h similarity index 100% rename from plugins/catalyst/test/Grid.h rename to plugins/catalyst/tests/Grid.h diff --git a/plugins/catalyst/test/PDIAdaptor.cxx b/plugins/catalyst/tests/PDIAdaptor.cxx similarity index 100% rename from plugins/catalyst/test/PDIAdaptor.cxx rename to plugins/catalyst/tests/PDIAdaptor.cxx diff --git a/plugins/catalyst/test/PDIAdaptor.h b/plugins/catalyst/tests/PDIAdaptor.h similarity index 100% rename from plugins/catalyst/test/PDIAdaptor.h rename to plugins/catalyst/tests/PDIAdaptor.h diff --git a/plugins/catalyst/test/catalyst_pipeline.py b/plugins/catalyst/tests/catalyst_pipeline.py similarity index 100% rename from plugins/catalyst/test/catalyst_pipeline.py rename to plugins/catalyst/tests/catalyst_pipeline.py diff --git a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py b/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py similarity index 98% rename from 
plugins/catalyst/test/catalyst_pipeline_with_rendering.py rename to plugins/catalyst/tests/catalyst_pipeline_with_rendering.py index a67be29f4..51769ac96 100644 --- a/plugins/catalyst/test/catalyst_pipeline_with_rendering.py +++ b/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py @@ -68,7 +68,6 @@ # Greeting to ensure that ctest knows this script is being imported -print("executing catalyst_pipeline") def catalyst_execute(info): global producer producer.UpdatePipeline() @@ -77,6 +76,7 @@ def catalyst_execute(info): print("bounds:", producer.GetDataInformation().GetBounds()) print("velocity-magnitude-range:", producer.PointData["velocity"].GetRange(-1)) print("pressure-range:", producer.CellData["pressure"].GetRange(0)) + # In a real simulation sleep is not needed. We use it here to slow down the # "simulation" and make sure ParaView client can catch up with the produced # results instead of having all of them flashing at once. diff --git a/plugins/catalyst/test/main.cxx b/plugins/catalyst/tests/main.cxx similarity index 100% rename from plugins/catalyst/test/main.cxx rename to plugins/catalyst/tests/main.cxx diff --git a/plugins/catalyst/test/pdi.yml.in b/plugins/catalyst/tests/pdi.yml.in similarity index 98% rename from plugins/catalyst/test/pdi.yml.in rename to plugins/catalyst/tests/pdi.yml.in index 20106801e..6ef8e25df 100644 --- a/plugins/catalyst/test/pdi.yml.in +++ b/plugins/catalyst/tests/pdi.yml.in @@ -31,7 +31,7 @@ plugins: catalyst: scripts: script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" - PDI_execute_event_name: "catalyst_execute" + on_event: "catalyst_execute" execute: state: timestep: '$cycle' diff --git a/plugins/catalyst/test/references/execute_reference.json b/plugins/catalyst/tests/references/execute_reference.json similarity index 100% rename from plugins/catalyst/test/references/execute_reference.json rename to plugins/catalyst/tests/references/execute_reference.json diff --git 
a/plugins/catalyst/test/references/execute_reference_rank0.json b/plugins/catalyst/tests/references/execute_reference_rank0.json similarity index 100% rename from plugins/catalyst/test/references/execute_reference_rank0.json rename to plugins/catalyst/tests/references/execute_reference_rank0.json diff --git a/plugins/catalyst/test/references/execute_reference_rank1.json b/plugins/catalyst/tests/references/execute_reference_rank1.json similarity index 100% rename from plugins/catalyst/test/references/execute_reference_rank1.json rename to plugins/catalyst/tests/references/execute_reference_rank1.json diff --git a/plugins/catalyst/test/references/execute_reference_rank2.json b/plugins/catalyst/tests/references/execute_reference_rank2.json similarity index 100% rename from plugins/catalyst/test/references/execute_reference_rank2.json rename to plugins/catalyst/tests/references/execute_reference_rank2.json diff --git a/plugins/catalyst/test/references/execute_reference_rank3.json b/plugins/catalyst/tests/references/execute_reference_rank3.json similarity index 100% rename from plugins/catalyst/test/references/execute_reference_rank3.json rename to plugins/catalyst/tests/references/execute_reference_rank3.json diff --git a/plugins/catalyst/test/references/finalize_reference.json b/plugins/catalyst/tests/references/finalize_reference.json similarity index 100% rename from plugins/catalyst/test/references/finalize_reference.json rename to plugins/catalyst/tests/references/finalize_reference.json diff --git a/plugins/catalyst/test/references/initialize_reference.json b/plugins/catalyst/tests/references/initialize_reference.json similarity index 100% rename from plugins/catalyst/test/references/initialize_reference.json rename to plugins/catalyst/tests/references/initialize_reference.json diff --git a/plugins/catalyst/test/run_test.py b/plugins/catalyst/tests/run_test.py similarity index 100% rename from plugins/catalyst/test/run_test.py rename to 
plugins/catalyst/tests/run_test.py diff --git a/plugins/catalyst/test/run_test_mpi.py b/plugins/catalyst/tests/run_test_mpi.py similarity index 100% rename from plugins/catalyst/test/run_test_mpi.py rename to plugins/catalyst/tests/run_test_mpi.py From 11517e50e7776f06aca5805d24415d204b304fe3 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Mon, 24 Nov 2025 09:24:03 +0100 Subject: [PATCH 16/31] remove clang-format and gitignore --- plugins/catalyst/.clang-format | 24 ------------------------ plugins/catalyst/.gitignore | 1 - 2 files changed, 25 deletions(-) delete mode 100644 plugins/catalyst/.clang-format delete mode 100644 plugins/catalyst/.gitignore diff --git a/plugins/catalyst/.clang-format b/plugins/catalyst/.clang-format deleted file mode 100644 index 6035c4080..000000000 --- a/plugins/catalyst/.clang-format +++ /dev/null @@ -1,24 +0,0 @@ -# Note: if you change any of the settings here, please reformat the entire -# codebase as part of the same commit, that will prevent subsequent commits -# from being flagged as being improperly formatted. - ---- -# This configuration requires clang-format 8.0 or higher. -BasedOnStyle: Mozilla -AlignAfterOpenBracket: DontAlign -AlignOperands: false -AlwaysBreakAfterReturnType: None -AlwaysBreakAfterDefinitionReturnType: None -BreakBeforeBraces: Allman -BinPackArguments: true -BinPackParameters: true -ColumnLimit: 100 -SpaceAfterTemplateKeyword: true -Standard: Cpp11 -StatementMacros: -- vtkAbstractTypeMacro -- vtkTypeMacro -- vtkBaseTypeMacro -- vtkAbstractTemplateTypeMacro -- vtkAbstractTypeMacroWithNewInstanceType -... 
diff --git a/plugins/catalyst/.gitignore b/plugins/catalyst/.gitignore deleted file mode 100644 index 40776c217..000000000 --- a/plugins/catalyst/.gitignore +++ /dev/null @@ -1 +0,0 @@ -test/__pycache__/ From 31bfc2c2e62775d4634730a4555ca583da11e7d6 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 2 Dec 2025 16:20:13 +0100 Subject: [PATCH 17/31] improve the method to take account the integer type --- plugins/catalyst/CMakeLists.txt | 6 +- plugins/catalyst/README.md | 19 +++++- plugins/catalyst/pdi_catalyst_plugin.cxx | 87 +++++++++++++++--------- plugins/catalyst/tests/CMakeLists.txt | 1 + plugins/catalyst/tests/run_test.py | 61 ++++++++++------- plugins/catalyst/tests/run_test_mpi.py | 68 ++++++++++-------- 6 files changed, 148 insertions(+), 94 deletions(-) diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 435926efc..8d136bf96 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -3,6 +3,10 @@ project(pdi_catalyst_plugin LANGUAGES C CXX) # PDI find_package(PDI REQUIRED COMPONENTS plugins) + +# Python CATALYST_WRAP_FORTRAN=ON +find_package(Python3 REQUIRED COMPONENTS Interpreter Development) + # Catalyst find_package(catalyst REQUIRED) if(NOT ${CATALYST_USE_MPI}) @@ -20,7 +24,7 @@ install(TARGETS pdi_catalyst_plugin ) # Tests -if(BUILD_TESTING) +if("${BUILD_TESTING}") enable_testing() add_subdirectory(tests) endif() diff --git a/plugins/catalyst/README.md b/plugins/catalyst/README.md index 3b8d722fb..a7cd9c66d 100644 --- a/plugins/catalyst/README.md +++ b/plugins/catalyst/README.md @@ -10,7 +10,7 @@ This PDI plugin pushes PDI shared data to the Catalyst 2 API. 
The goal is to lev - Configure with CMake with variables: * `PDI_DIR` points to `pdi/install/folder/share/pdi/cmake` * `paraconf_DIR` points to `pdi/install/folder/share/paraconf/cmake` - * `Catalyst_DIR` points to `catalyst/install/folder/lib/cmake/catalyst-2.0` + * `catalyst_DIR` points to `catalyst/install/folder/lib/cmake/catalyst-2.0` * optional: `BUILD_TESTING=ON` to build the example test * in case of you used vendored version of libraries during your PDI build, instead of system libraries, you may have to define additional PDI dependencies locations. For example `spdlog_DIR` CMake variable for the spdlog library. - Build with `make` or `ninja` @@ -36,11 +36,24 @@ However, Catalyst requires additional semantic about meanings of the data, to ma The current approach is to add this semantic to the PDI Specification Tree under the `catalyst` key. See the [example file](test/pdi.yml.in) for actual implementation. PDI is very flexible about the timing of the data sharing using an advanced event mechanism, whereas Catalyst needs all data at the same point in time. -So, the user of this plugin should set an event name referenced by the `PDI_execute_event_name` key in the yaml config, in order to trigger the call to `catalyst_execute`. Data should have been shared either before the event or during the event using the `PDI_multi_expose` function. +So, the user of this plugin should set an event name referenced by the `on_event` key in the yaml config, in order to trigger the call to `catalyst_execute`. Data should have been shared either before the event or during the event using the `PDI_multi_expose` function. Internally, `catalyst_initialize` is called by `PDI_Init` and `catalyst_finalize` is called by `PDI_finalize`. -In the specification tree, the `PDI_data` key indicates that the conduit node data should be set as external pointer to a data from the PDI data store. 
There is several subkeys to describe this data, like `name`, `offset`, `stride`, `multiply` to try to match every possible memory layout cases. + +In the sub-tree corresponding to the catalyst plugin, a double quoted value is evaluated as a string. + +In the specification tree, the `PDI_data_array` key indicates that the conduit node data should be set as external pointer to a data array from the PDI data store. The value of this key corresponds to the name of the data in PDI data store. There is several keys to describe this array like `size`, `offset`, `stride` to try to match every possible memory layout cases. In this case, these integers values are evaluated as conduit index type. + +By default other integer are evaluated as `long`. Excepted if the integer value depend on a data defined in PDI data store as `numXPoints` +in this example: +```yaml +dims: { i: '$numXPoints', j: '60', k: 44 } +``` +Be careful, if you compile conduit with 32-bits index (option `CONDUIT_INDEX_32`), you recommand to define a metadata/data for the index and pass the data as `i` in the previous example. + +In the case of real value, the value is evaluated as `double`. Excepted if the real value depend on a data defined in PDI data store. 
+ # License diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index f4192e07b..12ac041c9 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -105,40 +105,21 @@ void catalyst_plugin::RunCatalystExecute() case YAML_PLAIN_SCALAR_STYLE: // handle integer or float/double type that doesn't depend on PDI store { + std::string data_name{ PDI::to_string(current.tree) }; PDI::Expression data_expression{ PDI::to_string(current.tree) }; PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) - { - current_node.set_int64(data_expression.to_long(context())); - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) - { - context().logger().error("The expression {} is defined as unsigned integer.", - PDI::to_string(current.tree)); - // context().logger().trace("The expression {} is defined with unsigned integer. - // It is tranformed to signed integer.", PDI::to_string(current.tree)); - // current_node.set_int64(data_expression.to_long(context())); - } - else if (scalar_kind == PDI::Scalar_kind::FLOAT) - { - current_node.set_float64(data_expression.to_double(context())); - } - else - { - context().logger().error( - "Unknown Scalar Type for variable {}", PDI::to_string(current.tree)); - } + FillNodeWithScalarPDIData( + conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); } else { context().logger().error( - "Unsupported datatype for variable: {}", PDI::to_string(current.tree)); + "Unsupported datatype for variable: {}. 
It should be scalar type.", data_name); } } break; @@ -239,12 +220,12 @@ void catalyst_plugin::FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tre auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { - context().logger().error("No \"name\" child in PDI_data spec."); + context().logger().error("No \"name\" child in PDI_data_array spec."); return; } - std::string name = PDI::to_string(name_spec); // Jacques: Perhaps we need an expression if the - // users add an index for example "my_name${index}". + std::string name = PDI::to_string(name_spec); + auto it = this->CurrentPDIData.find(name); if (it == this->CurrentPDIData.end()) { @@ -353,7 +334,6 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st PC_tree_t& tree, const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r) { PDI::Datatype_sptr type = array_datatype.subtype(); - // Jacques: Pourquoi une boucle While ?? Infini ?? while (auto&& array_type = std::dynamic_pointer_cast(type)) { type = array_type->subtype(); @@ -365,12 +345,24 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st return; } - // Jacques: il faut toujours que le .size soit defini ==> faire un test. conduit_index_t num_elements = 0; auto size_spec = PC_get(tree, ".size"); if (PC_status(size_spec) == PC_OK) { - num_elements = GetLongValueFromSpecNode(size_spec, name); + if (std::is_same::value) + { + num_elements = GetLongValueFromSpecNode(size_spec, name); + } + else + { + // case conduit_index_t is 32-bits + long tmp_num_elements = GetLongValueFromSpecNode(size_spec, name); + num_elements = static_cast(tmp_num_elements); + if (num_elements != tmp_num_elements) + { + context().logger().error("Error in cast of a type conduit_index_t in long. 
{} != {}", num_elements, tmp_num_elements); + } + } } else { @@ -382,16 +374,45 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st auto offset_spec = PC_get(tree, ".offset"); if (PC_status(offset_spec) == PC_OK) { - offset = GetLongValueFromSpecNode(offset_spec, name); + if (std::is_same::value) + { + offset = GetLongValueFromSpecNode(offset_spec, name); + } + else + { + // case conduit_index_t is 32-bits + long tmp_offset = GetLongValueFromSpecNode(offset_spec, name); + offset= static_cast(tmp_offset); + if (offset != tmp_offset) + { + context().logger().error("Error in cast of a type long in conduit_index_t {} != {}", offset, tmp_offset); + } + } } conduit_index_t stride = 1; auto stride_spec = PC_get(tree, ".stride"); if (PC_status(stride_spec) == PC_OK) { - stride = GetLongValueFromSpecNode(stride_spec, name); + if (std::is_same::value) + { + stride = GetLongValueFromSpecNode(stride_spec, name); + } + else + { + // case conduit_index_t is 32-bits + long tmp_stride = GetLongValueFromSpecNode(stride_spec, name); + stride = static_cast(tmp_stride); + if (stride != tmp_stride) + { + context().logger().error("Error in cast of a type long to conduit_index_t {} != {}", stride, tmp_stride); + } + } } + + + // computer endianness is used conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; PDI::Scalar_kind scalar_kind = scalar_datatype->kind(); @@ -519,8 +540,6 @@ long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::strin } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { - // Jacques: auto value = ref_r.scalar_value();?? a utiliser en fonction du buffersize - // ?? 
return data_expression.to_long(context()); } else diff --git a/plugins/catalyst/tests/CMakeLists.txt b/plugins/catalyst/tests/CMakeLists.txt index 01b9013e2..f2a48e851 100644 --- a/plugins/catalyst/tests/CMakeLists.txt +++ b/plugins/catalyst/tests/CMakeLists.txt @@ -21,6 +21,7 @@ target_link_libraries(TestPDICatalyst set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests) configure_file(pdi.yml.in pdi.yml) +# find_package(Python3 REQUIRED COMPONENTS Interpreter Development) //To be checked when is necessary find_package(Python3 COMPONENTS Interpreter) add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/") diff --git a/plugins/catalyst/tests/run_test.py b/plugins/catalyst/tests/run_test.py index 73facf916..beb7aa838 100644 --- a/plugins/catalyst/tests/run_test.py +++ b/plugins/catalyst/tests/run_test.py @@ -9,38 +9,47 @@ env = os.environ.copy() env["CATALYST_DATA_DUMP_DIRECTORY"] = binary_folder +env["CATALYST_IMPLEMENTATION_NAME"] = 'stub' # need to get the conduit json file for comparison env["PDI_PLUGIN_PATH"] = binary_folder + '/..' result = subprocess.run([binary_folder + "/TestPDICatalyst", binary_folder + "/pdi.yml"], env=env) if(result.returncode != 0): exit(result.returncode) -# Check the initialize json dump -reference_initialize_json = source_folder + "/references/initialize_reference.json" -actual_initialize_json = binary_folder + "initialize_params.conduit_bin.1.0_json" -with open(reference_initialize_json) as ref_file: - with open(actual_initialize_json) as actual_file: - ref_json = json.load(ref_file) - actual_json = json.load(actual_file) - if ref_json.items() != actual_json.items(): - # Ignore the length of the script path which depends on platform. 
- actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] +# get endiannes of the computer +endianness = sys.byteorder + +if(endianness == 'little'): + # Check the initialize json dump + reference_initialize_json = source_folder + "/references/initialize_reference.json" + actual_initialize_json = binary_folder + "initialize_params.conduit_bin.1.0_json" + with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) if ref_json.items() != actual_json.items(): - print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') - exit(1) - -# Check the execute json dump -reference_execute_json = source_folder + "/references/execute_reference.json" -for step in range(9): - filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.1.0_json" - if not filecmp.cmp(reference_execute_json, filepath): - print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') - exit(1) + # Ignore the length of the script path which depends on platform. 
+ actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] + if ref_json.items() != actual_json.items(): + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') + exit(1) -# Check the finalize json dump -reference_finalize_json = source_folder + "/references/finalize_reference.json" -actual_finalize_json = binary_folder + "finalize_params.conduit_bin.1.0_json" -if not filecmp.cmp(reference_finalize_json, actual_finalize_json): - print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') - exit(1) + # Check the execute json dump + reference_execute_json = source_folder + "/references/execute_reference.json" + for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.1.0_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') + exit(1) + + # Check the finalize json dump + reference_finalize_json = source_folder + "/references/finalize_reference.json" + actual_finalize_json = binary_folder + "finalize_params.conduit_bin.1.0_json" + if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') + exit(1) +else: + print(f'The reference solution is based on little endian. 
So it is not possible to check with big endian.') + print(f'The test is marked as failed anyway for the moment.') + exit(1) \ No newline at end of file diff --git a/plugins/catalyst/tests/run_test_mpi.py b/plugins/catalyst/tests/run_test_mpi.py index 48392459a..9a96fe6d7 100644 --- a/plugins/catalyst/tests/run_test_mpi.py +++ b/plugins/catalyst/tests/run_test_mpi.py @@ -10,42 +10,50 @@ env = os.environ.copy() env["CATALYST_DATA_DUMP_DIRECTORY"] = binary_folder +env["CATALYST_IMPLEMENTATION_NAME"] = 'stub' # need to get the conduit json file for comparison env["PDI_PLUGIN_PATH"] = binary_folder + '/..' result = subprocess.run([mpi_exec, "-np", "4", binary_folder + "/TestPDICatalyst", binary_folder + "/pdi.yml"], env=env) if(result.returncode != 0): exit(result.returncode) -# Check the initialize json dump for each rank. -reference_initialize_json = source_folder + "/references/initialize_reference.json" -for rank in range(4): - actual_initialize_json = binary_folder + f"initialize_params.conduit_bin.4.{rank}_json" - with open(reference_initialize_json) as ref_file: - with open(actual_initialize_json) as actual_file: - ref_json = json.load(ref_file) - actual_json = json.load(actual_file) - if ref_json.items() != actual_json.items(): - # Ignore the length of the script path which depends on platform. - actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] +# get endiannes of the computer +endianness = sys.byteorder + +if(endianness == 'little'): + # Check the initialize json dump for each rank. 
+ reference_initialize_json = source_folder + "/references/initialize_reference.json" + for rank in range(4): + actual_initialize_json = binary_folder + f"initialize_params.conduit_bin.4.{rank}_json" + with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) if ref_json.items() != actual_json.items(): - print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') - exit(1) - - -# Check the execute json dump for each rank. -for rank in range(4): - reference_execute_json = source_folder + f"/references/execute_reference_rank{rank}.json" - for step in range(9): - filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.4.{rank}_json" - if not filecmp.cmp(reference_execute_json, filepath): - print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') + # Ignore the length of the script path which depends on platform. + actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] + if ref_json.items() != actual_json.items(): + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') + exit(1) + + # Check the execute json dump for each rank. + for rank in range(4): + reference_execute_json = source_folder + f"/references/execute_reference_rank{rank}.json" + for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') + exit(1) + + # Check the finalize json dump for each rank. 
+ reference_finalize_json = source_folder + "/references/finalize_reference.json" + for rank in range(4): + actual_finalize_json = binary_folder + f"finalize_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') exit(1) -# Check the finalize json dump for each rank. -reference_finalize_json = source_folder + "/references/finalize_reference.json" -for rank in range(4): - actual_finalize_json = binary_folder + f"finalize_params.conduit_bin.4.{rank}_json" - if not filecmp.cmp(reference_finalize_json, actual_finalize_json): - print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') - exit(1) - +else: + print(f'The reference solution is based on little endian. So it is not possible to check with big endian.') + print(f'The test is marked as failed anyway for the moment.') + exit(1) From a246c8791710cc6d8e2408ebed908005e0554fdd Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Wed, 3 Dec 2025 14:32:19 +0100 Subject: [PATCH 18/31] add copyright header --- plugins/catalyst/CMakeLists.txt | 4 ++++ plugins/catalyst/pdi_catalyst_plugin.cxx | 7 +++++-- plugins/catalyst/tests/pdi.yml.in | 4 ++++ plugins/catalyst/tests/run_test.py | 4 ++++ plugins/catalyst/tests/run_test_mpi.py | 4 ++++ 5 files changed, 21 insertions(+), 2 deletions(-) diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 8d136bf96..3088e2397 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 + cmake_minimum_required(VERSION 3.16...3.29) project(pdi_catalyst_plugin LANGUAGES C CXX) diff --git 
a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 12ac041c9..26ceabcf7 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -1,3 +1,8 @@ +/* +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 +*/ #include "pdi_catalyst_plugin.h" #include "catalyst.hpp" @@ -410,8 +415,6 @@ void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::st } } - - // computer endianness is used conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; diff --git a/plugins/catalyst/tests/pdi.yml.in b/plugins/catalyst/tests/pdi.yml.in index 6ef8e25df..fa86c191e 100644 --- a/plugins/catalyst/tests/pdi.yml.in +++ b/plugins/catalyst/tests/pdi.yml.in @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 + logging: level: "debug" metadata: diff --git a/plugins/catalyst/tests/run_test.py b/plugins/catalyst/tests/run_test.py index beb7aa838..cab7df48d 100644 --- a/plugins/catalyst/tests/run_test.py +++ b/plugins/catalyst/tests/run_test.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 + import subprocess import sys import os diff --git a/plugins/catalyst/tests/run_test_mpi.py b/plugins/catalyst/tests/run_test_mpi.py index 9a96fe6d7..c45ab8895 100644 --- a/plugins/catalyst/tests/run_test_mpi.py +++ b/plugins/catalyst/tests/run_test_mpi.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 
Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 + import subprocess import sys import os From d14a0ed5e9d460e5b7180aa54274f9153a0afdb8 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Fri, 5 Dec 2025 10:23:18 +0100 Subject: [PATCH 19/31] change format to snake_case --- plugins/catalyst/pdi_catalyst_plugin.cxx | 989 +++++++++++------------ plugins/catalyst/pdi_catalyst_plugin.h | 49 +- plugins/catalyst/tests/Attributes.cxx | 74 +- plugins/catalyst/tests/Attributes.h | 26 +- plugins/catalyst/tests/CMakeLists.txt | 6 +- plugins/catalyst/tests/Grid.cxx | 141 ++-- plugins/catalyst/tests/Grid.h | 16 +- plugins/catalyst/tests/PDIAdaptor.cxx | 139 ++-- plugins/catalyst/tests/PDIAdaptor.h | 11 +- plugins/catalyst/tests/main.cxx | 71 +- 10 files changed, 723 insertions(+), 799 deletions(-) diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 26ceabcf7..b8ab2f06a 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -10,568 +10,505 @@ #include catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) - : Plugin{ ctx } - , SpecTree(spec_tree) + : Plugin{ctx} + , m_spec_tree(spec_tree) { - ctx.callbacks().add_init_callback([this]() { this->ProcessPDIInit(); }); - ctx.callbacks().add_data_callback( - [this](const std::string& data_name, PDI::Ref ref) { this->ProcessData(data_name, ref); }); - ctx.callbacks().add_event_callback( - [this](const std::string& event_name) { this->ProcessEvent(event_name); }); + ctx.callbacks().add_init_callback([this]() { this->process_PDI_init(); }); + ctx.callbacks().add_data_callback([this](const std::string& data_name, PDI::Ref ref) { this->process_data(data_name, ref); }); + ctx.callbacks().add_event_callback([this](const std::string& event_name) { this->process_event(event_name); }); } catalyst_plugin::~catalyst_plugin() { - RunCatalystFinalize(); + 
run_catalyst_finalize(); } -void catalyst_plugin::ProcessPDIInit() +void catalyst_plugin::process_PDI_init() { - this->RunCatalystInitialize(); - this->PDIExecuteEventName = this->ReadPDIExecuteEventName(); + this->run_catalyst_initialize(); + this->m_PDI_execute_event_name = this->read_PDI_execute_event_name(); } -void catalyst_plugin::ProcessData(const std::string& data_name, PDI::Ref ref) +void catalyst_plugin::process_data(const std::string& data_name, PDI::Ref ref) { - context().logger().debug("User has shared a data named {}", data_name); - auto it = this->CurrentPDIData.find(data_name); - if (it != this->CurrentPDIData.end()) - { - context().logger().warn( - "Data named '{}' already recorded, the previous value will overwritten.", data_name); - it->second = ref.copy(); - } - else - { - this->CurrentPDIData.emplace(data_name, ref); - } + context().logger().debug("User has shared a data named {}", data_name); + auto it = this->m_current_PDI_data.find(data_name); + if (it != this->m_current_PDI_data.end()) { + context().logger().warn("Data named '{}' already recorded, the previous value will overwritten.", data_name); + it->second = ref.copy(); + } else { + this->m_current_PDI_data.emplace(data_name, ref); + } } -void catalyst_plugin::ProcessEvent(const std::string& event_name) +void catalyst_plugin::process_event(const std::string& event_name) { - if (event_name == this->PDIExecuteEventName) - { - RunCatalystExecute(); - } + if (event_name == this->m_PDI_execute_event_name) { + run_catalyst_execute(); + } } -void catalyst_plugin::RunCatalystInitialize() +void catalyst_plugin::run_catalyst_initialize() { - conduit_cpp::Node node; - auto scripts_node = node["catalyst/scripts"]; - auto scripts_spec = PC_get(this->SpecTree, ".scripts"); - int script_number = 0; - PC_len(scripts_spec, &script_number); - for (int i = 0; i < script_number; ++i) - { - auto key = PC_get(scripts_spec, "{%d}", i); - auto value = PC_get(scripts_spec, "<%d>", i); - 
scripts_node[PDI::to_string(key)] = PDI::to_string(value); - } - - context().logger().debug("catalyst_initialize call..."); - auto result = catalyst_initialize(conduit_cpp::c_node(&node)); - if (result != catalyst_status_ok) - { - context().logger().error("catalyst_initialize failure"); - } + conduit_cpp::Node node; + auto scripts_node = node["catalyst/scripts"]; + auto scripts_spec = PC_get(this->m_spec_tree, ".scripts"); + int script_number = 0; + PC_len(scripts_spec, &script_number); + for (int i = 0; i < script_number; ++i) { + auto key = PC_get(scripts_spec, "{%d}", i); + auto value = PC_get(scripts_spec, "<%d>", i); + scripts_node[PDI::to_string(key)] = PDI::to_string(value); + } + + context().logger().debug("catalyst_initialize call..."); + auto result = catalyst_initialize(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) { + context().logger().error("catalyst_initialize failure"); + } } -void catalyst_plugin::RunCatalystExecute() +void catalyst_plugin::run_catalyst_execute() { - conduit_cpp::Node node; - - auto execute_spec = PC_get(this->SpecTree, ".execute"); - - // walk the spec tree and create corresponding catalyst nodes. 
- struct SpecTreeNode - { - PC_tree_t tree; - std::string name; - conduit_node* parentNode; - }; - - std::stack remainingTreeAndParentNode; - remainingTreeAndParentNode.push({ execute_spec, "catalyst", conduit_cpp::c_node(&node) }); - while (!remainingTreeAndParentNode.empty()) - { - auto current = remainingTreeAndParentNode.top(); - remainingTreeAndParentNode.pop(); - - auto current_node = conduit_cpp::cpp_node(current.parentNode)[current.name]; - switch (current.tree.node->type) - { - case YAML_NO_NODE: - context().logger().error("Unsupported Empty YAML Node for variable {}", current.name); - break; - case YAML_SCALAR_NODE: - switch (current.tree.node->data.scalar.style) - { - case YAML_PLAIN_SCALAR_STYLE: - // handle integer or float/double type that doesn't depend on PDI store - { - std::string data_name{ PDI::to_string(current.tree) }; - PDI::Expression data_expression{ PDI::to_string(current.tree) }; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); - - if (auto scalar_datatype = - std::dynamic_pointer_cast(data_type)) - { - FillNodeWithScalarPDIData( - conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); - } - else - { - context().logger().error( - "Unsupported datatype for variable: {}. It should be scalar type.", data_name); - } - } - break; - case YAML_SINGLE_QUOTED_SCALAR_STYLE: - // handle integer or float/double type that depend on scalar PDI data - { - std::string data_name{ PDI::to_string(current.tree) }; - PDI::Expression data_expression{ PDI::to_string(current.tree) }; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); - - if (auto scalar_datatype = - std::dynamic_pointer_cast(data_type)) - { - FillNodeWithScalarPDIData( - conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); - } - else - { - context().logger().error( - "Unsupported datatype for variable: {}. 
It should be scalar type.", data_name); - } - } - break; - case YAML_DOUBLE_QUOTED_SCALAR_STYLE: - current_node.set_string(PDI::to_string(current.tree)); - break; - case YAML_LITERAL_SCALAR_STYLE: - case YAML_FOLDED_SCALAR_STYLE: - case YAML_ANY_SCALAR_STYLE: - context().logger().error("Unsupported YAML scalar style for variable {}", current.name); - break; - } - break; - case YAML_SEQUENCE_NODE: - context().logger().error("Unsupported Sequence YAML Node for variable {}", current.name); - break; - case YAML_MAPPING_NODE: - int data_tree_size = PDI::len(current.tree); - // Check for dynamic PDI Data array - bool pdi_data_array = false; - for (int i = data_tree_size - 1; i >= 0; --i) - { - auto key = PC_get(current.tree, "{%d}", i); - if (PDI::to_string(key) == "PDI_data_array") - { - this->FillNodeWithPDIDataArray(conduit_cpp::c_node(¤t_node), current.tree); - pdi_data_array = true; - break; // break the loop - } - } - if (pdi_data_array) - { - break; // break the case - } - - // reverse order to get the correct order when poping the stack. - for (int i = data_tree_size - 1; i >= 0; --i) - { - auto key = PC_get(current.tree, "{%d}", i); - auto value = PC_get(current.tree, "<%d>", i); - remainingTreeAndParentNode.push( - { value, PDI::to_string(key), conduit_cpp::c_node(¤t_node) }); - } - break; - } - } - - if (context().logger().level() == spdlog::level::debug || - context().logger().level() == spdlog::level::trace) - { - node.print(); - } - context().logger().debug("catalyst_execute call..."); - auto result = catalyst_execute(conduit_cpp::c_node(&node)); - if (result != catalyst_status_ok) - { - context().logger().error("catalyst_execute failure"); - } - - // clear CurrentPDIData at each iteration - this->CurrentPDIData.clear(); + conduit_cpp::Node node; + + auto execute_spec = PC_get(this->m_spec_tree, ".execute"); + + // walk the spec tree and create corresponding catalyst nodes. 
+ struct Spec_tree_node { + PC_tree_t tree; + std::string name; + conduit_node* parent_node; + }; + + std::stack remaining_tree_and_parent_node; + remaining_tree_and_parent_node.push({execute_spec, "catalyst", conduit_cpp::c_node(&node)}); + while (!remaining_tree_and_parent_node.empty()) { + auto current = remaining_tree_and_parent_node.top(); + remaining_tree_and_parent_node.pop(); + + auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; + switch (current.tree.node->type) { + case YAML_NO_NODE: + context().logger().error("Unsupported Empty YAML Node for variable {}", current.name); + break; + case YAML_SCALAR_NODE: + switch (current.tree.node->data.scalar.style) { + case YAML_PLAIN_SCALAR_STYLE: + // handle integer or float/double type that doesn't depend on PDI store + { + std::string data_name{PDI::to_string(current.tree)}; + PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + auto data_type = spec_ref.type()->evaluate(context()); + + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { + fill_node_with_scalar_PDI_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + } else { + context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); + } + } + break; + case YAML_SINGLE_QUOTED_SCALAR_STYLE: + // handle integer or float/double type that depend on scalar PDI data + { + std::string data_name{PDI::to_string(current.tree)}; + PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + auto data_type = spec_ref.type()->evaluate(context()); + + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { + fill_node_with_scalar_PDI_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + } else { + context().logger().error("Unsupported datatype for variable: {}. 
It should be scalar type.", data_name); + } + } + break; + case YAML_DOUBLE_QUOTED_SCALAR_STYLE: + current_node.set_string(PDI::to_string(current.tree)); + break; + case YAML_LITERAL_SCALAR_STYLE: + case YAML_FOLDED_SCALAR_STYLE: + case YAML_ANY_SCALAR_STYLE: + context().logger().error("Unsupported YAML scalar style for variable {}", current.name); + break; + } + break; + case YAML_SEQUENCE_NODE: + context().logger().error("Unsupported Sequence YAML Node for variable {}", current.name); + break; + case YAML_MAPPING_NODE: + int data_tree_size = PDI::len(current.tree); + // Check for dynamic PDI Data array + bool pdi_data_array = false; + for (int i = data_tree_size - 1; i >= 0; --i) { + auto key = PC_get(current.tree, "{%d}", i); + if (PDI::to_string(key) == "PDI_data_array") { + this->fill_node_with_PDI_data_array(conduit_cpp::c_node(¤t_node), current.tree); + pdi_data_array = true; + break; // break the loop + } + } + if (pdi_data_array) { + break; // break the case + } + + // reverse order to get the correct order when poping the stack. 
+ for (int i = data_tree_size - 1; i >= 0; --i) { + auto key = PC_get(current.tree, "{%d}", i); + auto value = PC_get(current.tree, "<%d>", i); + remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node)}); + } + break; + } + } + + if (context().logger().level() == spdlog::level::debug || context().logger().level() == spdlog::level::trace) { + node.print(); + } + context().logger().debug("catalyst_execute call..."); + auto result = catalyst_execute(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) { + context().logger().error("catalyst_execute failure"); + } + + // clear m_current_PDI_data at each iteration + this->m_current_PDI_data.clear(); } -void catalyst_plugin::RunCatalystFinalize() +void catalyst_plugin::run_catalyst_finalize() { - context().logger().debug("catalyst_finalize call..."); - conduit_cpp::Node node; - auto result = catalyst_finalize(conduit_cpp::c_node(&node)); - if (result != catalyst_status_ok) - { - context().logger().error("catalyst_finalize failure"); - } + context().logger().debug("catalyst_finalize call..."); + conduit_cpp::Node node; + auto result = catalyst_finalize(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) { + context().logger().error("catalyst_finalize failure"); + } } -void catalyst_plugin::FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tree) +void catalyst_plugin::fill_node_with_PDI_data_array(conduit_node* node, PC_tree_t tree) { - auto name_spec = PC_get(tree, ".PDI_data_array"); - if (PC_status(name_spec)) - { - context().logger().error("No \"name\" child in PDI_data_array spec."); - return; - } - - std::string name = PDI::to_string(name_spec); - - auto it = this->CurrentPDIData.find(name); - if (it == this->CurrentPDIData.end()) - { - context().logger().error("Can't find the PDI_data named: {}", name); - return; - } - auto ref = it->second; - PDI::Ref_r ref_r{ ref }; - - if (!ref_r) - { - context().logger().error("The PDIData named \"{}\" is not 
readable.", name); - return; - } - - auto data_type = ref_r.type(); - if (auto array_datatype = std::dynamic_pointer_cast(data_type)) - { - FillNodeWithArrayPDIData(node, name, tree, *array_datatype, ref_r); - } - else - { - context().logger().error( - "Unsupported datatype for variable: {}. The type should be array type.", name); - } + auto name_spec = PC_get(tree, ".PDI_data_array"); + if (PC_status(name_spec)) { + context().logger().error("No \"name\" child in PDI_data_array spec."); + return; + } + + std::string name = PDI::to_string(name_spec); + + auto it = this->m_current_PDI_data.find(name); + if (it == this->m_current_PDI_data.end()) { + context().logger().error("Can't find the PDI_data named: {}", name); + return; + } + auto ref = it->second; + PDI::Ref_r ref_r{ref}; + + if (!ref_r) { + context().logger().error("The PDIData named \"{}\" is not readable.", name); + return; + } + + auto data_type = ref_r.type(); + if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { + fill_node_with_array_PDI_data(node, name, tree, *array_datatype, ref_r); + } else { + context().logger().error("Unsupported datatype for variable: {}. 
The type should be array type.", name); + } } -void catalyst_plugin::FillNodeWithScalarPDIData(conduit_node* node, const std::string& name, - const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r) +void catalyst_plugin::fill_node_with_scalar_PDI_data( + conduit_node* node, + const std::string& name, + const PDI::Scalar_datatype& scalar_datatype, + PDI::Ref_r& ref_r +) { - PDI::Scalar_kind scalar_kind = scalar_datatype.kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) - { - auto buffer_size = scalar_datatype.buffersize(); - if (buffer_size == sizeof(conduit_int8)) - { - catalyst_conduit_node_set_int8(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_int16)) - { - catalyst_conduit_node_set_int16(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_int32)) - { - catalyst_conduit_node_set_int32(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_int64)) - { - catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); - } - else - { - context().logger().error( - "Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); - } - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) - { - auto buffer_size = scalar_datatype.buffersize(); - if (buffer_size == sizeof(conduit_uint8)) - { - catalyst_conduit_node_set_uint8(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_uint16)) - { - catalyst_conduit_node_set_uint16(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_uint32)) - { - catalyst_conduit_node_set_uint32(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_uint64)) - { - catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); - } - else - { - context().logger().error( - "Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); - } - } - else if (scalar_kind == PDI::Scalar_kind::FLOAT) - { - auto buffer_size = scalar_datatype.buffersize(); - if 
(buffer_size == sizeof(conduit_float32)) - { - catalyst_conduit_node_set_float32(node, *static_cast(ref_r.get())); - } - else if (buffer_size == sizeof(conduit_float64)) - { - catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); - } - else - { - context().logger().error( - "Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); - } - } - else - { - context().logger().error("Unknown Scalar Type for variable {}", name); - } + PDI::Scalar_kind scalar_kind = scalar_datatype.kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) { + auto buffer_size = scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_int8)) { + catalyst_conduit_node_set_int8(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_int16)) { + catalyst_conduit_node_set_int16(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_int32)) { + catalyst_conduit_node_set_int32(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_int64)) { + catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); + } else { + context().logger().error("Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + } + } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { + auto buffer_size = scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_uint8)) { + catalyst_conduit_node_set_uint8(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_uint16)) { + catalyst_conduit_node_set_uint16(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_uint32)) { + catalyst_conduit_node_set_uint32(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_uint64)) { + catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); + } else { + context().logger().error("Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + } + } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { + auto buffer_size = 
scalar_datatype.buffersize(); + if (buffer_size == sizeof(conduit_float32)) { + catalyst_conduit_node_set_float32(node, *static_cast(ref_r.get())); + } else if (buffer_size == sizeof(conduit_float64)) { + catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); + } else { + context().logger().error("Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + } + } else { + context().logger().error("Unknown Scalar Type for variable {}", name); + } } -void catalyst_plugin::FillNodeWithArrayPDIData(conduit_node* node, const std::string& name, - PC_tree_t& tree, const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r) +void catalyst_plugin::fill_node_with_array_PDI_data( + conduit_node* node, + const std::string& name, + PC_tree_t& tree, + const PDI::Array_datatype& array_datatype, + PDI::Ref_r& ref_r +) { - PDI::Datatype_sptr type = array_datatype.subtype(); - while (auto&& array_type = std::dynamic_pointer_cast(type)) - { - type = array_type->subtype(); - } - auto scalar_datatype = std::dynamic_pointer_cast(type); - if (!scalar_datatype) - { - context().logger().error("Array subtype of variable {} should be scalar type.", name); - return; - } - - conduit_index_t num_elements = 0; - auto size_spec = PC_get(tree, ".size"); - if (PC_status(size_spec) == PC_OK) - { - if (std::is_same::value) - { - num_elements = GetLongValueFromSpecNode(size_spec, name); - } - else - { - // case conduit_index_t is 32-bits - long tmp_num_elements = GetLongValueFromSpecNode(size_spec, name); - num_elements = static_cast(tmp_num_elements); - if (num_elements != tmp_num_elements) - { - context().logger().error("Error in cast of a type conduit_index_t in long. 
{} != {}", num_elements, tmp_num_elements); - } - } - } - else - { - context().logger().error( - "Unknown the number of elements for variable{} passed to catalyst.", name); - } - - conduit_index_t offset = 0; - auto offset_spec = PC_get(tree, ".offset"); - if (PC_status(offset_spec) == PC_OK) - { - if (std::is_same::value) - { - offset = GetLongValueFromSpecNode(offset_spec, name); - } - else - { - // case conduit_index_t is 32-bits - long tmp_offset = GetLongValueFromSpecNode(offset_spec, name); - offset= static_cast(tmp_offset); - if (offset != tmp_offset) - { - context().logger().error("Error in cast of a type long in conduit_index_t {} != {}", offset, tmp_offset); - } - } - } - - conduit_index_t stride = 1; - auto stride_spec = PC_get(tree, ".stride"); - if (PC_status(stride_spec) == PC_OK) - { - if (std::is_same::value) - { - stride = GetLongValueFromSpecNode(stride_spec, name); - } - else - { - // case conduit_index_t is 32-bits - long tmp_stride = GetLongValueFromSpecNode(stride_spec, name); - stride = static_cast(tmp_stride); - if (stride != tmp_stride) - { - context().logger().error("Error in cast of a type long to conduit_index_t {} != {}", stride, tmp_stride); - } - } - } - - // computer endianness is used - conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; - - PDI::Scalar_kind scalar_kind = scalar_datatype->kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) - { - auto buffer_size = scalar_datatype->buffersize(); - if (buffer_size == sizeof(conduit_int8)) - { - conduit_index_t element_bytes = 1; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_int8_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_int16)) - { - conduit_index_t element_bytes = 2; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_int16_ptr_detailed(node, pointer, num_elements, - 
offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_int32)) - { - conduit_index_t element_bytes = 4; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_int32_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_int64)) - { - conduit_index_t element_bytes = 8; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_int64_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else - { - context().logger().error( - "Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); - } - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) - { - auto buffer_size = scalar_datatype->buffersize(); - if (buffer_size == sizeof(conduit_uint8)) - { - conduit_index_t element_bytes = 1; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_uint8_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_uint16)) - { - conduit_index_t element_bytes = 2; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_uint16_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_uint32)) - { - conduit_index_t element_bytes = 4; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_uint32_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_uint64)) - { - conduit_index_t element_bytes = 8; - auto pointer = 
const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_uint64_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else - { - context().logger().error( - "Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); - } - } - else if (scalar_kind == PDI::Scalar_kind::FLOAT) - { - auto buffer_size = scalar_datatype->buffersize(); - if (buffer_size == sizeof(conduit_float32)) - { - conduit_index_t element_bytes = 4; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_float32_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else if (buffer_size == sizeof(conduit_float64)) - { - conduit_index_t element_bytes = 8; - auto pointer = const_cast(static_cast(ref_r.get())); - catalyst_conduit_node_set_external_float64_ptr_detailed(node, pointer, num_elements, - offset * element_bytes, stride * element_bytes, element_bytes, endianness); - } - else - { - context().logger().error( - "Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); - } - } - else - { - context().logger().error("Unknown Scalar Type for variable {}", name); - } + PDI::Datatype_sptr type = array_datatype.subtype(); + while (auto&& array_type = std::dynamic_pointer_cast(type)) { + type = array_type->subtype(); + } + auto scalar_datatype = std::dynamic_pointer_cast(type); + if (!scalar_datatype) { + context().logger().error("Array subtype of variable {} should be scalar type.", name); + return; + } + + conduit_index_t num_elements = 0; + auto size_spec = PC_get(tree, ".size"); + if (PC_status(size_spec) == PC_OK) { + if (std::is_same::value) { + num_elements = get_long_value_from_spec_node(size_spec, name); + } else { + // case conduit_index_t is 32-bits + long tmp_num_elements = get_long_value_from_spec_node(size_spec, name); + num_elements = static_cast(tmp_num_elements); + if 
(num_elements != tmp_num_elements) { + context().logger().error("Error in cast of a type conduit_index_t in long. {} != {}", num_elements, tmp_num_elements); + } + } + } else { + context().logger().error("Unknown the number of elements for variable{} passed to catalyst.", name); + } + + conduit_index_t offset = 0; + auto offset_spec = PC_get(tree, ".offset"); + if (PC_status(offset_spec) == PC_OK) { + if (std::is_same::value) { + offset = get_long_value_from_spec_node(offset_spec, name); + } else { + // case conduit_index_t is 32-bits + long tmp_offset = get_long_value_from_spec_node(offset_spec, name); + offset = static_cast(tmp_offset); + if (offset != tmp_offset) { + context().logger().error("Error in cast of a type long in conduit_index_t {} != {}", offset, tmp_offset); + } + } + } + + conduit_index_t stride = 1; + auto stride_spec = PC_get(tree, ".stride"); + if (PC_status(stride_spec) == PC_OK) { + if (std::is_same::value) { + stride = get_long_value_from_spec_node(stride_spec, name); + } else { + // case conduit_index_t is 32-bits + long tmp_stride = get_long_value_from_spec_node(stride_spec, name); + stride = static_cast(tmp_stride); + if (stride != tmp_stride) { + context().logger().error("Error in cast of a type long to conduit_index_t {} != {}", stride, tmp_stride); + } + } + } + + // computer endianness is used + conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; + + PDI::Scalar_kind scalar_kind = scalar_datatype->kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_int8)) { + conduit_index_t element_bytes = 1; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int8_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_int16)) { + conduit_index_t element_bytes = 2; + auto pointer = 
const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int16_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_int32)) { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int32_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_int64)) { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_int64_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else { + context().logger().error("Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + } + } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_uint8)) { + conduit_index_t element_bytes = 1; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint8_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_uint16)) { + conduit_index_t element_bytes = 2; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint16_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_uint32)) { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint32_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + 
stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_uint64)) { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_uint64_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else { + context().logger().error("Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + } + } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { + auto buffer_size = scalar_datatype->buffersize(); + if (buffer_size == sizeof(conduit_float32)) { + conduit_index_t element_bytes = 4; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_float32_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else if (buffer_size == sizeof(conduit_float64)) { + conduit_index_t element_bytes = 8; + auto pointer = const_cast(static_cast(ref_r.get())); + catalyst_conduit_node_set_external_float64_ptr_detailed( + node, + pointer, + num_elements, + offset * element_bytes, + stride * element_bytes, + element_bytes, + endianness + ); + } else { + context().logger().error("Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + } + } else { + context().logger().error("Unknown Scalar Type for variable {}", name); + } } -long catalyst_plugin::GetLongValueFromSpecNode(PC_tree_t& spec, const std::string& name) +long catalyst_plugin::get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name) { - if (spec.node->type == YAML_SCALAR_NODE) - { - PDI::Expression data_expression{ PDI::to_string(spec) }; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - if (!spec_ref) - { - context().logger().error("The PDIData named \"{}\" is not readable.", name); - return 0; - } - auto data_type = spec_ref.type()->evaluate(context()); - if (auto 
scalar_datatype = std::dynamic_pointer_cast(data_type)) - { - PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) - { - return data_expression.to_long(context()); - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) - { - return data_expression.to_long(context()); - } - else - { - context().logger().error( - "Unknown Scalar Type for variable {}. The type must be an integer", PDI::to_string(spec)); - } - } - else - { - context().logger().error( - "The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); - } - return 0; - } - else - { - context().logger().error("Supported only YAML_SCALAR_NODE for variable {}", name); - } - return 0; + if (spec.node->type == YAML_SCALAR_NODE) { + PDI::Expression data_expression{PDI::to_string(spec)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + if (!spec_ref) { + context().logger().error("The PDIData named \"{}\" is not readable.", name); + return 0; + } + auto data_type = spec_ref.type()->evaluate(context()); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { + PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED) { + return data_expression.to_long(context()); + } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { + return data_expression.to_long(context()); + } else { + context().logger().error("Unknown Scalar Type for variable {}. 
The type must be an integer", PDI::to_string(spec)); + } + } else { + context().logger().error("The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); + } + return 0; + } else { + context().logger().error("Supported only YAML_SCALAR_NODE for variable {}", name); + } + return 0; } -std::string catalyst_plugin::ReadPDIExecuteEventName() +std::string catalyst_plugin::read_PDI_execute_event_name() { - std::string eventName; - auto execute_spec = PC_get(this->SpecTree, ".on_event"); - if (PC_status(execute_spec) == PC_OK) - { - eventName = PDI::to_string(execute_spec); - } - return eventName; + std::string event_name; + auto execute_spec = PC_get(this->m_spec_tree, ".on_event"); + if (PC_status(execute_spec) == PC_OK) { + event_name = PDI::to_string(execute_spec); + } + return event_name; } diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index 43d2df9a5..41547db25 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -15,35 +15,38 @@ typedef struct conduit_node_impl conduit_node; * from PDI API calls (PDI_init, PDI_multi_expose, PDI_finalize). * * It leverages the specification tree to copy only pointer to data. The conduit node structure of - * the catalyst_execute call is defined in the spec tree, and dynamic data are referenced with the - * special keyword "PDI_data". + * the catalyst_execute call is defined in the spec tree. 
* */ -class catalyst_plugin : public PDI::Plugin +class catalyst_plugin: public PDI::Plugin { public: - catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree); - ~catalyst_plugin(); + catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree); + ~catalyst_plugin(); private: - void ProcessPDIInit(); - void ProcessData(const std::string& data_name, PDI::Ref ref); - void ProcessEvent(const std::string& event_name); - - void RunCatalystInitialize(); - void RunCatalystExecute(); - void RunCatalystFinalize(); - void FillNodeWithPDIDataArray(conduit_node* node, PC_tree_t tree); - void FillNodeWithScalarPDIData(conduit_node* node, const std::string& name, - const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); - void FillNodeWithArrayPDIData(conduit_node* node, const std::string& name, PC_tree_t& tree, - const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r); - long GetLongValueFromSpecNode(PC_tree_t& spec, const std::string& name); - std::string ReadPDIExecuteEventName(); - - PC_tree_t SpecTree; - std::unordered_map CurrentPDIData; - std::string PDIExecuteEventName; + void process_PDI_init(); + void process_data(const std::string& data_name, PDI::Ref ref); + void process_event(const std::string& event_name); + + void run_catalyst_initialize(); + void run_catalyst_execute(); + void run_catalyst_finalize(); + void fill_node_with_PDI_data_array(conduit_node* node, PC_tree_t tree); + void fill_node_with_scalar_PDI_data(conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); + void fill_node_with_array_PDI_data( + conduit_node* node, + const std::string& name, + PC_tree_t& tree, + const PDI::Array_datatype& array_datatype, + PDI::Ref_r& ref_r + ); + long get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name); + std::string read_PDI_execute_event_name(); + + PC_tree_t m_spec_tree; + std::unordered_map m_current_PDI_data; + std::string m_PDI_execute_event_name; }; PDI_PLUGIN(catalyst) diff 
--git a/plugins/catalyst/tests/Attributes.cxx b/plugins/catalyst/tests/Attributes.cxx index ba837a5cf..d635d58a8 100644 --- a/plugins/catalyst/tests/Attributes.cxx +++ b/plugins/catalyst/tests/Attributes.cxx @@ -4,57 +4,51 @@ Attributes::Attributes(Grid* grid) { - this->GridPtr = grid; + this->m_grid_ptr = grid; } Attributes::~Attributes() { - this->GridPtr = nullptr; + this->m_grid_ptr = nullptr; } -void Attributes::UpdateFields(double time) +void Attributes::update_fields(double time) { - size_t numPoints = this->GridPtr->GetNumberOfPoints(); - this->Velocity.resize(numPoints * 3); - for (size_t pt = 0; pt < numPoints; pt++) - { - const double* coord = this->GridPtr->GetPoint(pt); - this->Velocity[pt] = coord[1] * time; - } - std::fill(this->Velocity.begin() + numPoints, this->Velocity.end(), 0.); - - size_t numCells = this->GridPtr->GetNumberOfCells(); - this->Pressure.resize(numCells); - - double tmp_var = (numCells * time * 0.5); - size_t first_cells; - if (tmp_var < 0) - { - first_cells = 0; - } - else - { - first_cells = (size_t)tmp_var; - } - - std::fill(this->Pressure.begin(), this->Pressure.end(), -1.f); - std::fill(this->Pressure.begin() + first_cells, this->Pressure.end(), 1.f); + size_t num_points = this->m_grid_ptr->get_number_of_points(); + this->m_velocity.resize(num_points * 3); + for (size_t pt = 0; pt < num_points; pt++) { + const double* coord = this->m_grid_ptr->get_point(pt); + this->m_velocity[pt] = coord[1] * time; + } + std::fill(this->m_velocity.begin() + num_points, this->m_velocity.end(), 0.); + + size_t num_cells = this->m_grid_ptr->get_number_of_cells(); + this->m_pressure.resize(num_cells); + + double tmp_var = (num_cells * time * 0.5); + size_t first_cells; + if (tmp_var < 0) { + first_cells = 0; + } else { + first_cells = (size_t)tmp_var; + } + + std::fill(this->m_pressure.begin(), this->m_pressure.end(), -1.f); + std::fill(this->m_pressure.begin() + first_cells, this->m_pressure.end(), 1.f); } -double* 
Attributes::GetVelocityArray() +double* Attributes::get_velocity_array() { - if (this->Velocity.empty()) - { - return nullptr; - } - return &this->Velocity[0]; + if (this->m_velocity.empty()) { + return nullptr; + } + return &this->m_velocity[0]; } -float* Attributes::GetPressureArray() +float* Attributes::get_pressure_array() { - if (this->Pressure.empty()) - { - return nullptr; - } - return &this->Pressure[0]; + if (this->m_pressure.empty()) { + return nullptr; + } + return &this->m_pressure[0]; } diff --git a/plugins/catalyst/tests/Attributes.h b/plugins/catalyst/tests/Attributes.h index 7dafc58e7..e5680ea84 100644 --- a/plugins/catalyst/tests/Attributes.h +++ b/plugins/catalyst/tests/Attributes.h @@ -6,22 +6,22 @@ class Grid; class Attributes { - // A class for generating and storing point and cell fields. - // Velocity is stored at the points and pressure is stored - // for the cells. The current velocity profile is for a - // shearing flow with U(y,t) = y*t, V = 0 and W = 0. - // Pressure is constant through the domain. + // A class for generating and storing point and cell fields. + // Velocity is stored at the points and pressure is stored + // for the cells. The current velocity profile is for a + // shearing flow with U(y,t) = y*t, V = 0 and W = 0. + // Pressure is constant through the domain. 
public: - Attributes(Grid* grid); - ~Attributes(); - void UpdateFields(double time); - double* GetVelocityArray(); - float* GetPressureArray(); + Attributes(Grid* grid); + ~Attributes(); + void update_fields(double time); + double* get_velocity_array(); + float* get_pressure_array(); private: - std::vector Velocity; - std::vector Pressure; - Grid* GridPtr; + std::vector m_velocity; + std::vector m_pressure; + Grid* m_grid_ptr; }; #endif // ATTRIBUTES_H diff --git a/plugins/catalyst/tests/CMakeLists.txt b/plugins/catalyst/tests/CMakeLists.txt index f2a48e851..cb44f0ef2 100644 --- a/plugins/catalyst/tests/CMakeLists.txt +++ b/plugins/catalyst/tests/CMakeLists.txt @@ -12,11 +12,7 @@ add_executable(TestPDICatalyst find_package(MPI COMPONENTS C CXX REQUIRED) target_link_libraries(TestPDICatalyst - PRIVATE - MPI::MPI_C - MPI::MPI_CXX - paraconf::paraconf - PDI::pdi) + PRIVATE PDI::PDI_C MPI::MPI_C) set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests) configure_file(pdi.yml.in pdi.yml) diff --git a/plugins/catalyst/tests/Grid.cxx b/plugins/catalyst/tests/Grid.cxx index 706998473..3d0378e77 100644 --- a/plugins/catalyst/tests/Grid.cxx +++ b/plugins/catalyst/tests/Grid.cxx @@ -1,101 +1,90 @@ #include "Grid.h" +#include #include #include #include -#include -Grid::Grid(const unsigned int numPoints[3], const double spacing[3]) +Grid::Grid(const unsigned int num_points[3], const double spacing[3]) { - if (numPoints[0] == 0 || numPoints[1] == 0 || numPoints[2] == 0) - { - throw std::runtime_error("Must have a non-zero amount of points in each direction."); - } - // in parallel, we do a simple partitioning in the x-direction. - int mpiSize = 1; - int mpiRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); - MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + if (num_points[0] == 0 || num_points[1] == 0 || num_points[2] == 0) { + throw std::runtime_error("Must have a non-zero amount of points in each direction."); + } + // in parallel, we do a simple partitioning in the x-direction. 
+ int mpi_size = 1; + int mpi_rank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - unsigned int startXPoint = mpiRank * numPoints[0] / mpiSize; - unsigned int endXPoint = (mpiRank + 1) * numPoints[0] / mpiSize; - if (mpiSize != mpiRank + 1) - { - endXPoint++; - } + unsigned int start_x_point = mpi_rank * num_points[0] / mpi_size; + unsigned int end_x_point = (mpi_rank + 1) * num_points[0] / mpi_size; + if (mpi_size != mpi_rank + 1) { + end_x_point++; + } - // create the points -- slowest in the x and fastest in the z directions - double coord[3] = { 0, 0, 0 }; - for (unsigned int i = startXPoint; i < endXPoint; i++) - { - coord[0] = i * spacing[0]; - for (unsigned int j = 0; j < numPoints[1]; j++) - { - coord[1] = j * spacing[1]; - for (unsigned int k = 0; k < numPoints[2]; k++) - { - coord[2] = k * spacing[2]; - // add the coordinate to the end of the vector - std::copy(coord, coord + 3, std::back_inserter(this->Points)); - } - } - } - // create the hex cells - unsigned int cellPoints[8]; - unsigned int numXPoints = endXPoint - startXPoint; - for (unsigned int i = 0; i < numXPoints - 1; i++) - { - for (unsigned int j = 0; j < numPoints[1] - 1; j++) - { - for (unsigned int k = 0; k < numPoints[2] - 1; k++) - { - cellPoints[0] = i * numPoints[1] * numPoints[2] + j * numPoints[2] + k; - cellPoints[1] = (i + 1) * numPoints[1] * numPoints[2] + j * numPoints[2] + k; - cellPoints[2] = (i + 1) * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k; - cellPoints[3] = i * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k; - cellPoints[4] = i * numPoints[1] * numPoints[2] + j * numPoints[2] + k + 1; - cellPoints[5] = (i + 1) * numPoints[1] * numPoints[2] + j * numPoints[2] + k + 1; - cellPoints[6] = (i + 1) * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k + 1; - cellPoints[7] = i * numPoints[1] * numPoints[2] + (j + 1) * numPoints[2] + k + 1; - std::copy(cellPoints, cellPoints + 8, 
std::back_inserter(this->Cells)); - } - } - } + // create the points -- slowest in the x and fastest in the z directions + double coord[3] = {0, 0, 0}; + for (unsigned int i = start_x_point; i < end_x_point; i++) { + coord[0] = i * spacing[0]; + for (unsigned int j = 0; j < num_points[1]; j++) { + coord[1] = j * spacing[1]; + for (unsigned int k = 0; k < num_points[2]; k++) { + coord[2] = k * spacing[2]; + // add the coordinate to the end of the vector + std::copy(coord, coord + 3, std::back_inserter(this->m_points)); + } + } + } + // create the hex cells + unsigned int cell_points[8]; + unsigned int numXPoints = end_x_point - start_x_point; + for (unsigned int i = 0; i < numXPoints - 1; i++) { + for (unsigned int j = 0; j < num_points[1] - 1; j++) { + for (unsigned int k = 0; k < num_points[2] - 1; k++) { + cell_points[0] = i * num_points[1] * num_points[2] + j * num_points[2] + k; + cell_points[1] = (i + 1) * num_points[1] * num_points[2] + j * num_points[2] + k; + cell_points[2] = (i + 1) * num_points[1] * num_points[2] + (j + 1) * num_points[2] + k; + cell_points[3] = i * num_points[1] * num_points[2] + (j + 1) * num_points[2] + k; + cell_points[4] = i * num_points[1] * num_points[2] + j * num_points[2] + k + 1; + cell_points[5] = (i + 1) * num_points[1] * num_points[2] + j * num_points[2] + k + 1; + cell_points[6] = (i + 1) * num_points[1] * num_points[2] + (j + 1) * num_points[2] + k + 1; + cell_points[7] = i * num_points[1] * num_points[2] + (j + 1) * num_points[2] + k + 1; + std::copy(cell_points, cell_points + 8, std::back_inserter(this->m_cells)); + } + } + } } -size_t Grid::GetNumberOfPoints() const +size_t Grid::get_number_of_points() const { - return this->Points.size() / 3; + return this->m_points.size() / 3; } -size_t Grid::GetNumberOfCells() const +size_t Grid::get_number_of_cells() const { - return this->Cells.size() / 8; + return this->m_cells.size() / 8; } -const double* Grid::GetPointsArray() const +const double* Grid::get_points_array() const { 
- if (this->Points.empty()) - { - return nullptr; - } - return this->Points.data(); + if (this->m_points.empty()) { + return nullptr; + } + return this->m_points.data(); } -const double* Grid::GetPoint(size_t pointId) const +const double* Grid::get_point(size_t pointId) const { - if (pointId >= this->Points.size()) - { - return nullptr; - } - return &(this->Points[pointId * 3]); + if (pointId >= this->m_points.size()) { + return nullptr; + } + return &(this->m_points[pointId * 3]); } -const unsigned int* Grid::GetCellPoints(size_t cellId) const +const unsigned int* Grid::get_cell_points(size_t cellId) const { - if (cellId >= this->Cells.size()) - { - return nullptr; - } - return &(this->Cells[cellId * 8]); + if (cellId >= this->m_cells.size()) { + return nullptr; + } + return &(this->m_cells[cellId * 8]); } diff --git a/plugins/catalyst/tests/Grid.h b/plugins/catalyst/tests/Grid.h index 83ee2919f..0c2e96782 100644 --- a/plugins/catalyst/tests/Grid.h +++ b/plugins/catalyst/tests/Grid.h @@ -7,16 +7,16 @@ class Grid { public: - Grid(const unsigned int numPoints[3], const double spacing[3]); - size_t GetNumberOfPoints() const; - size_t GetNumberOfCells() const; - const double* GetPointsArray() const; - const double* GetPoint(size_t pointId) const; - const unsigned int* GetCellPoints(size_t cellId) const; + Grid(const unsigned int num_points[3], const double spacing[3]); + size_t get_number_of_points() const; + size_t get_number_of_cells() const; + const double* get_points_array() const; + const double* get_point(size_t pointId) const; + const unsigned int* get_cell_points(size_t cellId) const; private: - std::vector Points; - std::vector Cells; + std::vector m_points; + std::vector m_cells; }; #endif diff --git a/plugins/catalyst/tests/PDIAdaptor.cxx b/plugins/catalyst/tests/PDIAdaptor.cxx index de383212e..0ce1e57ad 100644 --- a/plugins/catalyst/tests/PDIAdaptor.cxx +++ b/plugins/catalyst/tests/PDIAdaptor.cxx @@ -4,85 +4,96 @@ #include "Grid.h" #include -#include 
#include +#include -namespace PDIAdaptor -{ +namespace PDI_adaptor { -bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) +bool initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) { - PC_tree_t conf = PC_parse_path(pdi_yaml_config_file_path.c_str()); - auto status = PDI_init(PC_get(conf, "")); - if (status != PDI_status_t::PDI_OK) - { - return false; - } + PC_tree_t conf = PC_parse_path(pdi_yaml_config_file_path.c_str()); + auto status = PDI_init(PC_get(conf, "")); + if (status != PDI_status_t::PDI_OK) { + return false; + } - auto points_array_size = grid.GetNumberOfPoints() * 3; - status = PDI_expose("points_array_size", &points_array_size, PDI_OUT); - if (status != PDI_status_t::PDI_OK) - { - return false; - } + auto points_array_size = grid.get_number_of_points() * 3; + status = PDI_expose("points_array_size", &points_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) { + return false; + } - auto number_of_cells = grid.GetNumberOfCells() * 8; - status = PDI_expose("cell_points_size", &number_of_cells, PDI_OUT); - if (status != PDI_status_t::PDI_OK) - { - return false; - } + auto number_of_cells = grid.get_number_of_cells() * 8; + status = PDI_expose("cell_points_size", &number_of_cells, PDI_OUT); + if (status != PDI_status_t::PDI_OK) { + return false; + } - auto velocity_array_size = grid.GetNumberOfPoints() * 3; - status = PDI_expose("velocity_array_size", &velocity_array_size, PDI_OUT); - if (status != PDI_status_t::PDI_OK) - { - return false; - } + auto velocity_array_size = grid.get_number_of_points() * 3; + status = PDI_expose("velocity_array_size", &velocity_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) { + return false; + } - auto pressure_array_size = grid.GetNumberOfCells(); - status = PDI_expose("pressure_array_size", &pressure_array_size, PDI_OUT); - if (status != PDI_status_t::PDI_OK) - { - return false; - } + auto pressure_array_size = grid.get_number_of_cells(); + status = 
PDI_expose("pressure_array_size", &pressure_array_size, PDI_OUT); + if (status != PDI_status_t::PDI_OK) { + return false; + } - return true; + return true; } -bool Execute(int cycle, double time, Grid& grid, Attributes& attribs) +bool execute(int cycle, double time, Grid& grid, Attributes& attribs) { - auto number_of_points = grid.GetNumberOfPoints(); - auto number_of_cells = grid.GetNumberOfCells(); + auto number_of_points = grid.get_number_of_points(); + auto number_of_cells = grid.get_number_of_cells(); - auto status = PDI_multi_expose( - // - "catalyst_execute", - // - "cycle", &cycle, PDI_OUT, - // - "time", &time, PDI_OUT, - // - "number_of_points", &number_of_points, PDI_OUT, - // - "points_array", grid.GetPointsArray(), PDI_OUT, - // - "number_of_cells", &number_of_cells, PDI_OUT, - // - "cell_points", grid.GetCellPoints(0), PDI_OUT, - // - "velocity_array", attribs.GetVelocityArray(), PDI_OUT, - // - "pressure_array", attribs.GetPressureArray(), PDI_OUT, - // - NULL); + auto status = PDI_multi_expose( + // + "catalyst_execute", + // + "cycle", + &cycle, + PDI_OUT, + // + "time", + &time, + PDI_OUT, + // + "number_of_points", + &number_of_points, + PDI_OUT, + // + "points_array", + grid.get_points_array(), + PDI_OUT, + // + "number_of_cells", + &number_of_cells, + PDI_OUT, + // + "cell_points", + grid.get_cell_points(0), + PDI_OUT, + // + "velocity_array", + attribs.get_velocity_array(), + PDI_OUT, + // + "pressure_array", + attribs.get_pressure_array(), + PDI_OUT, + // + NULL + ); - return status == PDI_status_t::PDI_OK; + return status == PDI_status_t::PDI_OK; } -bool Finalize() +bool finalize() { - auto status = PDI_finalize(); - return status == PDI_status_t::PDI_OK; -} + auto status = PDI_finalize(); + return status == PDI_status_t::PDI_OK; } +} // namespace PDI_adaptor diff --git a/plugins/catalyst/tests/PDIAdaptor.h b/plugins/catalyst/tests/PDIAdaptor.h index a00dbfbfc..2cfcdbcad 100644 --- a/plugins/catalyst/tests/PDIAdaptor.h +++ 
b/plugins/catalyst/tests/PDIAdaptor.h @@ -6,11 +6,10 @@ class Grid; class Attributes; -namespace PDIAdaptor -{ -bool Initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid); -bool Execute(int cycle, double time, Grid& grid, Attributes& attribs); -bool Finalize(); -} +namespace PDI_adaptor { +bool initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid); +bool execute(int cycle, double time, Grid& grid, Attributes& attribs); +bool finalize(); +} // namespace PDI_adaptor #endif diff --git a/plugins/catalyst/tests/main.cxx b/plugins/catalyst/tests/main.cxx index 6330e2a65..7aa4f0a48 100644 --- a/plugins/catalyst/tests/main.cxx +++ b/plugins/catalyst/tests/main.cxx @@ -2,52 +2,47 @@ #include "Grid.h" #include "PDIAdaptor.h" +#include #include #include -#include int main(int argc, char* argv[]) { - MPI_Init(&argc, &argv); - unsigned int numPoints[3] = { 70, 60, 44 }; - double spacing[3] = { 1, 1.1, 1.3 }; - Grid grid(numPoints, spacing); - Attributes attributes(&grid); + MPI_Init(&argc, &argv); + unsigned int num_points[3] = {70, 60, 44}; + double spacing[3] = {1, 1.1, 1.3}; + Grid grid(num_points, spacing); + Attributes attributes(&grid); - if (argc < 2) - { - std::cerr << "expecting the pdi yaml config as argument" << std::endl; - return EXIT_FAILURE; - } - auto code = PDIAdaptor::Initialize(std::string(argv[1]), grid); - if (!code) - { - std::cerr << "PDIAdaptor::Initialize failure" << std::endl; - return EXIT_FAILURE; - } + if (argc < 2) { + std::cerr << "expecting the pdi yaml config as argument" << std::endl; + return EXIT_FAILURE; + } + auto code = PDI_adaptor::initialize(std::string(argv[1]), grid); + if (!code) { + std::cerr << "PDIAdaptor::Initialize failure" << std::endl; + return EXIT_FAILURE; + } - unsigned int numberOfTimeSteps = 10; - for (unsigned int timeStep = 0; timeStep < numberOfTimeSteps; timeStep++) - { - // use a time step length of 0.1 - double time = timeStep * 0.1; - attributes.UpdateFields(time); + 
unsigned int number_of_time_steps = 10; + for (unsigned int time_step = 0; time_step < number_of_time_steps; time_step++) { + // use a time step length of 0.1 + double time = time_step * 0.1; + attributes.update_fields(time); - code = PDIAdaptor::Execute(timeStep, time, grid, attributes); - if (!code) - { - std::cerr << "PDIAdaptor::Execute failure" << std::endl; - return EXIT_FAILURE; - } - } + code = PDI_adaptor::execute(time_step, time, grid, attributes); + if (!code) { + std::cerr << "PDIAdaptor::Execute failure" << std::endl; + return EXIT_FAILURE; + } + } - code = PDIAdaptor::Finalize(); - if (!code) - { - std::cerr << "PDIAdaptor::Finalize failure" << std::endl; - return EXIT_FAILURE; - } + code = PDI_adaptor::finalize(); + if (!code) { + std::cerr << "PDIAdaptor::Finalize failure" << std::endl; + return EXIT_FAILURE; + } - MPI_Finalize(); - return EXIT_SUCCESS; + MPI_Finalize(); + return EXIT_SUCCESS; } From d7b8c2a3973f98868a3cd3014184c2aec2e0c7d2 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Fri, 5 Dec 2025 10:54:53 +0100 Subject: [PATCH 20/31] filename in snake case --- plugins/catalyst/pdi_catalyst_plugin.cxx | 38 +++++++++---------- plugins/catalyst/pdi_catalyst_plugin.h | 14 +++---- plugins/catalyst/tests/CMakeLists.txt | 13 +++---- .../tests/{Attributes.cxx => attributes.cxx} | 4 +- .../tests/{Attributes.h => attributes.h} | 0 plugins/catalyst/tests/{Grid.cxx => grid.cxx} | 2 +- plugins/catalyst/tests/{Grid.h => grid.h} | 0 plugins/catalyst/tests/main.cxx | 18 ++++----- .../tests/{PDIAdaptor.cxx => pdi_adaptor.cxx} | 8 ++-- .../tests/{PDIAdaptor.h => pdi_adaptor.h} | 4 +- 10 files changed, 50 insertions(+), 51 deletions(-) rename plugins/catalyst/tests/{Attributes.cxx => attributes.cxx} (96%) rename plugins/catalyst/tests/{Attributes.h => attributes.h} (100%) rename plugins/catalyst/tests/{Grid.cxx => grid.cxx} (99%) rename plugins/catalyst/tests/{Grid.h => grid.h} (100%) rename plugins/catalyst/tests/{PDIAdaptor.cxx => pdi_adaptor.cxx} (95%) 
rename plugins/catalyst/tests/{PDIAdaptor.h => pdi_adaptor.h} (84%) diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index b8ab2f06a..14be612e3 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -13,7 +13,7 @@ catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) : Plugin{ctx} , m_spec_tree(spec_tree) { - ctx.callbacks().add_init_callback([this]() { this->process_PDI_init(); }); + ctx.callbacks().add_init_callback([this]() { this->process_pdi_init(); }); ctx.callbacks().add_data_callback([this](const std::string& data_name, PDI::Ref ref) { this->process_data(data_name, ref); }); ctx.callbacks().add_event_callback([this](const std::string& event_name) { this->process_event(event_name); }); } @@ -23,27 +23,27 @@ catalyst_plugin::~catalyst_plugin() run_catalyst_finalize(); } -void catalyst_plugin::process_PDI_init() +void catalyst_plugin::process_pdi_init() { this->run_catalyst_initialize(); - this->m_PDI_execute_event_name = this->read_PDI_execute_event_name(); + this->m_pdi_execute_event_name = this->read_pdi_execute_event_name(); } void catalyst_plugin::process_data(const std::string& data_name, PDI::Ref ref) { context().logger().debug("User has shared a data named {}", data_name); - auto it = this->m_current_PDI_data.find(data_name); - if (it != this->m_current_PDI_data.end()) { + auto it = this->m_current_pdi_data.find(data_name); + if (it != this->m_current_pdi_data.end()) { context().logger().warn("Data named '{}' already recorded, the previous value will overwritten.", data_name); it->second = ref.copy(); } else { - this->m_current_PDI_data.emplace(data_name, ref); + this->m_current_pdi_data.emplace(data_name, ref); } } void catalyst_plugin::process_event(const std::string& event_name) { - if (event_name == this->m_PDI_execute_event_name) { + if (event_name == this->m_pdi_execute_event_name) { run_catalyst_execute(); } } @@ -103,7 +103,7 @@ 
void catalyst_plugin::run_catalyst_execute() auto data_type = spec_ref.type()->evaluate(context()); if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_scalar_PDI_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); } else { context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); } @@ -118,7 +118,7 @@ void catalyst_plugin::run_catalyst_execute() auto data_type = spec_ref.type()->evaluate(context()); if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_scalar_PDI_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); + fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); } else { context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); } @@ -144,7 +144,7 @@ void catalyst_plugin::run_catalyst_execute() for (int i = data_tree_size - 1; i >= 0; --i) { auto key = PC_get(current.tree, "{%d}", i); if (PDI::to_string(key) == "PDI_data_array") { - this->fill_node_with_PDI_data_array(conduit_cpp::c_node(¤t_node), current.tree); + this->fill_node_with_pdi_data_array(conduit_cpp::c_node(¤t_node), current.tree); pdi_data_array = true; break; // break the loop } @@ -172,8 +172,8 @@ void catalyst_plugin::run_catalyst_execute() context().logger().error("catalyst_execute failure"); } - // clear m_current_PDI_data at each iteration - this->m_current_PDI_data.clear(); + // clear m_current_pdi_data at each iteration + this->m_current_pdi_data.clear(); } void catalyst_plugin::run_catalyst_finalize() @@ -186,7 +186,7 @@ void catalyst_plugin::run_catalyst_finalize() } } -void catalyst_plugin::fill_node_with_PDI_data_array(conduit_node* node, PC_tree_t tree) +void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t 
tree) { auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { @@ -196,8 +196,8 @@ void catalyst_plugin::fill_node_with_PDI_data_array(conduit_node* node, PC_tree_ std::string name = PDI::to_string(name_spec); - auto it = this->m_current_PDI_data.find(name); - if (it == this->m_current_PDI_data.end()) { + auto it = this->m_current_pdi_data.find(name); + if (it == this->m_current_pdi_data.end()) { context().logger().error("Can't find the PDI_data named: {}", name); return; } @@ -211,13 +211,13 @@ void catalyst_plugin::fill_node_with_PDI_data_array(conduit_node* node, PC_tree_ auto data_type = ref_r.type(); if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_array_PDI_data(node, name, tree, *array_datatype, ref_r); + fill_node_with_array_pdi_data(node, name, tree, *array_datatype, ref_r); } else { context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); } } -void catalyst_plugin::fill_node_with_scalar_PDI_data( +void catalyst_plugin::fill_node_with_scalar_pdi_data( conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, @@ -265,7 +265,7 @@ void catalyst_plugin::fill_node_with_scalar_PDI_data( } } -void catalyst_plugin::fill_node_with_array_PDI_data( +void catalyst_plugin::fill_node_with_array_pdi_data( conduit_node* node, const std::string& name, PC_tree_t& tree, @@ -503,7 +503,7 @@ long catalyst_plugin::get_long_value_from_spec_node(PC_tree_t& spec, const std:: return 0; } -std::string catalyst_plugin::read_PDI_execute_event_name() +std::string catalyst_plugin::read_pdi_execute_event_name() { std::string event_name; auto execute_spec = PC_get(this->m_spec_tree, ".on_event"); diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index 41547db25..4a4f77cfd 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -25,16 +25,16 @@ class catalyst_plugin: 
public PDI::Plugin ~catalyst_plugin(); private: - void process_PDI_init(); + void process_pdi_init(); void process_data(const std::string& data_name, PDI::Ref ref); void process_event(const std::string& event_name); void run_catalyst_initialize(); void run_catalyst_execute(); void run_catalyst_finalize(); - void fill_node_with_PDI_data_array(conduit_node* node, PC_tree_t tree); - void fill_node_with_scalar_PDI_data(conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); - void fill_node_with_array_PDI_data( + void fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t tree); + void fill_node_with_scalar_pdi_data(conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); + void fill_node_with_array_pdi_data( conduit_node* node, const std::string& name, PC_tree_t& tree, @@ -42,11 +42,11 @@ class catalyst_plugin: public PDI::Plugin PDI::Ref_r& ref_r ); long get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name); - std::string read_PDI_execute_event_name(); + std::string read_pdi_execute_event_name(); PC_tree_t m_spec_tree; - std::unordered_map m_current_PDI_data; - std::string m_PDI_execute_event_name; + std::unordered_map m_current_pdi_data; + std::string m_pdi_execute_event_name; }; PDI_PLUGIN(catalyst) diff --git a/plugins/catalyst/tests/CMakeLists.txt b/plugins/catalyst/tests/CMakeLists.txt index cb44f0ef2..83bc76b27 100644 --- a/plugins/catalyst/tests/CMakeLists.txt +++ b/plugins/catalyst/tests/CMakeLists.txt @@ -1,12 +1,12 @@ # Creation of executable add_executable(TestPDICatalyst - Grid.cxx - Grid.h - Attributes.h - Attributes.cxx + grid.cxx + grid.h + attributes.h + attributes.cxx main.cxx - PDIAdaptor.h - PDIAdaptor.cxx) + pdi_adaptor.h + pdi_adaptor.cxx) # MPI find_package(MPI COMPONENTS C CXX REQUIRED) @@ -17,7 +17,6 @@ target_link_libraries(TestPDICatalyst set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests) 
configure_file(pdi.yml.in pdi.yml) -# find_package(Python3 REQUIRED COMPONENTS Interpreter Development) //To be checked when is necessary find_package(Python3 COMPONENTS Interpreter) add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/") diff --git a/plugins/catalyst/tests/Attributes.cxx b/plugins/catalyst/tests/attributes.cxx similarity index 96% rename from plugins/catalyst/tests/Attributes.cxx rename to plugins/catalyst/tests/attributes.cxx index d635d58a8..68b6c3b67 100644 --- a/plugins/catalyst/tests/Attributes.cxx +++ b/plugins/catalyst/tests/attributes.cxx @@ -1,6 +1,6 @@ -#include "Attributes.h" +#include "attributes.h" -#include "Grid.h" +#include "grid.h" Attributes::Attributes(Grid* grid) { diff --git a/plugins/catalyst/tests/Attributes.h b/plugins/catalyst/tests/attributes.h similarity index 100% rename from plugins/catalyst/tests/Attributes.h rename to plugins/catalyst/tests/attributes.h diff --git a/plugins/catalyst/tests/Grid.cxx b/plugins/catalyst/tests/grid.cxx similarity index 99% rename from plugins/catalyst/tests/Grid.cxx rename to plugins/catalyst/tests/grid.cxx index 3d0378e77..7d75daa0c 100644 --- a/plugins/catalyst/tests/Grid.cxx +++ b/plugins/catalyst/tests/grid.cxx @@ -1,4 +1,4 @@ -#include "Grid.h" +#include "grid.h" #include #include diff --git a/plugins/catalyst/tests/Grid.h b/plugins/catalyst/tests/grid.h similarity index 100% rename from plugins/catalyst/tests/Grid.h rename to plugins/catalyst/tests/grid.h diff --git a/plugins/catalyst/tests/main.cxx b/plugins/catalyst/tests/main.cxx index 7aa4f0a48..386ba0cc2 100644 --- a/plugins/catalyst/tests/main.cxx +++ b/plugins/catalyst/tests/main.cxx @@ -1,6 +1,6 @@ -#include "Attributes.h" -#include "Grid.h" -#include "PDIAdaptor.h" +#include "attributes.h" +#include "grid.h" +#include "pdi_adaptor.h" #include #include @@ -18,9 +18,9 @@ int main(int argc, char* argv[]) std::cerr << 
"expecting the pdi yaml config as argument" << std::endl; return EXIT_FAILURE; } - auto code = PDI_adaptor::initialize(std::string(argv[1]), grid); + auto code = pdi_adaptor::initialize(std::string(argv[1]), grid); if (!code) { - std::cerr << "PDIAdaptor::Initialize failure" << std::endl; + std::cerr << "pdi_adaptor::Initialize failure" << std::endl; return EXIT_FAILURE; } @@ -30,16 +30,16 @@ int main(int argc, char* argv[]) double time = time_step * 0.1; attributes.update_fields(time); - code = PDI_adaptor::execute(time_step, time, grid, attributes); + code = pdi_adaptor::execute(time_step, time, grid, attributes); if (!code) { - std::cerr << "PDIAdaptor::Execute failure" << std::endl; + std::cerr << "pdi_adaptor::Execute failure" << std::endl; return EXIT_FAILURE; } } - code = PDI_adaptor::finalize(); + code = pdi_adaptor::finalize(); if (!code) { - std::cerr << "PDIAdaptor::Finalize failure" << std::endl; + std::cerr << "pdi_adaptor::Finalize failure" << std::endl; return EXIT_FAILURE; } diff --git a/plugins/catalyst/tests/PDIAdaptor.cxx b/plugins/catalyst/tests/pdi_adaptor.cxx similarity index 95% rename from plugins/catalyst/tests/PDIAdaptor.cxx rename to plugins/catalyst/tests/pdi_adaptor.cxx index 0ce1e57ad..ca03ff08a 100644 --- a/plugins/catalyst/tests/PDIAdaptor.cxx +++ b/plugins/catalyst/tests/pdi_adaptor.cxx @@ -1,13 +1,13 @@ -#include "PDIAdaptor.h" +#include "pdi_adaptor.h" -#include "Attributes.h" -#include "Grid.h" +#include "attributes.h" +#include "grid.h" #include #include #include -namespace PDI_adaptor { +namespace pdi_adaptor { bool initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid) { diff --git a/plugins/catalyst/tests/PDIAdaptor.h b/plugins/catalyst/tests/pdi_adaptor.h similarity index 84% rename from plugins/catalyst/tests/PDIAdaptor.h rename to plugins/catalyst/tests/pdi_adaptor.h index 2cfcdbcad..6c7a6af89 100644 --- a/plugins/catalyst/tests/PDIAdaptor.h +++ b/plugins/catalyst/tests/pdi_adaptor.h @@ -6,10 +6,10 @@ 
class Grid; class Attributes; -namespace PDI_adaptor { +namespace pdi_adaptor { bool initialize(const std::string& pdi_yaml_config_file_path, const Grid& grid); bool execute(int cycle, double time, Grid& grid, Attributes& attribs); bool finalize(); -} // namespace PDI_adaptor +} // namespace pdi_adaptor #endif From 44962cd775fda5f34c25db89bb5898b383bf0c73 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Wed, 18 Feb 2026 15:28:30 +0100 Subject: [PATCH 21/31] small changes --- plugins/catalyst/CMakeLists.txt | 2 +- plugins/catalyst/README.md | 2 ++ plugins/catalyst/tests/catalyst_pipeline_with_rendering.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 3088e2397..93ed07ff2 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -8,7 +8,7 @@ project(pdi_catalyst_plugin LANGUAGES C CXX) # PDI find_package(PDI REQUIRED COMPONENTS plugins) -# Python CATALYST_WRAP_FORTRAN=ON +# Python find_package(Python3 REQUIRED COMPONENTS Interpreter Development) # Catalyst diff --git a/plugins/catalyst/README.md b/plugins/catalyst/README.md index a7cd9c66d..81b990e2e 100644 --- a/plugins/catalyst/README.md +++ b/plugins/catalyst/README.md @@ -19,6 +19,8 @@ This PDI plugin pushes PDI shared data to the Catalyst 2 API. The goal is to lev The test executable expects the config yaml file as arguments. 
+# Use Catalyst-Paraview + To use the Catalyst-ParaView implementation, you should also set the following environment variables: - `CATALYST_IMPLEMENTATION_NAME=paraview` - `CATALYST_IMPLEMENTATION_PATHS=path/to/paraview/install/lib/catalyst` diff --git a/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py b/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py index 51769ac96..e2d8c5291 100644 --- a/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py +++ b/plugins/catalyst/tests/catalyst_pipeline_with_rendering.py @@ -63,8 +63,8 @@ # Catalyst options options = catalyst.Options() ## 0: no client, generate the images -## 1: interactif -options.EnableCatalystLive = 0 +## 1: live visualization +options.EnableCatalystLive = 1 # Greeting to ensure that ctest knows this script is being imported From 43bf19fa8ead3d341aac18b66bc4be211faac18c Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 14 Apr 2026 13:30:59 +0200 Subject: [PATCH 22/31] adding example with catalyst --- example/CMakeLists.txt | 35 ++++++ example/catalyst.yml.in | 57 +++++++++ example/catalyst_pipeline_with_rendering.py | 131 ++++++++++++++++++++ example/catalyst_serial.yml.in | 58 +++++++++ example/example.c | 2 +- 5 files changed, 282 insertions(+), 1 deletion(-) create mode 100644 example/catalyst.yml.in create mode 100644 example/catalyst_pipeline_with_rendering.py create mode 100644 example/catalyst_serial.yml.in diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt index 30a736faa..01ef958d3 100644 --- a/example/CMakeLists.txt +++ b/example/CMakeLists.txt @@ -161,3 +161,38 @@ add_test(NAME PDI_example_trace_P COMMAND "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" set_property(TEST PDI_example_trace_P PROPERTY TIMEOUT 15) set_property(TEST PDI_example_trace_P PROPERTY PROCESSORS 3) endif("${BUILD_PYTHON}") + +if("${BUILD_CATALYST_PLUGIN}") +find_package(catalyst REQUIRED) +## These examples are only valid with MPI, check if catalyst is compiling with MPI 
+if("${CATALYST_USE_MPI}") + +## set directory to find the catalyst python script +set(CATALYST_SCRIPT_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}) +## configure the yaml file with the previous directory +configure_file(catalyst_serial.yml.in ${CMAKE_BINARY_DIR}/catalyst_serial.yml) ## keep serial for personal testing +configure_file(catalyst.yml.in ${CMAKE_BINARY_DIR}/catalyst.yml) + +add_test(NAME PDI_example_catalyst_C_serial COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 1 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst_serial.yml") +set_property(TEST PDI_example_catalyst_C_serial PROPERTY TIMEOUT 15) +set_property(TEST PDI_example_catalyst_C_serial PROPERTY PROCESSORS 1) + +add_test(NAME PDI_example_catalyst_C COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst.yml") +set_property(TEST PDI_example_catalyst_C PROPERTY TIMEOUT 15) +set_property(TEST PDI_example_catalyst_C PROPERTY PROCESSORS 3) + +## +# if("${BUILD_FORTRAN}") +# add_test(NAME PDI_example_catalyst_F COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_CURRENT_SOURCE_DIR}/catalyst.yml") +# set_property(TEST PDI_example_catalyst_F PROPERTY TIMEOUT 15) +# set_property(TEST PDI_example_catalyst_F PROPERTY PROCESSORS 3) +# endif("${BUILD_FORTRAN}") + +# if("${BUILD_PYTHON}") +# add_test(NAME PDI_example_catalyst_P COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" "${Python3_EXECUTABLE}" ${MPIEXEC_POSTFLAGS} "${CMAKE_CURRENT_SOURCE_DIR}/example.py" "${CMAKE_CURRENT_SOURCE_DIR}/catalyst.yml") +# set_property(TEST PDI_example_catalyst_P PROPERTY TIMEOUT 15) +# set_property(TEST PDI_example_catalyst_P PROPERTY PROCESSORS 3) +# endif("${BUILD_PYTHON}") + +endif("${CATALYST_USE_MPI}") +endif("${BUILD_CATALYST_PLUGIN}") diff --git a/example/catalyst.yml.in b/example/catalyst.yml.in 
new file mode 100644 index 000000000..daf777769 --- /dev/null +++ b/example/catalyst.yml.in @@ -0,0 +1,57 @@ +# duration in seconds +duration: 1.75 +# global [height, width] (excluding boundary conditions or ghosts) +datasize: [60, 12] +# degree of parallelism +parallelism: { height: 3, width: 1 } + +# only the following config is passed to PDI +pdi: + metadata: # type of small values for which PDI keeps a copy + iter: int # current iteration id + dsize: { size: 2, type: array, subtype: int } # local data size including ghosts/boundary + psize: { size: 2, type: array, subtype: int } # number of processes in each dimension + pcoord: { size: 2, type: array, subtype: int } # coordinate of the process + data: # type of values for which PDI does not keep a copy + main_field: { size: [ '$dsize[0]', '$dsize[1]' ], type: array, subtype: double } + + logging: debug + plugins: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + on_event: "newiter" + execute: + state: + timestep: '$iter' + time: '1.0*$iter' + multiblock: 0 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "uniform" + dims: { i: '1+$dsize[1]', j: '1+$dsize[0]' } + origin: + x: '1.0*$pcoord[1]*($dsize[1]-2.0)-1.0' + y: '1.0*$pcoord[0]*($dsize[0]-2.0)-1.0' + spacing: { dx: 1.0, dy: 1.0 } + topologies: + my_mesh: + type: "uniform" + coordset: "my_coords" +# elements: +# dims: {offsets: [] strides: []} + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "main_field" + size: '$dsize[0]*$dsize[1]' + ghost_layers: + my_mesh: { association: "element", start: ['1', '1'], size: ['$dsize[1]-2', '$dsize[0]-2'] } \ No newline at end of file diff --git a/example/catalyst_pipeline_with_rendering.py b/example/catalyst_pipeline_with_rendering.py new file mode 100644 index 000000000..247612c67 --- /dev/null +++ b/example/catalyst_pipeline_with_rendering.py @@ -0,0 +1,131 @@ 
+# script-version: 2.0 +from paraview.simple import * +from paraview import catalyst +import time + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. +producer = TrivialProducer(registrationName="grid") + +# ---------------------------------------------------------------- +# setup views used in the visualization +# ---------------------------------------------------------------- + +# ######## render view temperature + +# Create a new 'Render View' +renderView1 = CreateView('RenderView') +# renderView1.Set( +# ViewSize=[800, 600], +# InteractionMode='2D', +# CenterOfRotation=[20.0, 3.0, 0.0], +# CameraPosition=[20.0, 30.0, 408.7], +# CameraFocalPoint=[20.0, 30.0, 0.0], +# CameraFocalDisk=1.0, +# CameraParallelScale=32.0, +# ) + +renderView1.ViewSize=[800, 600] +renderView1.InteractionMode='2D' +renderView1.CenterOfRotation=[20.0, 3.0, 0.0] +renderView1.CameraPosition=[20.0, 30.0, 408.7] +renderView1.CameraFocalPoint=[20.0, 30.0, 0.0] +renderView1.CameraFocalDisk=1.0, +renderView1.CameraParallelScale=32.0 + + +# get color transfer function/color map for 'temperature' +temperatureLUT = GetColorTransferFunction('temperature') +## RGB: first line: min value, last line: max value +# temperatureLUT.Set( +# RGBPoints=GenerateRGBPoints( +# range_min=0.0, +# range_max=200.0, +# ), +# ScalarRangeInitialized=1.0, +# ) + + +temperatureLUT.RGBPoints=[0.0, 0.231373, 0.298039, 0.752941, + 500000.0, 0.865003, 0.865003, 0.865003, + 1000000, 0.705882, 0.0156863, 0.14902] + +temperatureLUT.ScalarRangeInitialized=1.0 + + + +# show data from grid +## wgridDisplay = Show(producer, renderView1, 'UnstructuredGridRepresentation') +gridDisplay = Show(producer, renderView1, 'StructuredGridRepresentation') + +gridDisplay.Representation = 'Surface With Edges' +gridDisplay.ColorArrayName = ['CELLS', 'temperature'] +gridDisplay.LookupTable = temperatureLUT + +# get color legend/bar for temperatureLUT in view renderView1 +temperatureLUTColorBar = 
GetScalarBar(temperatureLUT, renderView1) +temperatureLUTColorBar.Title = 'temperature' + +# set color bar visibility +temperatureLUTColorBar.Visibility = 1 + +# show color legend +gridDisplay.SetScalarBarVisibility(renderView1, True) + +# # ---------------------------------------------------------------- +# # setup extractors +# # ---------------------------------------------------------------- + +SetActiveView(renderView1) +# create extractor +pNG2= CreateExtractor('PNG', renderView1, registrationName='PNG2') +# trace defaults for the extractor. +pNG2.Trigger = 'TimeStep' + +# init the 'PNG' selected for 'Writer' +pNG2.Writer.FileName = 'temperature_screenshot_{timestep:06d}.png' +pNG2.Writer.ImageResolution=[800, 600] +pNG2.Writer.Format = 'PNG' + +# # ---------------------------------------------------------------- +# # setup extractor for saving the solution in VTK file +# # ---------------------------------------------------------------- + +extractor_vtk_file = None + +mesh_grid = producer.GetClientSideObject().GetOutputDataObject(0) +if mesh_grid.IsA('vtkUnstructuredGrid'): + extractor_vtk_file = CreateExtractor('VTU', producer, registrationName='VTU') +elif mesh_grid.IsA('vtkMultiBlockDataSet'): + extractor_vtk_file = CreateExtractor('VTM', producer, registrationName='VTM') +elif mesh_grid.IsA('vtkPartitionedDataSet'): + extractor_vtk_file = CreateExtractor('VTPD', producer, registrationName='VTPD') +else: + raise RuntimeError("Unsupported data type: %s. Check that the adaptor is providing channel named %s", + mesh_grid.GetClassName(), "grid") + + +# ------------------------------------------------------------------------------ +# Catalyst options +options = catalyst.Options() +## 0: no client, generate the png images and vtk files. 
+## 1: interactive +options.EnableCatalystLive = 0 + + +# Greeting to ensure that ctest knows this script is being imported +print("#############################################################") +print("executing catalyst_pipeline") +print("#############################################################") +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) + print("temperature-range:", producer.CellData["temperature"].GetRange(0)) + # In a real simulation sleep is not needed. We use it here to slow down the + # "simulation" and make sure ParaView client can catch up with the produced + # results instead of having all of them flashing at once. + if options.EnableCatalystLive: + time.sleep(0.1) diff --git a/example/catalyst_serial.yml.in b/example/catalyst_serial.yml.in new file mode 100644 index 000000000..75fed7163 --- /dev/null +++ b/example/catalyst_serial.yml.in @@ -0,0 +1,58 @@ +# duration in seconds +duration: 2.75 +# global [height, width] (excluding boundary conditions or ghosts) +datasize: [60, 12] +# degree of parallelism +parallelism: { height: 1 , width: 1 } + +# only the following config is passed to PDI +pdi: + metadata: # type of small values for which PDI keeps a copy + iter: int # current iteration id + dsize: { size: 2, type: array, subtype: int } # local data size including ghosts/boundary + psize: { size: 2, type: array, subtype: int } # number of processes in each dimension + pcoord: { size: 2, type: array, subtype: int } # coordinate of the process + data: # type of values for which PDI does not keep a copy + main_field: { size: [ '$dsize[0]', '$dsize[1]' ], type: array, subtype: double } + + logging: debug + plugins: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + on_event: "newiter" + 
execute: + state: + timestep: $iter + time: 1.0*$iter + multiblock: 0 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "uniform" + dims: { i: '1+$dsize[1]', j: '1+$dsize[0]' } + origin: + x: 1.0*$pcoord[1]*($dsize[1]-2.0)-1.0 + y: 1.0*$pcoord[0]*($dsize[0]-2.0)-1.0 + spacing: { dx: 1.0, dy: 1.0 } + topologies: + my_mesh: + type: "uniform" + coordset: "my_coords" + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "main_field" + size: $dsize[0]*$dsize[1] + ghost_layers: + my_mesh: + association: "element" + start: ['1', '1'] + size: ['$dsize[1]-2', '$dsize[0]-2'] \ No newline at end of file diff --git a/example/example.c b/example/example.c index 1c827212b..00cb1ba29 100644 --- a/example/example.c +++ b/example/example.c @@ -214,7 +214,7 @@ int main(int argc, char* argv[]) #ifndef WITHOUT_PARACONF PC_double(PC_get(conf, ".duration"), &duration); #else - duration = 0.1; + duration = 1.75; #endif // get local & add ghosts to sizes From 881c45fedc99785d0112044b7343acc44b88010b Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 14 Apr 2026 13:34:37 +0200 Subject: [PATCH 23/31] Update catalyst plugin core --- plugins/catalyst/CMakeLists.txt | 49 +- plugins/catalyst/README.md | 15 +- .../catalyst_plugin_structured_ghost.h | 285 +++++++++ plugins/catalyst/pdi_catalyst_plugin.cxx | 598 ++++++++++++------ plugins/catalyst/pdi_catalyst_plugin.h | 84 ++- 5 files changed, 836 insertions(+), 195 deletions(-) create mode 100644 plugins/catalyst/catalyst_plugin_structured_ghost.h diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 93ed07ff2..63bd111aa 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -5,21 +5,59 @@ cmake_minimum_required(VERSION 3.16...3.29) project(pdi_catalyst_plugin LANGUAGES C CXX) +option(BUILD_CATALYST_PLUGIN "Build Catalyst plugin" ON) +option(BUILD_FORTRAN "Enable Fortran 
support" OFF) +option(BUILD_CATALYST_PARALLEL "Enable Catalyst parallel build" ON) + +include(CTest) + +# if("${BUILD_TESTING}" AND "${BUILD_FORTRAN}") +# enable_language(Fortran) +# set(PDI_COMPONENTS f90) +# endif() + +include(GNUInstallDirs) + # PDI -find_package(PDI REQUIRED COMPONENTS plugins) +#find_package(PDI REQUIRED COMPONENTS plugins) # Python find_package(Python3 REQUIRED COMPONENTS Interpreter Development) # Catalyst +# find_package(catalyst REQUIRED) +# if(NOT ${CATALYST_USE_MPI}) +# message(WARNING "No MPI support in your Catalyst library, please activate MPI in your Catalyst build if you want to use in parallel.") +# endif() + find_package(catalyst REQUIRED) -if(NOT ${CATALYST_USE_MPI}) - message(WARNING "No MPI support in your Catalyst library, please activate MPI in your Catalyst build if you want to use in parallel.") +if("${BUILD_CATALYST_PARALLEL}" AND NOT "${CATALYST_USE_MPI}") + message(FATAL_ERROR "Catalyst with MPI support required, sequential catalyst only found. 
Please set -DBUILD_CATALYST_PARALLEL=OFF to disable parallel Catalyst") +endif() + +# MPI +if("${CATALYST_USE_MPI}") + # if("${BUILD_TESTING}" AND "${BUILD_FORTRAN}") + # set(MPI_COMPONENTS Fortran) + # endif() + find_package(MPI REQUIRED COMPONENTS C) # ${MPI_COMPONENTS}) + set(CATALYST_DEPS catalyst::catalyst MPI::MPI_C) endif() +option(CATALYST_IS_PARALLEL "Catalyst is build in parallel" "${CATALYST_USE_MPI}") +message(WARNING "CATALYST_IS_PARALLEL=${CATALYST_IS_PARALLEL}") +message(WARNING "${CATALYST_USE_MPI}=${CATALYST_USE_MPI}") + +# PDI +find_package(PDI REQUIRED COMPONENTS plugins) # ${PDI_COMPONENTS}) + # The Plugin add_library(pdi_catalyst_plugin MODULE pdi_catalyst_plugin.cxx) -target_link_libraries(pdi_catalyst_plugin PDI::PDI_plugins catalyst::catalyst) +if("${CATALYST_USE_MPI}") + #target_include_directories(pdi_catalyst_plugin PRIVATE "${MPI_C_INCLUDE_DIRS}") + target_compile_definitions(pdi_catalyst_plugin PUBLIC CATALYST_IS_PARALLEL=ON) +endif() +target_link_libraries(pdi_catalyst_plugin PDI::PDI_plugins ${CATALYST_DEPS}) # Installation set(INSTALL_PDIPLUGINDIR "${PDI_DEFAULT_PLUGINDIR}" CACHE PATH "PDI plugins (${PDI_DEFAULT_PLUGINDIR})") @@ -31,4 +69,5 @@ install(TARGETS pdi_catalyst_plugin if("${BUILD_TESTING}") enable_testing() add_subdirectory(tests) -endif() + add_subdirectory(tests_ghost) +endif() \ No newline at end of file diff --git a/plugins/catalyst/README.md b/plugins/catalyst/README.md index 81b990e2e..35418a81f 100644 --- a/plugins/catalyst/README.md +++ b/plugins/catalyst/README.md @@ -42,6 +42,7 @@ So, the user of this plugin should set an event name referenced by the `on_event Internally, `catalyst_initialize` is called by `PDI_Init` and `catalyst_finalize` is called by `PDI_finalize`. +# IMPORTANT NOTICE: YAML CONFIGURATION FILE In the sub-tree corresponding to the catalyst plugin, a double quoted value is evaluated as a string. 
@@ -56,6 +57,18 @@ Be careful, if you compile conduit with 32-bits index (option `CONDUIT_INDEX_32` In the case of real value, the value is evaluated as `double`. Excepted if the real value depend on a data defined in PDI data store. +For real or integer value, we recommend to use simple quoted value as `i: '$numXPoints'` for example instead of plain. + +Summary: + +| scalar type | yaml scalar style | example | +| --- | --- | --- | +| string | double quoted | "mystring" | +| --- | --- | --- | +| integer | plain or simple quoted | 32 or '32' | +| --- | --- | --- | +| real | plain or simple quoted | 1.22 or '1.22'| + # License @@ -65,4 +78,4 @@ The test case is a modification of the Catalyst2 CxxFullExample code from the Pa Developed by Kitware SAS (Kitware Europe), motivated by the [NumPEx](https://numpex.org/) program. -Reach us at https://www.kitware.com/contact/ +Reach us at https://www.kitware.com/contact/ \ No newline at end of file diff --git a/plugins/catalyst/catalyst_plugin_structured_ghost.h b/plugins/catalyst/catalyst_plugin_structured_ghost.h new file mode 100644 index 000000000..b623867e7 --- /dev/null +++ b/plugins/catalyst/catalyst_plugin_structured_ghost.h @@ -0,0 +1,285 @@ +#ifndef catalyst_plugin_structured_ghost_H +#define catalyst_plugin_structured_ghost_H + +#include "catalyst.hpp" + +#include +#include +#include + +#include +#include + +class Catalyst_plugin_structured_ghost +{ + /// Context of this object + PDI::Context& m_ctx; + + /// The tree representing the ghost config + PC_tree_t m_ghost_tree; + /// The parent tree of m_ghost_tree for config_error message + PC_tree_t m_parent_tree; + + /// name of the mesh (It correspond to the topology name in the mesh blue print) + std::string m_topology_name; // topology name + + /// dimensions of the mesh in each direction (including ghosts) + std::vector m_dimensions; + + /// start and size in each direction + std::vector m_start; + std::vector m_size; + std::string m_association; + + /// path in 
the conduit node for catalyst + std::string m_parent_node_path; + + /// vtkGhostType vector for paraview + std::vector m_vtk_ghost_type; + + public: + + Catalyst_plugin_structured_ghost( PDI::Context& ctx, conduit_node * parent_node, PC_tree_t &tree, PC_tree_t &parent_tree, const int &index) + : m_ctx{ctx}, m_ghost_tree(tree), m_parent_tree(parent_tree), m_vtk_ghost_type{1,0} + { + // get the name of mesh(topology) + auto m_topology_name_spec = PC_get(m_ghost_tree, "{%d}", index); + if (!PC_status(m_topology_name_spec)) { + m_topology_name = PDI::to_string(m_topology_name_spec); + } else { + throw PDI::Config_error{m_ghost_tree, "The name of the topology is not defined."}; + } + + // A TESTER: + std::string value_type = get_name_from_parent_node( parent_node, "type"); + + m_ctx.logger().info("topology type is `{}'", value_type); + + //=============================== + // topo=structured + if (value_type == "structured") { + std::string path_to_dims = "topologies/"+m_topology_name+"/elements/dims/"; + std::string PC_to_dataname = ".topologies."+m_topology_name; + + get_dimension( parent_node, path_to_dims, PC_to_dataname, "topology", m_topology_name); + } + //================ + // topo=uniform + else if (value_type == "uniform") { + + // A TESTER: + std::string value_coordset = get_name_from_parent_node( parent_node, "coordset"); + + m_ctx.logger().info("For the uniform topology `{}', the name of coordset is `{}'", m_topology_name, value_coordset); + + std::string path_to_origins = "topologies/"+m_topology_name+"/origin"; + //if (value_type == "uniform" && conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { + if (conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { + throw PDI::Config_error{m_ghost_tree, "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview."}; + } + + std::string path_to_dims = "coordsets/"+value_coordset+"/dims/"; + std::string PC_to_dataname = ".coordsets."+value_coordset; + + 
get_dimension( parent_node, path_to_dims, PC_to_dataname, "coordset", value_coordset); + + // The value in dims_vecGhost corresponding to the number of points in each direction + // We remove 1 because we consider the number of elements in each direction when we create vtkGhostType + for (int ii=0; ii", index); + + PDI::each(mask_ghost_spec , [&](PC_tree_t key_tree, PC_tree_t value) { + std::string key = PDI::to_string(key_tree); + m_ctx.logger().info("key= {}", key); + if (key == "size") { + PDI::opt_each(value, [&](PC_tree_t size) { m_size.emplace_back(PDI::to_string(size)); }); + } else if (key == "start") { + PDI::opt_each(value, [&](PC_tree_t start) { m_start.emplace_back(PDI::to_string(start)); }); + } else if (key == "association") { + m_association = PDI::to_string(value); + } + else { + throw PDI::Config_error{key_tree, "Invalid configuration key in mask_ghost for topology `{}': `{}'", m_topology_name, key}; + } + }); + + if (m_size.size() != m_start.size()) { + throw PDI::Config_error{m_ghost_tree, "Invalid configuration in mask_ghost for topology `{}' the number of elements in size and in start are not the same.", m_topology_name}; + } + if (m_size.size() != m_dimensions.size()) { + throw PDI::Config_error{m_parent_tree, "Invalid configuration in mask_ghost for topology `{}', the dimension of the problem `{}' is not equal to `{}' the number of elements in size and in start.", m_topology_name, m_dimensions.size(), m_size.size()}; + } + + // check size + start + dims (TODO: en dernier) + + m_ctx.logger().info("space dimension {}", m_dimensions.size()); + for (int ii=0; ii < m_dimensions.size(); ++ii){ + m_ctx.logger().info("`{}'-th dimensions of the mesh `{}'", ii, m_dimensions[ii]); + } + + m_parent_node_path = conduit_cpp::cpp_node(parent_node).path(); + m_ctx.logger().info("conduit node path for the parent node is `{}'", m_parent_node_path); + } + + ~Catalyst_plugin_structured_ghost() {} + + /// creation of the mask ghost (VtkGhostType) need by paraview 
+ void create_vtk_ghost_type() + { + int space_dimension = m_dimensions.size(); + + std::vector last(space_dimension); + std::vector start(space_dimension); + + for (int size_id = 0; size_id < space_dimension; ++size_id) { + last[size_id] = m_size[size_id].to_long(m_ctx); + start[size_id] = m_start[size_id].to_long(m_ctx); + } + + std::transform(start.begin(), start.end(), last.begin(), last.begin(), [](long start, long last) { return last + start; }); + + for (int ii=0; ii= last[0] || ii < start[1] || ii >= last[1]) { + m_vtk_ghost_type[ii*(m_dimensions[0])+jj] = (uint8_t) 1; + } else { + m_vtk_ghost_type[ii*(m_dimensions[0])+jj] = (uint8_t) 0; + } + } + } + } + else if (space_dimension == 3) { + size_t vsize = m_dimensions[0] * m_dimensions[1] * m_dimensions[2]; + m_vtk_ghost_type.resize( vsize ); + for (int ii = 0; ii < m_dimensions[2]; ++ii) { + for (int jj = 0; jj < m_dimensions[1]; ++jj) { + for (int kk = 0; kk < m_dimensions[0]; ++kk) { + if ( kk < start[0] || jj < start[1] || ii < start[2] ) { + m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 1; + } else if ( kk >= last[0] || jj >= last[1] || ii >= last[2] ) { + m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 1; + } else { + m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 0; + } + } + } + } + } + else { + std::cout << " Error in the creation of the vtkGhostType for the users: The dimension for the mesh must be 2 or 3." 
<< std::endl; + } + } + + /// return the pointer to the mask ghost + uint8_t * get_vector() { return m_vtk_ghost_type.data(); } + + /// get the size of the pointer of the mask ghost + size_t get_size() { return m_vtk_ghost_type.size(); } + + /// get the name path in the conduit node + const std::string & get_node_path() const { return m_parent_node_path; } + + /// @brief get the name of topology(mesh) + const std::string & get_topology_name() const { return m_topology_name; } + + + /// @brief retrieve the corresponding PC_tree for a given coordset or a given topology + PC_tree_t retrieve_pc_tree_from_parent_node( const std::string &structname, const std::string &dataname) { + std::string index_all = "."+structname+"."+m_topology_name+"."+dataname; + auto dataname_tree = PC_get(m_parent_tree,index_all.c_str()); + + return dataname_tree; + } + + std::string get_name_from_parent_node( conduit_node * parent_node, const std::string &dataname) { + std::string path_to_type = "topologies/"+m_topology_name+"/"+dataname; + std::string PC_to_type = ".topologies."+m_topology_name+"."+dataname; + + bool dataname_is_empty = conduit_cpp::cpp_node(parent_node)[path_to_type].dtype().is_empty(); + if (!dataname_is_empty) { + bool dataname_is_string = conduit_cpp::cpp_node(parent_node)[path_to_type].dtype().is_string(); + if (dataname_is_string) { + return conduit_cpp::cpp_node(parent_node)[path_to_type].as_string(); + } else{ + PC_tree_t msg_tree = retrieve_pc_tree_from_parent_node("topologies", dataname); + throw PDI::Config_error{msg_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined as a string.", dataname, m_topology_name}; + } + } + else { + throw PDI::Config_error{m_parent_tree, "... Vec Ghost Type catalyst ... 
The {} for topology `{}' is not defined.", dataname, m_topology_name}; + } + } + + /// @brief Retrieve the dimension of the mask ghost + /// @param parent_node + /// @param path_to_dims // path in the conduit node where the dimensions are + /// @param PC_to_dataname // path in the PC_tree to get the PC_tree for error message + /// @param data_type // type of data (coordset or topology) where the dimensions are defiened for error message + /// @param data_type_name // name of coordset or name of topology for error message + void get_dimension( conduit_node * parent_node, std::string & path_to_dims, std::string & PC_to_dataname, std::string data_type, std::string &data_type_name) { + + std::string msg_data = data_type + " " + data_type_name; + PC_tree_t msg_tree = PC_get(m_parent_tree, PC_to_dataname.c_str()); + + if ( PC_status(msg_tree)) { + throw PDI::Config_error(msg_tree, ""); + } else { + if (conduit_cpp::cpp_node(parent_node).has_path(path_to_dims)) { + std::list list_dims{"i","j","k"}; + for (auto &&elem: list_dims) { + // verify dims/{elem} exist in the node m_ghost_tree + std::string path_leaf = path_to_dims+elem; + if (conduit_cpp::cpp_node(parent_node).has_path(path_leaf)) { + auto node_path = conduit_cpp::cpp_node(parent_node)[path_leaf]; + // check the variable is an integer + if (node_path.dtype().is_integer()) { + int tmp_int = (int) node_path.to_int(); + m_dimensions.emplace_back(tmp_int); + m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int,msg_data); + } + else if (node_path.dtype().is_long()) { + int tmp_int = (int) node_path.to_long(); + m_dimensions.emplace_back(tmp_int); + m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int,msg_data); + } + else { + throw PDI::Config_error{msg_tree, "For `{}' the value of dims/`{}' is not an integer or a long", msg_data, elem}; + } + } + else{ + // info message in case of dims/i, dims/j, dims/k doesn't exist. 
+ m_ctx.logger().info("No dims/`{}' is not defined for the `{}'.", elem, msg_data); + } + } + if (m_dimensions.size()==0) { + throw PDI::Config_error{msg_tree, "No dims/i , dims/j and dims/k are defined for the `{}'", msg_data}; + } + } else { + throw PDI::Config_error(msg_tree, "For the `{}', we need dims keyword to generate vtkGhostType for catalyst.", msg_data); + } + } + } +}; + + + +#endif // catalyst_plugin_structured_ghost_H diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 14be612e3..68996d529 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -3,18 +3,30 @@ # SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) # SPDX-License-Identifier: Apache 2.0 */ -#include "pdi_catalyst_plugin.h" + +#ifdef CATALYST_IS_PARALLEL +#include +#endif #include "catalyst.hpp" +#include #include +#include +#include + +#include +#include + +#include "pdi_catalyst_plugin.h" catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) : Plugin{ctx} , m_spec_tree(spec_tree) + , catalyst_is_initialize{false} { ctx.callbacks().add_init_callback([this]() { this->process_pdi_init(); }); - ctx.callbacks().add_data_callback([this](const std::string& data_name, PDI::Ref ref) { this->process_data(data_name, ref); }); + // ctx.callbacks().add_data_callback([this](const std::string& data_name, PDI::Ref ref) { this->process_data(data_name, ref); }); ctx.callbacks().add_event_callback([this](const std::string& event_name) { this->process_event(event_name); }); } @@ -29,17 +41,17 @@ void catalyst_plugin::process_pdi_init() this->m_pdi_execute_event_name = this->read_pdi_execute_event_name(); } -void catalyst_plugin::process_data(const std::string& data_name, PDI::Ref ref) -{ - context().logger().debug("User has shared a data named {}", data_name); - auto it = this->m_current_pdi_data.find(data_name); - if (it != 
this->m_current_pdi_data.end()) { - context().logger().warn("Data named '{}' already recorded, the previous value will overwritten.", data_name); - it->second = ref.copy(); - } else { - this->m_current_pdi_data.emplace(data_name, ref); - } -} +// void catalyst_plugin::process_data(const std::string& data_name, PDI::Ref ref) +// { +// context().logger().debug("User has shared a data named `{}'", data_name); +// auto it = this->m_current_pdi_data.find(data_name); +// if (it != this->m_current_pdi_data.end()) { +// context().logger().warn("Data named '{}' already recorded, the previous value will overwritten.", data_name); +// it->second = ref.copy(); +// } else { +// this->m_current_pdi_data.emplace(data_name, ref); +// } +// } void catalyst_plugin::process_event(const std::string& event_name) { @@ -51,161 +63,335 @@ void catalyst_plugin::process_event(const std::string& event_name) void catalyst_plugin::run_catalyst_initialize() { conduit_cpp::Node node; - auto scripts_node = node["catalyst/scripts"]; + + context().logger().info("Read information for script."); auto scripts_spec = PC_get(this->m_spec_tree, ".scripts"); + if (PC_status(scripts_spec)) { + throw PDI::Config_error(m_spec_tree, "No scripts tree is defiend for catalyst plugin."); + } + int script_number = 0; PC_len(scripts_spec, &script_number); - for (int i = 0; i < script_number; ++i) { - auto key = PC_get(scripts_spec, "{%d}", i); - auto value = PC_get(scripts_spec, "<%d>", i); + if (script_number == 0) { + throw PDI::Config_error(scripts_spec, "Zero python script is defined for catalyst python."); + } else { + context().logger().debug("The number of python script is `{}'", script_number); + } + + auto scripts_node = node["catalyst/scripts"]; + for (int index = 0; index < script_number; ++index) { + auto key = PC_get(scripts_spec, "{%d}", index); + auto value = PC_get(scripts_spec, "<%d>", index); scripts_node[PDI::to_string(key)] = PDI::to_string(value); } + // Remark: Each script is defined as a 
string (i.e. node["catalyst/scripts/[name_of_the_script]"] = filename ) + // + // We don't consider yet the object script supported by the last version of paraview. + // In others word, the following node is not supported: + // node["catalyst/scripts/[name_of_the_script]/filename"] = string + // node["catalyst/scripts/[name_of_the_script]/args"] = string + +#ifdef CATALYST_IS_PARALLEL + context().logger().info("Read mpi_comm."); + + auto communicator_spec = PC_get(this->m_spec_tree, ".communicator"); + if (!PC_status(communicator_spec)) { + PDI::Expression communicator = PDI::to_string(communicator_spec); + MPI_Comm tmp_comm = *(static_cast(PDI::Ref_r{communicator.to_ref(context())}.get())); + + // create communicator node + auto communicator_node = node["catalyst/mpi_comm"]; + + // set the fortran MPI_COMMUNICATOR + communicator_node.set_int64(static_cast(MPI_Comm_c2f(tmp_comm))); + + context().logger().debug("value of the communicator is {}:", static_cast(MPI_Comm_c2f(tmp_comm))); + } else { + // context().logger().warn("value of the communicator is {}:", static_cast(MPI_Comm_c2f(tmp_comm))); + //throw PDI::Config_error{communicator_spec, "No communicator is given."}; + context().logger().warn("No communicator is given by default the communicator is MPI_COMM_WORD."); + } +#else + context().logger().info("Catalyst is used with no mpi"); + auto communicator_spec = PC_get(this->m_spec_tree, ".communicator"); + if (!PC_status(communicator_spec)) { + context().logger().info("Used Catalyst with no mpi support. Invalid communicator: `{}'", PDI::to_string(communicator_spec)); + throw PDI::Config_error{ + communicator_spec, + "Used Catalyst with no mpi support. Invalid communicator: `{}'", + PDI::to_string(communicator_spec) + }; + } +#endif + + // The following node is supported in the last version of Paraview + // These nodes are not defined yet because we need some investigations. 
+ // node["catalyst_load/implementation"].set("stub") ; + // node["catalyst_load/search_paths"].set("/path/to/install/catalyst/lib/catalyst/"); + // node["catalyst/pipelines"] + // node["catalyst/python_path"] + + if (context().logger().level() == spdlog::level::debug || context().logger().level() == spdlog::level::trace) { + context().logger().debug("Print node before catalyst_initialize call..."); + node.print(); + } + context().logger().debug("catalyst_initialize call..."); auto result = catalyst_initialize(conduit_cpp::c_node(&node)); if (result != catalyst_status_ok) { - context().logger().error("catalyst_initialize failure"); + // context().logger().error("catalyst_initialize failure"); + throw PDI::System_error("catalyst_initialize failure"); } + catalyst_is_initialize = true; } -void catalyst_plugin::run_catalyst_execute() +void catalyst_plugin::read_info_for_creating_vtk_ghost( + conduit_node* execute_node, + PC_tree_t& execute_spec, + std::vector& list_vtkGhostType_to_create +) { - conduit_cpp::Node node; - - auto execute_spec = PC_get(this->m_spec_tree, ".execute"); - // walk the spec tree and create corresponding catalyst nodes. 
struct Spec_tree_node { PC_tree_t tree; std::string name; conduit_node* parent_node; + PC_tree_t parent_tree; // Adding parent_tree for config error message }; std::stack remaining_tree_and_parent_node; - remaining_tree_and_parent_node.push({execute_spec, "catalyst", conduit_cpp::c_node(&node)}); + remaining_tree_and_parent_node.push({execute_spec, "catalyst", execute_node, execute_spec}); while (!remaining_tree_and_parent_node.empty()) { auto current = remaining_tree_and_parent_node.top(); remaining_tree_and_parent_node.pop(); - auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; - switch (current.tree.node->type) { - case YAML_NO_NODE: - context().logger().error("Unsupported Empty YAML Node for variable {}", current.name); - break; - case YAML_SCALAR_NODE: - switch (current.tree.node->data.scalar.style) { - case YAML_PLAIN_SCALAR_STYLE: - // handle integer or float/double type that doesn't depend on PDI store - { - std::string data_name{PDI::to_string(current.tree)}; - PDI::Expression data_expression{PDI::to_string(current.tree)}; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); - - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); - } else { - context().logger().error("Unsupported datatype for variable: {}. 
It should be scalar type.", data_name); + context().logger().info("Read node of name`{}'", current.name); + if (current.name == "ghost_layers") { + context().logger().info("Ghost layer node: `{}'", current.name); + + if (current.tree.node->type == YAML_MAPPING_NODE) { + int data_tree_size = PDI::len(current.tree); + if (data_tree_size == 0) { + throw PDI::Config_error(current.tree, "ghost_layers node defined with a mapping node of size 0."); + } else { + context().logger().info("number of meshes(topologies) to consider = `{}'", data_tree_size); + } + + // loop over the meshes in the ghost layers tree + for (int index = data_tree_size - 1; index >= 0; --index) { + list_vtkGhostType_to_create.emplace_back(context(), current.parent_node, current.tree, current.parent_tree, index); + } + } else { + throw PDI::Config_error(current.tree, "ghost_layers node only support yaml mapping node."); + } + } else { + if (current.tree.node->type == YAML_MAPPING_NODE) { + int data_tree_size = PDI::len(current.tree); + std::list name_to_skip{"coordsets", "topologies", "fields", "matsets", "adjsets", "state"}; + + // reverse order to get the correct order when poping the stack. 
+ for (int index = data_tree_size - 1; index >= 0; --index) { + auto key = PC_get(current.tree, "{%d}", index); + std::string keyname = PDI::to_string(key); + + bool skip_key = false; + for (auto&& elem: name_to_skip) { + if (elem == keyname) { + skip_key = true; + break; + } + } + if (!skip_key) { + auto value = PC_get(current.tree, "<%d>", index); + if (conduit_cpp::cpp_node(current.parent_node).has_path(current.name)) { + auto current_node + = conduit_cpp::cpp_node(current.parent_node)[current.name]; // Attention: creation of the node if doesn't exit + remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node), current.tree}); + } else { + throw PDI::System_error("Error in creating vtkGhostType: a conduit node doesn't exist !!"); + } } } + } + } + } +} + +void catalyst_plugin::create_catalyst_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec) +{ + // walk the spec tree and create corresponding catalyst nodes. + struct Spec_tree_node { + PC_tree_t tree; + std::string name; + conduit_node* parent_node; + }; + + std::stack remaining_tree_and_parent_node; + remaining_tree_and_parent_node.push({execute_spec, "catalyst", execute_node}); + while (!remaining_tree_and_parent_node.empty()) { + auto current = remaining_tree_and_parent_node.top(); + remaining_tree_and_parent_node.pop(); + + context().logger().info("Read node of name`{}'", current.name); + if (current.name == "ghost_layers") { + context().logger().info(" ghost_layers keys will be read after"); + // } else if (current.name == "elements") { + // int data_tree_size = PDI::len(current.tree); + // // Check for dynamic PDI Data array + // bool pdi_data_array = false; + // for (int index = data_tree_size - 1; index >= 0; --index) { + // auto key = PC_get(current.tree, "{%d}", index); + // if (PDI::to_string(key) == "dims") { + // PDI::Config_error{current.tree, "I found dims dans elements `{}'", current.name}; + // } else { + // PDI::Config_error{current.tree, "The 
key is not dims and it is `{}'", PDI::to_string(key)}; + // } + // } + } else { + auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; + switch (current.tree.node->type) { + case YAML_NO_NODE: + throw PDI::Config_error{current.tree, "Unsupported Empty YAML Node for variable `{}'", current.name}; + // context().logger().error("Unsupported Empty YAML Node for variable `{}'", current.name); break; - case YAML_SINGLE_QUOTED_SCALAR_STYLE: - // handle integer or float/double type that depend on scalar PDI data - { - std::string data_name{PDI::to_string(current.tree)}; - PDI::Expression data_expression{PDI::to_string(current.tree)}; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); - - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), data_name, *scalar_datatype, spec_ref); - } else { - context().logger().error("Unsupported datatype for variable: {}. It should be scalar type.", data_name); + case YAML_SCALAR_NODE: + switch (current.tree.node->data.scalar.style) { + case YAML_PLAIN_SCALAR_STYLE: + case YAML_SINGLE_QUOTED_SCALAR_STYLE: + // handle integer or float/double type that depend perhaps on scalar PDI data + { + std::string data_name{PDI::to_string(current.tree)}; + PDI::Expression data_expression{PDI::to_string(current.tree)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + auto data_type = spec_ref.type()->evaluate(context()); + + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { + fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); + } else { + // context().logger().error("Unsupported datatype for variable: `{}'. It should be scalar type (integer or float).", data_name); + throw PDI::Config_error{ + current.tree, + "Unsupported datatype for variable: `{}'. 
It should be scalar type (integer or float).", + data_name + }; + } } + break; + case YAML_DOUBLE_QUOTED_SCALAR_STYLE: + // handle string type + current_node.set_string(PDI::to_string(current.tree)); + break; + case YAML_LITERAL_SCALAR_STYLE: + case YAML_FOLDED_SCALAR_STYLE: + case YAML_ANY_SCALAR_STYLE: + // context().logger().error("Unsupported YAML scalar style for variable `{}'", current.name); + throw PDI::Config_error{current.tree, "Unsupported YAML scalar style for variable `{}'", current.name}; + break; } break; - case YAML_DOUBLE_QUOTED_SCALAR_STYLE: - current_node.set_string(PDI::to_string(current.tree)); + case YAML_SEQUENCE_NODE: + // context().logger().error("Unsupported Sequence YAML Node for variable `{}'", current.name); + throw PDI::Config_error{current.tree, "Unsupported Sequence YAML Node for variable `{}'", current.name}; break; - case YAML_LITERAL_SCALAR_STYLE: - case YAML_FOLDED_SCALAR_STYLE: - case YAML_ANY_SCALAR_STYLE: - context().logger().error("Unsupported YAML scalar style for variable {}", current.name); - break; - } - break; - case YAML_SEQUENCE_NODE: - context().logger().error("Unsupported Sequence YAML Node for variable {}", current.name); - break; - case YAML_MAPPING_NODE: - int data_tree_size = PDI::len(current.tree); - // Check for dynamic PDI Data array - bool pdi_data_array = false; - for (int i = data_tree_size - 1; i >= 0; --i) { - auto key = PC_get(current.tree, "{%d}", i); - if (PDI::to_string(key) == "PDI_data_array") { - this->fill_node_with_pdi_data_array(conduit_cpp::c_node(¤t_node), current.tree); - pdi_data_array = true; - break; // break the loop + case YAML_MAPPING_NODE: + int data_tree_size = PDI::len(current.tree); + // Check for dynamic PDI Data array + bool pdi_data_array = false; + for (int index = data_tree_size - 1; index >= 0; --index) { + auto key = PC_get(current.tree, "{%d}", index); + if (PDI::to_string(key) == "PDI_data_array") { + this->fill_node_with_pdi_data_array(conduit_cpp::c_node(¤t_node), 
current.tree); + pdi_data_array = true; + break; // break the loop + } } + if (pdi_data_array) { + break; // break the case + } + // reverse order to get the correct order when poping the stack. + for (int index = data_tree_size - 1; index >= 0; --index) { + auto key = PC_get(current.tree, "{%d}", index); + auto value = PC_get(current.tree, "<%d>", index); + remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node)}); + } + break; } - if (pdi_data_array) { - break; // break the case - } - - // reverse order to get the correct order when poping the stack. - for (int i = data_tree_size - 1; i >= 0; --i) { - auto key = PC_get(current.tree, "{%d}", i); - auto value = PC_get(current.tree, "<%d>", i); - remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node)}); - } - break; } } +} +void catalyst_plugin::run_catalyst_execute() +{ + conduit_cpp::Node node; + std::vector list_vtkGhostType_to_create; // object contain vector vtkGhostType + auto execute_spec = PC_get(this->m_spec_tree, ".execute"); + + conduit_node* node_pointer = conduit_cpp::c_node(&node); + // create the conduit node for catalyst_execute + create_catalyst_conduit_node(node_pointer, execute_spec); + + // read information to create the vtkGhostType for paraview (read "ghost_layers" node in the yaml file) + //create_node_for_mask_ghost( node_pointer, execute_spec, list_vtkGhostType_to_create); + read_info_for_creating_vtk_ghost(node_pointer, execute_spec, list_vtkGhostType_to_create); + + // creation vtkGhostType vector + for (auto&& vtk_ghost_type: list_vtkGhostType_to_create) { + std::string tmp_path = vtk_ghost_type.get_node_path(); + std::string meshname = vtk_ghost_type.get_topology_name(); + vtk_ghost_type.create_vtk_ghost_type(); + + // create the node corresponding to the different vtkGhostType + node[tmp_path + "/fields/vtkGhostType/association"].set("element"); + node[tmp_path + "/fields/vtkGhostType/topology"].set(meshname); 
+ node[tmp_path + "/fields/vtkGhostType/volume_dependent"].set("false"); + node[tmp_path + "/fields/vtkGhostType/values"].set_external(vtk_ghost_type.get_vector(), vtk_ghost_type.get_size(), 0, 1, sizeof(uint8_t)); + } + + context().logger().debug("Print conduit node including vtk ghost type created ..."); if (context().logger().level() == spdlog::level::debug || context().logger().level() == spdlog::level::trace) { node.print(); } + context().logger().debug("catalyst_execute call..."); - auto result = catalyst_execute(conduit_cpp::c_node(&node)); + auto result = catalyst_execute(node_pointer); if (result != catalyst_status_ok) { - context().logger().error("catalyst_execute failure"); + // context().logger().error("catalyst_execute failure"); + throw PDI::System_error{"catalyst_execute failure"}; } // clear m_current_pdi_data at each iteration - this->m_current_pdi_data.clear(); + // this->m_current_pdi_data.clear(); } void catalyst_plugin::run_catalyst_finalize() { - context().logger().debug("catalyst_finalize call..."); - conduit_cpp::Node node; - auto result = catalyst_finalize(conduit_cpp::c_node(&node)); - if (result != catalyst_status_ok) { - context().logger().error("catalyst_finalize failure"); + if (catalyst_is_initialize) { + context().logger().debug("catalyst_finalize call..."); + conduit_cpp::Node node; + auto result = catalyst_finalize(conduit_cpp::c_node(&node)); + if (result != catalyst_status_ok) { + // context().logger().error("catalyst_finalize failure"); + throw PDI::System_error{"catalyst_finalize failure"}; + } } } -void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t tree) +void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree) { auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { - context().logger().error("No \"name\" child in PDI_data_array spec."); + // context().logger().error("No \"name\" child in PDI_data_array spec."); + throw 
PDI::Config_error{tree, "No \"name\" child in PDI_data_array spec."}; return; } std::string name = PDI::to_string(name_spec); - - auto it = this->m_current_pdi_data.find(name); - if (it == this->m_current_pdi_data.end()) { - context().logger().error("Can't find the PDI_data named: {}", name); - return; - } - auto ref = it->second; - PDI::Ref_r ref_r{ref}; - + PDI::Ref_r ref_r = context()[name].ref(); if (!ref_r) { - context().logger().error("The PDIData named \"{}\" is not readable.", name); + context().logger().warn("Cannot read `{}' this data is not available", name); + // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error + throw PDI::System_error{"No \"name\" child in PDI_data_array spec `{}'.", name}; return; } @@ -213,17 +399,22 @@ void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_ if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { fill_node_with_array_pdi_data(node, name, tree, *array_datatype, ref_r); } else { - context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); + // context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); + // throw PDI::Config_error{tree, "Unsupported datatype for variable: `{}'. The type should be array type.", name}; + // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error + throw PDI::System_error{"Unsupported datatype for variable: `{}'. The type should be array type.", name}; } } void catalyst_plugin::fill_node_with_scalar_pdi_data( conduit_node* node, + PC_tree_t& tree, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r ) { + // remark: the different type of conduit integer and float is defined in the configuration step of cmake. 
PDI::Scalar_kind scalar_kind = scalar_datatype.kind(); if (scalar_kind == PDI::Scalar_kind::SIGNED) { auto buffer_size = scalar_datatype.buffersize(); @@ -236,7 +427,8 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( } else if (buffer_size == sizeof(conduit_int64)) { catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); } else { - context().logger().error("Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + // context().logger().error("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::Config_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { auto buffer_size = scalar_datatype.buffersize(); @@ -249,7 +441,8 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( } else if (buffer_size == sizeof(conduit_uint64)) { catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); } else { - context().logger().error("Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + // context().logger().error("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::Config_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { auto buffer_size = scalar_datatype.buffersize(); @@ -258,10 +451,61 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( } else if (buffer_size == sizeof(conduit_float64)) { catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); } else { - context().logger().error("Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + // context().logger().error("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::Config_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; } } else { - context().logger().error("Unknown Scalar Type for variable {}", name); + // 
context().logger().error("Unknown Scalar Type for variable `{}'", name); + throw PDI::Config_error{tree, "Unknown Scalar Type for variable `{}'", name}; + } +} + +void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::string& name, conduit_index_t& value) +{ + if (!PC_status(spec)) { + long tmp_value; + if (spec.node->type == YAML_SCALAR_NODE) { + PDI::Expression data_expression{PDI::to_string(spec)}; + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + if (!spec_ref) { + // context().logger().error("The PDIData named \"{}\" is not readable.", name); + throw PDI::System_error("The PDIData named \"{}\" is not readable.", name); + + } + auto data_type = spec_ref.type()->evaluate(context()); + if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { + PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED || scalar_kind == PDI::Scalar_kind::UNSIGNED) { + // return spec_ref.scalar_value(); + tmp_value = data_expression.to_long(context()); + } else { + throw PDI::Config_error{ + spec, + "Unknown Scalar Type for variable `{}'. The type must be an integer signed or unsigned)", + PDI::to_string(spec) + }; + // context().logger().error("Unknown Scalar Type for variable {}. 
The type must be an integerc (signed or unsigned)", PDI::to_string(spec)); + } + } else { + throw PDI::Config_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; + // context().logger().error("The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)); + } + } else { + // context().logger().error("Supported only YAML_SCALAR_NODE for variable `{}'", name); + throw PDI::Config_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; + } + + // return value in conduit_index_t + if (std::is_same::value) { + value = tmp_value; + } else { + // case conduit_index_t is 32-bits + value = static_cast(tmp_value); + if (value != tmp_value) { + // context().logger().error("Error in cast of a type conduit_index_t in long. {} != `{}'", value, tmp_value); + throw PDI::System_error{"Error in cast of a type conduit_index_t in long. `{}' != `{}'", value, tmp_value}; + } + } } } @@ -279,56 +523,27 @@ void catalyst_plugin::fill_node_with_array_pdi_data( } auto scalar_datatype = std::dynamic_pointer_cast(type); if (!scalar_datatype) { - context().logger().error("Array subtype of variable {} should be scalar type.", name); + // context().logger().error("Array subtype of variable {} should be scalar type.", name); + throw PDI::Config_error{tree, "Array subtype of variable `{}' should be scalar type.", name}; return; } conduit_index_t num_elements = 0; auto size_spec = PC_get(tree, ".size"); - if (PC_status(size_spec) == PC_OK) { - if (std::is_same::value) { - num_elements = get_long_value_from_spec_node(size_spec, name); - } else { - // case conduit_index_t is 32-bits - long tmp_num_elements = get_long_value_from_spec_node(size_spec, name); - num_elements = static_cast(tmp_num_elements); - if (num_elements != tmp_num_elements) { - context().logger().error("Error in cast of a type conduit_index_t in long. 
{} != {}", num_elements, tmp_num_elements); - } - } - } else { - context().logger().error("Unknown the number of elements for variable{} passed to catalyst.", name); + get_conduit_index_t_value(size_spec, name, num_elements); + + if (num_elements == 0) { + // context().logger().error("Unknown the size of an array of name {} passed to catalyst.", name); + throw PDI::System_error("Unknown the size of an array of name `{}' passed to catalyst.", name); } conduit_index_t offset = 0; auto offset_spec = PC_get(tree, ".offset"); - if (PC_status(offset_spec) == PC_OK) { - if (std::is_same::value) { - offset = get_long_value_from_spec_node(offset_spec, name); - } else { - // case conduit_index_t is 32-bits - long tmp_offset = get_long_value_from_spec_node(offset_spec, name); - offset = static_cast(tmp_offset); - if (offset != tmp_offset) { - context().logger().error("Error in cast of a type long in conduit_index_t {} != {}", offset, tmp_offset); - } - } - } + get_conduit_index_t_value(offset_spec, name, offset); conduit_index_t stride = 1; auto stride_spec = PC_get(tree, ".stride"); - if (PC_status(stride_spec) == PC_OK) { - if (std::is_same::value) { - stride = get_long_value_from_spec_node(stride_spec, name); - } else { - // case conduit_index_t is 32-bits - long tmp_stride = get_long_value_from_spec_node(stride_spec, name); - stride = static_cast(tmp_stride); - if (stride != tmp_stride) { - context().logger().error("Error in cast of a type long to conduit_index_t {} != {}", stride, tmp_stride); - } - } - } + get_conduit_index_t_value(stride_spec, name, stride); // computer endianness is used conduit_index_t endianness = CONDUIT_ENDIANNESS_DEFAULT_ID; @@ -385,7 +600,9 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - context().logger().error("Unknown SIGNED buffer size of {} for variable {}", buffer_size, name); + // throw PDI::Config_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + // 
context().logger().error("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::System_error{"Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { auto buffer_size = scalar_datatype->buffersize(); @@ -438,7 +655,9 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - context().logger().error("Unknown UNSIGNED buffer size of {} for variable {}", buffer_size, name); + //throw PDI::Config_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + // context().logger().error("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::System_error{"Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { auto buffer_size = scalar_datatype->buffersize(); @@ -467,41 +686,51 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - context().logger().error("Unknown FLOAT buffer size of {} for variable {}", buffer_size, name); + // throw PDI::Config_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; + // context().logger().error("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); + throw PDI::System_error{"Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; } } else { - context().logger().error("Unknown Scalar Type for variable {}", name); + // context().logger().error("Unknown Scalar Type for variable `{}'", name); + // throw PDI::Config_error{tree, "Unknown Scalar Type for variable `{}'", name}; + throw PDI::System_error{"Unknown Scalar Type for variable `{}'", name}; } } -long catalyst_plugin::get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name) -{ - if (spec.node->type == YAML_SCALAR_NODE) { - PDI::Expression data_expression{PDI::to_string(spec)}; - PDI::Ref_r spec_ref = 
data_expression.to_ref(context()); - if (!spec_ref) { - context().logger().error("The PDIData named \"{}\" is not readable.", name); - return 0; - } - auto data_type = spec_ref.type()->evaluate(context()); - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED) { - return data_expression.to_long(context()); - } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { - return data_expression.to_long(context()); - } else { - context().logger().error("Unknown Scalar Type for variable {}. The type must be an integer", PDI::to_string(spec)); - } - } else { - context().logger().error("The datatype must be a scalar datatype for variable: {}", PDI::to_string(spec)); - } - return 0; - } else { - context().logger().error("Supported only YAML_SCALAR_NODE for variable {}", name); - } - return 0; -} +// long catalyst_plugin::get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name) +// { +// if (spec.node->type == YAML_SCALAR_NODE) { +// PDI::Expression data_expression{PDI::to_string(spec)}; +// PDI::Ref_r spec_ref = data_expression.to_ref(context()); +// if (!spec_ref) { +// context().logger().error("The PDIData named \"{}\" is not readable.", name); +// return 0; +// } +// auto data_type = spec_ref.type()->evaluate(context()); +// if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { +// PDI::Scalar_kind scalar_kind = (*scalar_datatype).kind(); +// if (scalar_kind == PDI::Scalar_kind::SIGNED || scalar_kind == PDI::Scalar_kind::UNSIGNED) { +// // return spec_ref.scalar_value(); +// return data_expression.to_long(context()); +// } else { +// throw PDI::Config_error{ +// spec, +// "Unknown Scalar Type for variable `{}'. The type must be an integerc (signed or unsigned)", +// PDI::to_string(spec) +// }; +// // context().logger().error("Unknown Scalar Type for variable {}. 
The type must be an integerc (signed or unsigned)", PDI::to_string(spec)); +// } +// } else { +// throw PDI::Config_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; +// // context().logger().error("The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)); +// } +// return 0; +// } else { +// // context().logger().error("Supported only YAML_SCALAR_NODE for variable `{}'", name); +// throw PDI::Config_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; +// } +// return 0; +// } std::string catalyst_plugin::read_pdi_execute_event_name() { @@ -509,6 +738,9 @@ std::string catalyst_plugin::read_pdi_execute_event_name() auto execute_spec = PC_get(this->m_spec_tree, ".on_event"); if (PC_status(execute_spec) == PC_OK) { event_name = PDI::to_string(execute_spec); + } else { + throw PDI::Config_error{execute_spec, "No event name for catalyst plugin is given"}; } + return event_name; } diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index 4a4f77cfd..a12626efb 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -1,12 +1,15 @@ #ifndef CATALYST_PLUGIN_H #define CATALYST_PLUGIN_H +#include "catalyst.hpp" // ??? +#include "catalyst_plugin_structured_ghost.h" // ??? + #include #include #include -struct conduit_node_impl; -typedef struct conduit_node_impl conduit_node; +struct conduit_node_impl; // ?? +typedef struct conduit_node_impl conduit_node; // ?? /** * @brief Translates PDI calls to Catalyst calls. 
@@ -25,15 +28,67 @@ class catalyst_plugin: public PDI::Plugin ~catalyst_plugin(); private: + /// @brief callback used at pdi_init void process_pdi_init(); + + /// @brief trigger action of catalyst plugin when a data is exposed to pdi + /// @param data_name: name of the current data exposed to pdi + /// @param ref: reference of the data exposed to pdi void process_data(const std::string& data_name, PDI::Ref ref); + + /// @brief trigger action of catalyst plugin when an event occur + /// @param event_name: name of the current event void process_event(const std::string& event_name); + /// @brief function running in pdi_init void run_catalyst_initialize(); + + /// @brief function running in the event corresponding to catalyst_execute void run_catalyst_execute(); + + /// @brief function running in pdi_finalize void run_catalyst_finalize(); - void fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t tree); - void fill_node_with_scalar_pdi_data(conduit_node* node, const std::string& name, const PDI::Scalar_datatype& scalar_datatype, PDI::Ref_r& ref_r); + + /// @brief TO COMPLETE + /// @param execute_node + /// @param execute_spec + /// @param list_vtkGhostType_to_create + void read_info_for_creating_vtk_ghost( + conduit_node* execute_node, + PC_tree_t& execute_spec, + std::vector& list_vtkGhostType_to_create + ); + + /// @brief TO COMPLETE + /// @param execute_node + /// @param execute_spec + void create_catalyst_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec); + + /// @brief TO COMPLETE + /// @param node + /// @param tree + void fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree); + + /// @brief TO COMPLETE + /// @param node + /// @param tree + /// @param name + /// @param scalar_datatype + /// @param ref_r + void fill_node_with_scalar_pdi_data( + conduit_node* node, + PC_tree_t& tree, + const std::string& name, + const PDI::Scalar_datatype& scalar_datatype, + PDI::Ref_r& ref_r + ); + + /// @brief TO COMPLETE + /// @param node + 
/// @param name + /// @param tree + /// @param array_datatype + /// @param ref_r void fill_node_with_array_pdi_data( conduit_node* node, const std::string& name, @@ -41,11 +96,28 @@ class catalyst_plugin: public PDI::Plugin const PDI::Array_datatype& array_datatype, PDI::Ref_r& ref_r ); - long get_long_value_from_spec_node(PC_tree_t& spec, const std::string& name); + + /// @brief return an index description (i.e. size, offset, stride) of the array that correspond to a conduit node + /// @param spec : specification tree where the index is defined + /// @param name : The name of the data that corresponding to the index. + /// @param value: value of the index + void get_conduit_index_t_value(PC_tree_t& spec, const std::string& name, conduit_index_t& value); + + /// @brief return the event name to call catalyst_execute std::string read_pdi_execute_event_name(); + /// @brief variable to know if catalyst_initialize is called. + /// Remark: The call of catalyst_finalize doesn't return a okay status if catalyst_initialize is not called before. + /// Example: In case of config error in the yaml file needed by catalyst_initialize. Moreover, the config error message cannot be see by the user. 
+ bool catalyst_is_initialize; + + /// @brief specification tree for catalyst plugin PC_tree_t m_spec_tree; - std::unordered_map m_current_pdi_data; + + /// @brief list of pdi data array passed to catalyst in catalyst_execute + /// std::unordered_map m_current_pdi_data; + + /// @brief name of event use to call catalyst_execute std::string m_pdi_execute_event_name; }; From 30a66c406c1a09b21ace35fafb2a2d25680ea773 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 14 Apr 2026 13:54:55 +0200 Subject: [PATCH 24/31] update catalyst test --- plugins/catalyst/tests/CMakeLists.txt | 6 +- plugins/catalyst/tests/pdi_adaptor.cxx | 2 +- .../execute_reference.json | 70 +++++++++++++++++++ .../execute_reference_rank0.json | 70 +++++++++++++++++++ .../execute_reference_rank1.json | 70 +++++++++++++++++++ .../execute_reference_rank2.json | 70 +++++++++++++++++++ .../execute_reference_rank3.json | 70 +++++++++++++++++++ .../finalize_reference.json | 1 + .../initialize_reference.json | 10 +++ plugins/catalyst/tests/run_test.py | 58 ++++++++------- plugins/catalyst/tests/run_test_mpi.py | 66 +++++++++-------- 11 files changed, 427 insertions(+), 66 deletions(-) create mode 100644 plugins/catalyst/tests/references_big_endian/execute_reference.json create mode 100644 plugins/catalyst/tests/references_big_endian/execute_reference_rank0.json create mode 100644 plugins/catalyst/tests/references_big_endian/execute_reference_rank1.json create mode 100644 plugins/catalyst/tests/references_big_endian/execute_reference_rank2.json create mode 100644 plugins/catalyst/tests/references_big_endian/execute_reference_rank3.json create mode 100644 plugins/catalyst/tests/references_big_endian/finalize_reference.json create mode 100644 plugins/catalyst/tests/references_big_endian/initialize_reference.json diff --git a/plugins/catalyst/tests/CMakeLists.txt b/plugins/catalyst/tests/CMakeLists.txt index 83bc76b27..166a86fa1 100644 --- a/plugins/catalyst/tests/CMakeLists.txt +++ 
b/plugins/catalyst/tests/CMakeLists.txt @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 Kitware SAS +# SPDX-FileCopyrightText: Copyright (c) 2025 Commissariat a l'energie atomique et aux energies alternatives (CEA) +# SPDX-License-Identifier: Apache 2.0 + # Creation of executable add_executable(TestPDICatalyst grid.cxx @@ -18,7 +22,7 @@ set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests) configure_file(pdi.yml.in pdi.yml) find_package(Python3 COMPONENTS Interpreter) -add_test(NAME TestPDICatalyst COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/") +add_test(NAME TestPDICatalystSerial COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests/run_test.py" "${CMAKE_BINARY_DIR}/tests/" "${CMAKE_SOURCE_DIR}/tests/") # test with MPI if(${CATALYST_USE_MPI}) diff --git a/plugins/catalyst/tests/pdi_adaptor.cxx b/plugins/catalyst/tests/pdi_adaptor.cxx index ca03ff08a..b76def5c2 100644 --- a/plugins/catalyst/tests/pdi_adaptor.cxx +++ b/plugins/catalyst/tests/pdi_adaptor.cxx @@ -96,4 +96,4 @@ bool finalize() auto status = PDI_finalize(); return status == PDI_status_t::PDI_OK; } -} // namespace PDI_adaptor +} // namespace pdi_adaptor diff --git a/plugins/catalyst/tests/references_big_endian/execute_reference.json b/plugins/catalyst/tests/references_big_endian/execute_reference.json new file mode 100644 index 000000000..25aaf430e --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/execute_reference.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "big"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "big"} + }, + "channels": + { + "grid": + { + "type": 
{"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 184800,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 184800,"offset": 1478434,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 184800,"offset": 2956834,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 4435234,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 4435247,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 4435257,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "connectivity": {"dtype":"uint32","number_of_elements": 1400424,"offset": 4435261,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 10036957,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 10036964,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 10036972,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 184800,"offset": 10036978,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 184800,"offset": 11515378,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": 
{"dtype":"float64","number_of_elements": 184800,"offset": 12993778,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 14472178,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 14472186,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 14472194,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": {"dtype":"float32","number_of_elements": 175053,"offset": 14472200,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + } + } + } + } +} diff --git a/plugins/catalyst/tests/references_big_endian/execute_reference_rank0.json b/plugins/catalyst/tests/references_big_endian/execute_reference_rank0.json new file mode 100644 index 000000000..cee6b2350 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/execute_reference_rank0.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "big"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "big"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 
8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": 
"big"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + } + } + } + } +} diff --git a/plugins/catalyst/tests/references_big_endian/execute_reference_rank1.json b/plugins/catalyst/tests/references_big_endian/execute_reference_rank1.json new file mode 100644 index 000000000..44858f2e9 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/execute_reference_rank1.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "big"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "big"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 50160,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 50160,"offset": 401314,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 50160,"offset": 802594,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1203874,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1203887,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 
1203897,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "connectivity": {"dtype":"uint32","number_of_elements": 365328,"offset": 1203901,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2665213,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2665220,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2665228,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 50160,"offset": 2665234,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 50160,"offset": 3066514,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 50160,"offset": 3467794,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3869074,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3869082,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3869090,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": {"dtype":"float32","number_of_elements": 45666,"offset": 3869096,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + } + } + } + } +} diff --git a/plugins/catalyst/tests/references_big_endian/execute_reference_rank2.json b/plugins/catalyst/tests/references_big_endian/execute_reference_rank2.json new file mode 100644 index 000000000..cee6b2350 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/execute_reference_rank2.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": 
{"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "big"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "big"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "data": + { + "coordsets": + { + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": 
{"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + }, + "pressure": + { + "association": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + } + } + } + } +} diff --git a/plugins/catalyst/tests/references_big_endian/execute_reference_rank3.json b/plugins/catalyst/tests/references_big_endian/execute_reference_rank3.json new file mode 100644 index 000000000..cee6b2350 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/execute_reference_rank3.json @@ -0,0 +1,70 @@ + +{ + "catalyst": + { + "state": + { + "timestep": {"dtype":"int32","number_of_elements": 1,"offset": 0,"stride": 4,"element_bytes": 4,"endianness": "big"}, + "time": {"dtype":"float64","number_of_elements": 1,"offset": 4,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "multiblock": {"dtype":"int64","number_of_elements": 1,"offset": 12,"stride": 8,"element_bytes": 8,"endianness": "big"} + }, + "channels": + { + "grid": + { + "type": {"dtype":"char8_str","number_of_elements": 5,"offset": 20,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "data": + { + "coordsets": + 
{ + "my_coords": + { + "type": {"dtype":"char8_str","number_of_elements": 9,"offset": 25,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 34,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 380194,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 760354,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + } + }, + "topologies": + { + "my_mesh": + { + "type": {"dtype":"char8_str","number_of_elements": 13,"offset": 1140514,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "coordset": {"dtype":"char8_str","number_of_elements": 10,"offset": 1140527,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "elements": + { + "shape": {"dtype":"char8_str","number_of_elements": 4,"offset": 1140537,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "connectivity": {"dtype":"uint32","number_of_elements": 345032,"offset": 1140541,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + }, + "fields": + { + "velocity": + { + "association": {"dtype":"char8_str","number_of_elements": 7,"offset": 2520669,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 2520676,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 2520684,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": + { + "x": {"dtype":"float64","number_of_elements": 47520,"offset": 2520690,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "y": {"dtype":"float64","number_of_elements": 47520,"offset": 2900850,"stride": 8,"element_bytes": 8,"endianness": "big"}, + "z": {"dtype":"float64","number_of_elements": 47520,"offset": 3281010,"stride": 8,"element_bytes": 8,"endianness": "big"} + } + }, + "pressure": + { + "association": 
{"dtype":"char8_str","number_of_elements": 8,"offset": 3661170,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "topology": {"dtype":"char8_str","number_of_elements": 8,"offset": 3661178,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "volume_dependent": {"dtype":"char8_str","number_of_elements": 6,"offset": 3661186,"stride": 1,"element_bytes": 1,"endianness": "big"}, + "values": {"dtype":"float32","number_of_elements": 43129,"offset": 3661192,"stride": 4,"element_bytes": 4,"endianness": "big"} + } + } + } + } + } + } +} diff --git a/plugins/catalyst/tests/references_big_endian/finalize_reference.json b/plugins/catalyst/tests/references_big_endian/finalize_reference.json new file mode 100644 index 000000000..4d10ad555 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/finalize_reference.json @@ -0,0 +1 @@ +{"dtype":"empty"} \ No newline at end of file diff --git a/plugins/catalyst/tests/references_big_endian/initialize_reference.json b/plugins/catalyst/tests/references_big_endian/initialize_reference.json new file mode 100644 index 000000000..90c952641 --- /dev/null +++ b/plugins/catalyst/tests/references_big_endian/initialize_reference.json @@ -0,0 +1,10 @@ + +{ + "catalyst": + { + "scripts": + { + "script1": {"dtype":"char8_str","number_of_elements": 110,"offset": 0,"stride": 1,"element_bytes": 1,"endianness": "big"} + } + } +} diff --git a/plugins/catalyst/tests/run_test.py b/plugins/catalyst/tests/run_test.py index cab7df48d..30063a7f4 100644 --- a/plugins/catalyst/tests/run_test.py +++ b/plugins/catalyst/tests/run_test.py @@ -23,37 +23,35 @@ # get endiannes of the computer endianness = sys.byteorder -if(endianness == 'little'): - # Check the initialize json dump - reference_initialize_json = source_folder + "/references/initialize_reference.json" - actual_initialize_json = binary_folder + "initialize_params.conduit_bin.1.0_json" - with open(reference_initialize_json) as ref_file: - with open(actual_initialize_json) as actual_file: - 
ref_json = json.load(ref_file) - actual_json = json.load(actual_file) +reference_directory = 'references' # if(endianness == 'little'); +if(endianness == 'big'): + reference_directory = reference_directory + '_big_endian' + +# Check the initialize json dump +reference_initialize_json = source_folder + "/" + reference_directory + "/initialize_reference.json" +actual_initialize_json = binary_folder + "initialize_params.conduit_bin.1.0_json" +with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) + if ref_json.items() != actual_json.items(): + # Ignore the length of the script path which depends on platform. + actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] if ref_json.items() != actual_json.items(): - # Ignore the length of the script path which depends on platform. - actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] - if ref_json.items() != actual_json.items(): - print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') - exit(1) - - # Check the execute json dump - reference_execute_json = source_folder + "/references/execute_reference.json" - for step in range(9): - filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.1.0_json" - if not filecmp.cmp(reference_execute_json, filepath): - print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') - exit(1) - - # Check the finalize json dump - reference_finalize_json = source_folder + "/references/finalize_reference.json" - actual_finalize_json = binary_folder + "finalize_params.conduit_bin.1.0_json" - if not filecmp.cmp(reference_finalize_json, actual_finalize_json): - print(f'Differences detected in file 
"{actual_finalize_json}" compared to reference "{reference_finalize_json}') + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') + exit(1) + +# Check the execute json dump +reference_execute_json = source_folder + "/" + reference_directory + "/execute_reference.json" +for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.1.0_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') exit(1) -else: - print(f'The reference solution is based on little endian. So it is not possible to check with big endian.') - print(f'The test is marked as failed anyway for the moment.') +# Check the finalize json dump +reference_finalize_json = source_folder + "/" + reference_directory + "/finalize_reference.json" +actual_finalize_json = binary_folder + "finalize_params.conduit_bin.1.0_json" +if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') exit(1) \ No newline at end of file diff --git a/plugins/catalyst/tests/run_test_mpi.py b/plugins/catalyst/tests/run_test_mpi.py index c45ab8895..6cde15492 100644 --- a/plugins/catalyst/tests/run_test_mpi.py +++ b/plugins/catalyst/tests/run_test_mpi.py @@ -24,40 +24,38 @@ # get endiannes of the computer endianness = sys.byteorder -if(endianness == 'little'): - # Check the initialize json dump for each rank. 
- reference_initialize_json = source_folder + "/references/initialize_reference.json" - for rank in range(4): - actual_initialize_json = binary_folder + f"initialize_params.conduit_bin.4.{rank}_json" - with open(reference_initialize_json) as ref_file: - with open(actual_initialize_json) as actual_file: - ref_json = json.load(ref_file) - actual_json = json.load(actual_file) +reference_directory = 'references' +if(endianness == 'big'): + reference_directory = reference_directory + '_big_endian' + +# Check the initialize json dump for each rank. +reference_initialize_json = source_folder + "/" + reference_directory + "/initialize_reference.json" +for rank in range(4): + actual_initialize_json = binary_folder + f"initialize_params.conduit_bin.4.{rank}_json" + with open(reference_initialize_json) as ref_file: + with open(actual_initialize_json) as actual_file: + ref_json = json.load(ref_file) + actual_json = json.load(actual_file) + if ref_json.items() != actual_json.items(): + # Ignore the length of the script path which depends on platform. + actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] if ref_json.items() != actual_json.items(): - # Ignore the length of the script path which depends on platform. - actual_json["catalyst"]["scripts"]["script1"]["number_of_elements"] = ref_json["catalyst"]["scripts"]["script1"]["number_of_elements"] - if ref_json.items() != actual_json.items(): - print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') - exit(1) - - # Check the execute json dump for each rank. 
- for rank in range(4): - reference_execute_json = source_folder + f"/references/execute_reference_rank{rank}.json" - for step in range(9): - filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.4.{rank}_json" - if not filecmp.cmp(reference_execute_json, filepath): - print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') - exit(1) - - # Check the finalize json dump for each rank. - reference_finalize_json = source_folder + "/references/finalize_reference.json" - for rank in range(4): - actual_finalize_json = binary_folder + f"finalize_params.conduit_bin.4.{rank}_json" - if not filecmp.cmp(reference_finalize_json, actual_finalize_json): - print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') + print(f'Differences detected in file "{actual_initialize_json}" compared to reference "{reference_initialize_json}') + exit(1) + +# Check the execute json dump for each rank. +for rank in range(4): + reference_execute_json = source_folder + "/" + reference_directory + f"/execute_reference_rank{rank}.json" + for step in range(9): + filepath = binary_folder + f"execute_invc{step}_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_execute_json, filepath): + print(f'Differences detected in file "{filepath}" compared to reference "{reference_execute_json}') exit(1) -else: - print(f'The reference solution is based on little endian. So it is not possible to check with big endian.') - print(f'The test is marked as failed anyway for the moment.') - exit(1) +# Check the finalize json dump for each rank. 
+reference_finalize_json = source_folder + "/" + reference_directory + "/finalize_reference.json" +for rank in range(4): + actual_finalize_json = binary_folder + f"finalize_params.conduit_bin.4.{rank}_json" + if not filecmp.cmp(reference_finalize_json, actual_finalize_json): + print(f'Differences detected in file "{actual_finalize_json}" compared to reference "{reference_finalize_json}') + exit(1) \ No newline at end of file From 3385ed466bea9c79f6991753ec1414216090fac3 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 14 Apr 2026 13:56:44 +0200 Subject: [PATCH 25/31] adding test with the ghost --- plugins/catalyst/tests_ghost/CMakeLists.txt | 51 ++++++ plugins/catalyst/tests_ghost/catalyst.yml.in | 55 ++++++ .../catalyst_pipeline_with_rendering.py | 133 ++++++++++++++ .../tests_ghost/catalyst_serial.yml.in | 59 ++++++ .../catalyst_serial_structured.yml.in | 62 +++++++ plugins/catalyst/tests_ghost/example.c | 168 ++++++++++++++++++ plugins/catalyst/tests_ghost/run_test.py | 88 +++++++++ plugins/catalyst/tests_ghost/run_test_mpi.py | 63 +++++++ 8 files changed, 679 insertions(+) create mode 100644 plugins/catalyst/tests_ghost/CMakeLists.txt create mode 100644 plugins/catalyst/tests_ghost/catalyst.yml.in create mode 100644 plugins/catalyst/tests_ghost/catalyst_pipeline_with_rendering.py create mode 100644 plugins/catalyst/tests_ghost/catalyst_serial.yml.in create mode 100644 plugins/catalyst/tests_ghost/catalyst_serial_structured.yml.in create mode 100644 plugins/catalyst/tests_ghost/example.c create mode 100644 plugins/catalyst/tests_ghost/run_test.py create mode 100644 plugins/catalyst/tests_ghost/run_test_mpi.py diff --git a/plugins/catalyst/tests_ghost/CMakeLists.txt b/plugins/catalyst/tests_ghost/CMakeLists.txt new file mode 100644 index 000000000..6f02364be --- /dev/null +++ b/plugins/catalyst/tests_ghost/CMakeLists.txt @@ -0,0 +1,51 @@ +#============================================================================= +# Copyright (C) 2015-2026 
Commissariat a l'energie atomique et aux energies alternatives (CEA) +# Copyright (C) 2020 Institute of Bioorganic Chemistry Polish Academy of Science (PSNC) +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the names of CEA, nor the names of the contributors may be used to +# endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+#============================================================================= + +cmake_minimum_required(VERSION 3.22...4.2) + +# Creation of executable +add_executable(TestPDICatalystGhost + example.c) + +# MPI +find_package(MPI COMPONENTS C CXX REQUIRED) + +target_link_libraries(TestPDICatalystGhost + PRIVATE PDI::PDI_C MPI::MPI_C) + +set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests_ghost) +configure_file(catalyst_serial.yml.in pdi.yml) + +set(CATALYST_SCRIPT_FOLDER ${CMAKE_SOURCE_DIR}/tests_ghost) +configure_file(catalyst_serial_structured.yml.in pdi_structured.yml) + +find_package(Python3 COMPONENTS Interpreter) +add_test(NAME TestPDICatalystGhost_uniform COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests_ghost/run_test.py" "${CMAKE_BINARY_DIR}/tests_ghost/" "${CMAKE_SOURCE_DIR}/tests_ghost/" "true_uniform") +add_test(NAME TestPDICatalystGhost_structured COMMAND ${Python3_EXECUTABLE} "${CMAKE_SOURCE_DIR}/tests_ghost/run_test.py" "${CMAKE_BINARY_DIR}/tests_ghost/" "${CMAKE_SOURCE_DIR}/tests_ghost/" "true_structured") \ No newline at end of file diff --git a/plugins/catalyst/tests_ghost/catalyst.yml.in b/plugins/catalyst/tests_ghost/catalyst.yml.in new file mode 100644 index 000000000..b0ce25c0a --- /dev/null +++ b/plugins/catalyst/tests_ghost/catalyst.yml.in @@ -0,0 +1,55 @@ +# duration in seconds +duration: 1.75 +# global [height, width] (excluding boundary conditions or ghosts) +datasize: [60, 12] +# degree of parallelism +parallelism: { height: 3, width: 1 } + +# only the following config is passed to PDI +pdi: + metadata: # type of small values for which PDI keeps a copy + iter: int # current iteration id + dsize: { size: 2, type: array, subtype: int } # local data size including ghosts/boundary + psize: { size: 2, type: array, subtype: int } # number of processes in each dimension + pcoord: { size: 2, type: array, subtype: int } # coordinate of the process + data: # type of values for which PDI does not keep a copy + main_field: { size: [ 
'$dsize[0]', '$dsize[1]' ], type: array, subtype: double } + + logging: debug + plugins: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + on_event: "newiter" + execute: + state: + timestep: $iter + time: 1.0*$iter + multiblock: 0 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "uniform" + dims: { i: '1+$dsize[1]', j: '1+$dsize[0]' } + origin: + x: 1.0*$pcoord[1]*($dsize[1]-2.0)-1.0 + y: 1.0*$pcoord[0]*($dsize[0]-2.0)-1.0 + spacing: { dx: 1.0, dy: 1.0 } + topologies: + my_mesh: + type: "uniform" + coordset: "my_coords" + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "main_field" + size: $dsize[0]*$dsize[1] + ghost_layers: + my_mesh: { association: "element", start: ['1', '1'], size: ['$dsize[1]-2', '$dsize[0]-2'] } diff --git a/plugins/catalyst/tests_ghost/catalyst_pipeline_with_rendering.py b/plugins/catalyst/tests_ghost/catalyst_pipeline_with_rendering.py new file mode 100644 index 000000000..cc3819847 --- /dev/null +++ b/plugins/catalyst/tests_ghost/catalyst_pipeline_with_rendering.py @@ -0,0 +1,133 @@ +# script-version: 2.0 +from paraview.simple import * +from paraview import catalyst +import time + +# registrationName must match the channel name used in the +# 'CatalystAdaptor'. 
+producer = TrivialProducer(registrationName="grid") + +# ---------------------------------------------------------------- +# setup views used in the visualization +# ---------------------------------------------------------------- + +# ######## render view temperature + +# Create a new 'Render View' +renderView1 = CreateView('RenderView') +# renderView1.Set( +# ViewSize=[800, 600], +# InteractionMode='2D', +# CenterOfRotation=[20.0, 3.0, 0.0], +# CameraPosition=[20.0, 30.0, 408.7], +# CameraFocalPoint=[20.0, 30.0, 0.0], +# CameraFocalDisk=1.0, +# CameraParallelScale=32.0, +# ) + +renderView1.ViewSize=[800, 600] +renderView1.InteractionMode='2D' +renderView1.CenterOfRotation=[40.0, 12.0, 0.0] +renderView1.CameraPosition=[40.0, 12.0, 208.7] +renderView1.CameraFocalPoint=[5.0, 12.0, 0.0] +renderView1.CameraFocalDisk=1.0, +renderView1.CameraParallelScale=20.0 #32.0 + + +# get color transfer function/color map for 'temperature' +temperatureLUT = GetColorTransferFunction('temperature') +## RGB: first line: min value, last line: max value +# temperatureLUT.Set( +# RGBPoints=GenerateRGBPoints( +# range_min=0.0, +# range_max=200.0, +# ), +# ScalarRangeInitialized=1.0, +# ) + + +temperatureLUT.RGBPoints=[-4.0, 0.231373, 0.298039, 0.752941, + 0.0, 0.865003, 0.865003, 0.865003, + 4.0, 0.705882, 0.0156863, 0.14902] + +temperatureLUT.ScalarRangeInitialized=1.0 + + +# show data from grid +## wgridDisplay = Show(producer, renderView1, 'UnstructuredGridRepresentation') +gridDisplay = Show(producer, renderView1, 'StructuredGridRepresentation') + +gridDisplay.Representation = 'Surface With Edges' +gridDisplay.ColorArrayName = ['CELLS', 'temperature'] +gridDisplay.LookupTable = temperatureLUT + +# get color legend/bar for temperatureLUT in view renderView1 +temperatureLUTColorBar = GetScalarBar(temperatureLUT, renderView1) +temperatureLUTColorBar.Title = 'temperature' + +# set color bar visibility +temperatureLUTColorBar.Visibility = 1 + +# show color legend 
+gridDisplay.SetScalarBarVisibility(renderView1, True) + +# # ---------------------------------------------------------------- +# # setup extractors +# # ---------------------------------------------------------------- + +SetActiveView(renderView1) +# create extractor +pNG2= CreateExtractor('PNG', renderView1, registrationName='PNG2') +# trace defaults for the extractor. +pNG2.Trigger = 'TimeStep' + +# init the 'PNG' selected for 'Writer' +pNG2.Writer.FileName = 'temperature_screenshot_{timestep:06d}.png' +pNG2.Writer.ImageResolution=[800, 600] +pNG2.Writer.Format = 'PNG' + +# # ---------------------------------------------------------------- +# # setup extractor for saving the solution in VTK file +# # ---------------------------------------------------------------- + +extractor_vtk_file = None + +mesh_grid = producer.GetClientSideObject().GetOutputDataObject(0) +if mesh_grid.IsA('vtkUnstructuredGrid'): + extractor_vtk_file = CreateExtractor('VTU', producer, registrationName='VTU') +elif mesh_grid.IsA('vtkMultiBlockDataSet'): + extractor_vtk_file = CreateExtractor('VTM', producer, registrationName='VTM') +elif mesh_grid.IsA('vtkPartitionedDataSet'): + extractor_vtk_file = CreateExtractor('VTPD', producer, registrationName='VTPD') +else: + raise RuntimeError("Unsupported data type: %s. Check that the adaptor is providing channel named %s", + mesh_grid.GetClassName(), "grid") + + +# ------------------------------------------------------------------------------ +# Catalyst options +options = catalyst.Options() +## 0: no client, generate the png images and vtk files. 
+## 1: interactive +options.EnableCatalystLive = 0 + + +# Greeting to ensure that ctest knows this script is being imported +print("#############################################################") +print("executing catalyst_pipeline") +print("#############################################################") +def catalyst_execute(info): + global producer + producer.UpdatePipeline() + print("-----------------------------------") + print("executing (cycle={}, time={})".format(info.cycle, info.time)) + print("bounds:", producer.GetDataInformation().GetBounds()) + print("temperature-range:", producer.CellData["temperature"].GetRange(0)) + # In a real simulation sleep is not needed. We use it here to slow down the + # "simulation" and make sure ParaView client can catch up with the produced + # results instead of having all of them flashing at once. + + time.sleep(1) + + if options.EnableCatalystLive: + time.sleep(0.1) diff --git a/plugins/catalyst/tests_ghost/catalyst_serial.yml.in b/plugins/catalyst/tests_ghost/catalyst_serial.yml.in new file mode 100644 index 000000000..0d03f54e5 --- /dev/null +++ b/plugins/catalyst/tests_ghost/catalyst_serial.yml.in @@ -0,0 +1,59 @@ +# global [height, width] (excluding boundary conditions or ghosts) +globalsize: [30, 12] + +# degree of parallelism +parallelism: { height: 1 , width: 1 } + +# only the following config is passed to PDI +pdi: + metadata: # type of small values for which PDI keeps a copy + iter: int # current iteration id + dsize: { size: 2, type: array, subtype: int } # local data size including ghosts/boundary + dstart: { size: 2, type: array, subtype: int } # + dend: { size: 2, type: array, subtype: int } # + psize: { size: 2, type: array, subtype: int } # number of processes in each dimension + pcoord: { size: 2, type: array, subtype: int } # coordinate of the process + data: # type odstartf values for which PDI does not keep a copy + main_field: { size: [ '$dsize[0]', '$dsize[1]' ], type: array, subtype: int } + + 
logging: debug + plugins: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + on_event: "newiter" + execute: + state: + timestep: $iter + time: 1.0*$iter + multiblock: 0 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "uniform" + dims: { i: '1+$dsize[1]', j: '1+$dsize[0]' } + origin: + x: 1.0*$pcoord[1]*($dend[1]-$dstart[1])-$dstart[1] + y: 1.0*$pcoord[0]*($dend[0]-$dstart[0])-$dstart[0] + spacing: { dx: 1.0, dy: 1.0 } + topologies: + my_mesh: + type: "uniform" + coordset: "my_coords" + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "main_field" + size: $dsize[0]*$dsize[1] + ghost_layers: + my_mesh: + association: "element" + start: ['$dstart[1]', '$dstart[0]'] + size: ['$dend[1]-$dstart[1]', '$dend[0]-$dstart[0]'] \ No newline at end of file diff --git a/plugins/catalyst/tests_ghost/catalyst_serial_structured.yml.in b/plugins/catalyst/tests_ghost/catalyst_serial_structured.yml.in new file mode 100644 index 000000000..d04c85c99 --- /dev/null +++ b/plugins/catalyst/tests_ghost/catalyst_serial_structured.yml.in @@ -0,0 +1,62 @@ +# global [height, width] (excluding boundary conditions or ghosts) +globalsize: [30, 12] + +# degree of parallelism +parallelism: { height: 1 , width: 1 } + +# only the following config is passed to PDI +pdi: + metadata: # type of small values for which PDI keeps a copy + iter: int # current iteration id + dsize: { size: 2, type: array, subtype: int } # local data size including ghosts/boundary + dstart: { size: 2, type: array, subtype: int } # + dend: { size: 2, type: array, subtype: int } # + psize: { size: 2, type: array, subtype: int } # number of processes in each dimension + pcoord: { size: 2, type: array, subtype: int } # coordinate of the process + data: # type odstartf values for which PDI does not keep a copy + main_field: { size: [ '$dsize[0]', '$dsize[1]' ], type: array, 
subtype: int } + coords_x: { size: 2, type: array, subtype: double } # coordinate of the vertices in x-direction + coords_y: { size: 2, type: array, subtype: double } # coordinate of the vertices in y-direction + + logging: debug + plugins: + mpi: + catalyst: + scripts: + script1: "@CATALYST_SCRIPT_FOLDER@/catalyst_pipeline_with_rendering.py" + on_event: "newiter" + execute: + state: + timestep: $iter + time: 1.0*$iter + multiblock: 0 + channels: + grid: + type: "mesh" + data: + coordsets: + my_coords: + type: "explicit" + dims: { i: '1+$dsize[1]', j: '1+$dsize[0]' } + values: + x : { PDI_data_array: "coords_x", size: '($dsize[0]+1)*($dsize[1]+1)' } + y : { PDI_data_array: "coords_y", size: '($dsize[0]+1)*($dsize[1]+1)' } + topologies: + my_mesh: + type: "structured" + coordset: "my_coords" + elements: + dims: { i: '$dsize[1]', j: '$dsize[0]'} # {, offsets: [,,], strides: [,,]} + fields: + temperature: + association: "element" + topology: "my_mesh" + volume_dependent: "false" + values: + PDI_data_array: "main_field" + size: $dsize[0]*$dsize[1] + ghost_layers: + my_mesh: + association: "element" + start: ['$dstart[1]', '$dstart[0]'] + size: ['$dend[1]-$dstart[1]', '$dend[0]-$dstart[0]'] \ No newline at end of file diff --git a/plugins/catalyst/tests_ghost/example.c b/plugins/catalyst/tests_ghost/example.c new file mode 100644 index 000000000..8ee681a72 --- /dev/null +++ b/plugins/catalyst/tests_ghost/example.c @@ -0,0 +1,168 @@ +/******************************************************************************* + * Copyright (C) 2026 Commissariat a l'energie atomique et aux energies alternatives (CEA) + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of CEA nor the names of its contributors may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include "pdi.h" + + +void create_coordinate_of_vertices( int dsize[2], int dstart[2], int local_size[2], int pcoord[2], double coords_x[dsize[0]+1][dsize[1]+1], double coords_y[dsize[0]+1][dsize[1]+1]){ + + // catalyst variables + int cells_ghost=1; + + size_t number_of_points[2]; + number_of_points[0] = dsize[0]+1; + number_of_points[1] = dsize[1]+1; + size_t total_number_of_points = number_of_points[0]*number_of_points[1]; + + // the first axis correspond to the y-coordinate. 
+ for(int ix=0; ix\n", argv[0]); + exit(1); + } + + PC_tree_t conf = PC_parse_path(argv[1]); + MPI_Comm main_comm = MPI_COMM_WORLD; + + PDI_init(PC_get(conf, ".pdi")); + PDI_expose("mpi_comm", &main_comm, PDI_INOUT); + + int psize_1d; + MPI_Comm_size(main_comm, &psize_1d); + int pcoord_1d; + MPI_Comm_rank(main_comm, &pcoord_1d); + + PDI_expose("mpi_rank", &pcoord_1d, PDI_OUT); + PDI_expose("mpi_size", &psize_1d, PDI_OUT); + PDI_event("init"); + + long longval; + + int dsize[2]; + PC_int(PC_get(conf, ".globalsize[0]"), &longval); + dsize[0] = longval; + PC_int(PC_get(conf, ".globalsize[1]"), &longval); + dsize[1] = longval; + + int psize[2]; + PC_int(PC_get(conf, ".parallelism.height"), &longval); + psize[0] = longval; + PC_int(PC_get(conf, ".parallelism.width"), &longval); + psize[1] = longval; + + // check on distribution of MPI process + assert(dsize[0] % psize[0] == 0); + assert(dsize[1] % psize[1] == 0); + assert(psize[1] * psize[0] == psize_1d); + + int cart_period[2] = {0, 0}; + MPI_Comm cart_com; + MPI_Cart_create(main_comm, 2, psize, cart_period, 1, &cart_com); + int pcoord[2]; + MPI_Cart_coords(cart_com, pcoord_1d, 2, pcoord); + + int dstart[2]; + int dend[2]; + + int ghost_height[2]; // number of ghost in height direction + ghost_height[0] = 1; + ghost_height[1] = 1; + + int ghost_width[2]; // number of ghost in width direction + ghost_width[0] = 2; + ghost_width[1] = 2; + + int local_size[2]; // size of the local domain without ghost + local_size[0] = dsize[0] / psize[0]; + local_size[1] = dsize[1] / psize[1]; + + // + dsize[0] = local_size[0] + ghost_height[0] + ghost_height[1]; + dstart[0] = ghost_height[0]; + dend[0] = dsize[0] - ghost_height[1]; + + // + dsize[1] = local_size[1] + ghost_width[0] + ghost_width[1]; + dstart[1] = ghost_width[0]; + dend[1] = dsize[1] - ghost_width[1]; + + int ii = 0; + + PDI_expose("iter", &ii, PDI_OUT); + PDI_expose("dsize", dsize, PDI_OUT); + PDI_expose("dstart", dstart, PDI_OUT); + PDI_expose("dend", dend, PDI_OUT); + 
PDI_expose("psize", psize, PDI_OUT); + PDI_expose("pcoord", pcoord, PDI_OUT); + + int(*cur)[dsize[1]] = malloc(sizeof(int) * dsize[1] * dsize[0]); + + // initialize + for (int yy=0; yy Date: Fri, 24 Apr 2026 14:33:08 +0200 Subject: [PATCH 26/31] fix authors list --- AUTHORS | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/AUTHORS b/AUTHORS index 0a7b26582..fc82df51f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -25,9 +25,6 @@ Benoit Martin - CEA (bmartin@cea.fr) * support for const data in `PDI_share`, `PDI_expose` and `PDI_multi_expose` * Initial implementation of the cmake test -François Mazen - Kitware (francois.mazen@kitware.com) -* Catalyst plugin - François Mazen - Kitware (francois.mazen@kitware.com) * Catalyst plugin creation @@ -117,3 +114,5 @@ Benedikt Steinbusch - FZJ (b.steinbusch@fz-juelich.de) Yushan Wang - CEA (yushan.wang@cea.fr) * Maintainer (Sept. 2023 - ...) +* enable HDF5 subfiling +* Add native compression support in Decl'NetCDF From 0d6f956eb259ce732e6f3f464952a6d98c5532b3 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Fri, 24 Apr 2026 19:04:43 +0200 Subject: [PATCH 27/31] update readme --- plugins/catalyst/README.md | 56 +++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/plugins/catalyst/README.md b/plugins/catalyst/README.md index 35418a81f..1c5c436b8 100644 --- a/plugins/catalyst/README.md +++ b/plugins/catalyst/README.md @@ -1,9 +1,8 @@ -PDI Catalyst Plugin -=================== +# The PDI Catalyst Plugin {#PDI_Catalyst_plugin} This PDI plugin pushes PDI shared data to the Catalyst 2 API. The goal is to leverage the numerous Catalyst implementations like [Catalyst-ParaView](https://gitlab.kitware.com/paraview/paraview) or [Catalyst-ADIOS2](https://gitlab.kitware.com/paraview/adioscatalyst), helping massive data analysis and visualization at exascale. 
-# Build Instructions +## Build Instructions - Build and Install [PDI](https://pdi.dev/master/index.html) - Build and Install [Catalyst](https://gitlab.kitware.com/paraview/catalyst), with MPI support. @@ -15,11 +14,11 @@ This PDI plugin pushes PDI shared data to the Catalyst 2 API. The goal is to lev * in case of you used vendored version of libraries during your PDI build, instead of system libraries, you may have to define additional PDI dependencies locations. For example `spdlog_DIR` CMake variable for the spdlog library. - Build with `make` or `ninja` -# Running the Test +## Running the Test The test executable expects the config yaml file as arguments. -# Use Catalyst-Paraview +## Use Catalyst-Paraview To use the Catalyst-ParaView implementation, you should also set the following environment variables: - `CATALYST_IMPLEMENTATION_NAME=paraview` @@ -27,7 +26,7 @@ To use the Catalyst-ParaView implementation, you should also set the following e and likely add the catalyst lib folder to `LD_LIBRARY_PATH` if the catalyst library is installed in a non-standard location. -# Design Considerations +## Design Considerations *This is a work-in-progress. This paragraph is subject to change.* @@ -38,11 +37,48 @@ However, Catalyst requires additional semantic about meanings of the data, to ma The current approach is to add this semantic to the PDI Specification Tree under the `catalyst` key. See the [example file](test/pdi.yml.in) for actual implementation. PDI is very flexible about the timing of the data sharing using an advanced event mechanism, whereas Catalyst needs all data at the same point in time. -So, the user of this plugin should set an event name referenced by the `on_event` key in the yaml config, in order to trigger the call to `catalyst_execute`. Data should have been shared either before the event or during the event using the `PDI_multi_expose` function. 
+So, the user of this plugin should set an event name referenced by the `on_event` key in the yaml config, in order to trigger the call to `catalyst_execute`. Data should have been shared during the event using the `PDI_multi_expose` function. Internally, `catalyst_initialize` is called by `PDI_Init` and `catalyst_finalize` is called by `PDI_finalize`. -# IMPORTANT NOTICE: YAML CONFIGURATION FILE +## Configuration grammar + +*This is a work-in-progress. This paragraph is subject to change.* + +Simple plugin build: +```yaml +plugins: + catalyst: // name of the plugin + scripts: // list of run scripts on event ("iter" in this example) + script1: "script.py" // name of the script followed by ':' with the filename of this script + on_event: "iter" // name of the event when 'catalyst_execute' is executed + execute: // contains the information (conduit_node) needed to run 'catalyst_execute' + state: + timestep: '$iter' // integral value for current timestep (paraview only?) + time: '1.0*$iter' // float64 value for current time + channels: // list of channels + grid: // name of a channel + type: "mesh" // type of channel considering + data: // A conduit Mesh Blueprint and object to define vtkGhostType + coordsets: // list of coordset supported by Mesh Blueprint + ... + topologies: // list of topology supported by Mesh Blueprint + ... + fields: // list of field supported by Mesh Blueprint + ... + ghost_layers: // allows defining vtkGhostType for structured mesh and uniform mesh with ghost layers. + my_topology: // name of a topology defined in this channel + association: "element" // consider vtkGhostType defined on element + start: ['$dstart[1]', '$dstart[0]'] // starting index of the domain without ghost + size: ['$dend[1]-$dstart[1]', '$dend[0]-$dstart[0]'] // size of domain without ghost +``` + +In the paraview implementation of catalyst, a description of variables (conduit_node) +can be found at https://docs.paraview.org/en/v6.1.0/Catalyst/blueprints.html.
+Not all variables in each protocol are supported by this plugin. + + +### IMPORTANT NOTICE: YAML FILE In the sub-tree corresponding to the catalyst plugin, a double quoted value is evaluated as a string. @@ -70,11 +106,11 @@ Summary: | real | plain or simple quoted | 1.22 or '1.22'| -# License +## License This repository is under the Apache 2.0 license, see NOTICE and LICENSE file. -The test case is a modification of the Catalyst2 CxxFullExample code from the ParaView source code, licenced under BSD-3-Clauses. +The test case (in '/plugins/catalyst/tests') is a modification of the Catalyst2 CxxFullExample code from the ParaView source code, licensed under BSD-3-Clauses. Developed by Kitware SAS (Kitware Europe), motivated by the [NumPEx](https://numpex.org/) program. From 3262de0e42704b3108e230f3834fa918617741cf Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Tue, 28 Apr 2026 15:23:57 +0200 Subject: [PATCH 28/31] fix Config_error --- .../catalyst_plugin_structured_ghost.h | 26 +++--- plugins/catalyst/pdi_catalyst_plugin.cxx | 79 ++++++++++--------- plugins/catalyst/pdi_catalyst_plugin.h | 46 +++++------ 3 files changed, 78 insertions(+), 73 deletions(-) diff --git a/plugins/catalyst/catalyst_plugin_structured_ghost.h b/plugins/catalyst/catalyst_plugin_structured_ghost.h index b623867e7..68eff9cd9 100644 --- a/plugins/catalyst/catalyst_plugin_structured_ghost.h +++ b/plugins/catalyst/catalyst_plugin_structured_ghost.h @@ -17,7 +17,7 @@ class Catalyst_plugin_structured_ghost /// The tree representing the ghost config PC_tree_t m_ghost_tree; - /// The parent tree of m_ghost_tree for config_error message + /// The parent tree of m_ghost_tree for specification tree error message PC_tree_t m_parent_tree; /// name of the mesh (It correspond to the topology name in the mesh blue print) @@ -47,7 +47,7 @@ class Catalyst_plugin_structured_ghost if (!PC_status(m_topology_name_spec)) { m_topology_name = PDI::to_string(m_topology_name_spec); } else { - throw 
PDI::Config_error{m_ghost_tree, "The name of the topology is not defined."}; + throw PDI::Spectree_error{m_ghost_tree, "The name of the topology is not defined."}; } // A TESTER: @@ -75,7 +75,7 @@ class Catalyst_plugin_structured_ghost std::string path_to_origins = "topologies/"+m_topology_name+"/origin"; //if (value_type == "uniform" && conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { if (conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { - throw PDI::Config_error{m_ghost_tree, "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview."}; + throw PDI::Spectree_error{m_ghost_tree, "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview."}; } std::string path_to_dims = "coordsets/"+value_coordset+"/dims/"; @@ -91,7 +91,7 @@ class Catalyst_plugin_structured_ghost } else { // Config error is return because we are in a constructor. - throw PDI::Config_error{m_ghost_tree, "ghost_layers yaml node is only valid with uniform and structured topology. The topology `{}' is not of this type `{}' "}; + throw PDI::Spectree_error{m_ghost_tree, "ghost_layers yaml node is only valid with uniform and structured topology. 
The topology is `{}'", value_type}; } // define the conduit for the vtkGhostType @@ -109,15 +109,15 @@ class Catalyst_plugin_structured_ghost m_association = PDI::to_string(value); } else { - throw PDI::Config_error{key_tree, "Invalid configuration key in mask_ghost for topology `{}': `{}'", m_topology_name, key}; + throw PDI::Spectree_error{key_tree, "Invalid configuration key in mask_ghost for topology `{}': `{}'", m_topology_name, key}; } }); if (m_size.size() != m_start.size()) { - throw PDI::Config_error{m_ghost_tree, "Invalid configuration in mask_ghost for topology `{}' the number of elements in size and in start are not the same.", m_topology_name}; + throw PDI::Spectree_error{m_ghost_tree, "Invalid configuration in mask_ghost for topology `{}' the number of elements in size and in start are not the same.", m_topology_name}; } if (m_size.size() != m_dimensions.size()) { - throw PDI::Config_error{m_parent_tree, "Invalid configuration in mask_ghost for topology `{}', the dimension of the problem `{}' is not equal to `{}' the number of elements in size and in start.", m_topology_name, m_dimensions.size(), m_size.size()}; + throw PDI::Spectree_error{m_parent_tree, "Invalid configuration in mask_ghost for topology `{}', the dimension of the problem `{}' is not equal to `{}' the number of elements in size and in start.", m_topology_name, m_dimensions.size(), m_size.size()}; } // check size + start + dims (TODO: en dernier) @@ -221,11 +221,11 @@ class Catalyst_plugin_structured_ghost return conduit_cpp::cpp_node(parent_node)[path_to_type].as_string(); } else{ PC_tree_t msg_tree = retrieve_pc_tree_from_parent_node("topologies", dataname); - throw PDI::Config_error{msg_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined as a string.", dataname, m_topology_name}; + throw PDI::Spectree_error{msg_tree, "... Vec Ghost Type catalyst ... 
The {} for topology `{}' is not defined as a string.", dataname, m_topology_name}; } } else { - throw PDI::Config_error{m_parent_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined.", dataname, m_topology_name}; + throw PDI::Spectree_error{m_parent_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined.", dataname, m_topology_name}; } } @@ -241,7 +241,7 @@ class Catalyst_plugin_structured_ghost PC_tree_t msg_tree = PC_get(m_parent_tree, PC_to_dataname.c_str()); if ( PC_status(msg_tree)) { - throw PDI::Config_error(msg_tree, ""); + throw PDI::Spectree_error(msg_tree, ""); } else { if (conduit_cpp::cpp_node(parent_node).has_path(path_to_dims)) { std::list list_dims{"i","j","k"}; @@ -262,7 +262,7 @@ class Catalyst_plugin_structured_ghost m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int,msg_data); } else { - throw PDI::Config_error{msg_tree, "For `{}' the value of dims/`{}' is not an integer or a long", msg_data, elem}; + throw PDI::Spectree_error{msg_tree, "For `{}' the value of dims/`{}' is not an integer or a long", msg_data, elem}; } } else{ @@ -271,10 +271,10 @@ class Catalyst_plugin_structured_ghost } } if (m_dimensions.size()==0) { - throw PDI::Config_error{msg_tree, "No dims/i , dims/j and dims/k are defined for the `{}'", msg_data}; + throw PDI::Spectree_error{msg_tree, "No dims/i , dims/j and dims/k are defined for the `{}'", msg_data}; } } else { - throw PDI::Config_error(msg_tree, "For the `{}', we need dims keyword to generate vtkGhostType for catalyst.", msg_data); + throw PDI::Spectree_error(msg_tree, "For the `{}', we need dims keyword to generate vtkGhostType for catalyst.", msg_data); } } } diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 68996d529..93f8fdade 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -67,13 +67,13 @@ void catalyst_plugin::run_catalyst_initialize() 
context().logger().info("Read information for script."); auto scripts_spec = PC_get(this->m_spec_tree, ".scripts"); if (PC_status(scripts_spec)) { - throw PDI::Config_error(m_spec_tree, "No scripts tree is defiend for catalyst plugin."); + throw PDI::Spectree_error(m_spec_tree, "No scripts tree is defined for catalyst plugin."); } int script_number = 0; PC_len(scripts_spec, &script_number); if (script_number == 0) { - throw PDI::Config_error(scripts_spec, "Zero python script is defined for catalyst python."); + throw PDI::Spectree_error(scripts_spec, "Zero python script is defined for catalyst python."); } else { context().logger().debug("The number of python script is `{}'", script_number); } @@ -109,7 +109,7 @@ void catalyst_plugin::run_catalyst_initialize() context().logger().debug("value of the communicator is {}:", static_cast(MPI_Comm_c2f(tmp_comm))); } else { // context().logger().warn("value of the communicator is {}:", static_cast(MPI_Comm_c2f(tmp_comm))); - //throw PDI::Config_error{communicator_spec, "No communicator is given."}; + //throw PDI::Spectree_error{communicator_spec, "No communicator is given."}; context().logger().warn("No communicator is given by default the communicator is MPI_COMM_WORD."); } #else @@ -117,7 +117,7 @@ void catalyst_plugin::run_catalyst_initialize() auto communicator_spec = PC_get(this->m_spec_tree, ".communicator"); if (!PC_status(communicator_spec)) { context().logger().info("Used Catalyst with no mpi support. Invalid communicator: `{}'", PDI::to_string(communicator_spec)); - throw PDI::Config_error{ + throw PDI::Spectree_error{ communicator_spec, "Used Catalyst with no mpi support. 
Invalid communicator: `{}'", PDI::to_string(communicator_spec) @@ -173,7 +173,7 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( if (current.tree.node->type == YAML_MAPPING_NODE) { int data_tree_size = PDI::len(current.tree); if (data_tree_size == 0) { - throw PDI::Config_error(current.tree, "ghost_layers node defined with a mapping node of size 0."); + throw PDI::Spectree_error(current.tree, "ghost_layers node defined with a mapping node of size 0."); } else { context().logger().info("number of meshes(topologies) to consider = `{}'", data_tree_size); } @@ -183,7 +183,7 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( list_vtkGhostType_to_create.emplace_back(context(), current.parent_node, current.tree, current.parent_tree, index); } } else { - throw PDI::Config_error(current.tree, "ghost_layers node only support yaml mapping node."); + throw PDI::Spectree_error(current.tree, "ghost_layers node only support yaml mapping node."); } } else { if (current.tree.node->type == YAML_MAPPING_NODE) { @@ -218,7 +218,7 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( } } -void catalyst_plugin::create_catalyst_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec) +void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec) { // walk the spec tree and create corresponding catalyst nodes. 
struct Spec_tree_node { @@ -243,16 +243,16 @@ void catalyst_plugin::create_catalyst_conduit_node(conduit_node* execute_node, P // for (int index = data_tree_size - 1; index >= 0; --index) { // auto key = PC_get(current.tree, "{%d}", index); // if (PDI::to_string(key) == "dims") { - // PDI::Config_error{current.tree, "I found dims dans elements `{}'", current.name}; + // PDI::Spectree_error{current.tree, "I found dims dans elements `{}'", current.name}; // } else { - // PDI::Config_error{current.tree, "The key is not dims and it is `{}'", PDI::to_string(key)}; + // PDI::Spectree_error{current.tree, "The key is not dims and it is `{}'", PDI::to_string(key)}; // } // } } else { auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; switch (current.tree.node->type) { case YAML_NO_NODE: - throw PDI::Config_error{current.tree, "Unsupported Empty YAML Node for variable `{}'", current.name}; + throw PDI::Spectree_error{current.tree, "Unsupported Empty YAML Node for variable `{}'", current.name}; // context().logger().error("Unsupported Empty YAML Node for variable `{}'", current.name); break; case YAML_SCALAR_NODE: @@ -267,10 +267,10 @@ void catalyst_plugin::create_catalyst_conduit_node(conduit_node* execute_node, P auto data_type = spec_ref.type()->evaluate(context()); if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_scalar_pdi_data(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); + set_value_for_pdi_scalar_datatype(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); } else { // context().logger().error("Unsupported datatype for variable: `{}'. It should be scalar type (integer or float).", data_name); - throw PDI::Config_error{ + throw PDI::Spectree_error{ current.tree, "Unsupported datatype for variable: `{}'. 
It should be scalar type (integer or float).", data_name @@ -286,13 +286,13 @@ void catalyst_plugin::create_catalyst_conduit_node(conduit_node* execute_node, P case YAML_FOLDED_SCALAR_STYLE: case YAML_ANY_SCALAR_STYLE: // context().logger().error("Unsupported YAML scalar style for variable `{}'", current.name); - throw PDI::Config_error{current.tree, "Unsupported YAML scalar style for variable `{}'", current.name}; + throw PDI::Spectree_error{current.tree, "Unsupported YAML scalar style for variable `{}'", current.name}; break; } break; case YAML_SEQUENCE_NODE: // context().logger().error("Unsupported Sequence YAML Node for variable `{}'", current.name); - throw PDI::Config_error{current.tree, "Unsupported Sequence YAML Node for variable `{}'", current.name}; + throw PDI::Spectree_error{current.tree, "Unsupported Sequence YAML Node for variable `{}'", current.name}; break; case YAML_MAPPING_NODE: int data_tree_size = PDI::len(current.tree); @@ -329,7 +329,7 @@ void catalyst_plugin::run_catalyst_execute() conduit_node* node_pointer = conduit_cpp::c_node(&node); // create the conduit node for catalyst_execute - create_catalyst_conduit_node(node_pointer, execute_spec); + create_catalyst_execute_conduit_node(node_pointer, execute_spec); // read information to create the vtkGhostType for paraview (read "ghost_layers" node in the yaml file) //create_node_for_mask_ghost( node_pointer, execute_spec, list_vtkGhostType_to_create); @@ -379,34 +379,36 @@ void catalyst_plugin::run_catalyst_finalize() void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree) { + // check the function is called with a PC_tree containg auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { // context().logger().error("No \"name\" child in PDI_data_array spec."); - throw PDI::Config_error{tree, "No \"name\" child in PDI_data_array spec."}; - return; + throw PDI::Spectree_error{tree, "No \"name\" child in PDI_data_array spec."}; + //return; } 
std::string name = PDI::to_string(name_spec); PDI::Ref_r ref_r = context()[name].ref(); + // check the data can be read from PDI if (!ref_r) { context().logger().warn("Cannot read `{}' this data is not available", name); // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error throw PDI::System_error{"No \"name\" child in PDI_data_array spec `{}'.", name}; - return; + //return; } auto data_type = ref_r.type(); if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { - fill_node_with_array_pdi_data(node, name, tree, *array_datatype, ref_r); + set_value_for_pdi_array_datatype(node, name, tree, *array_datatype, ref_r); } else { // context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); - // throw PDI::Config_error{tree, "Unsupported datatype for variable: `{}'. The type should be array type.", name}; + // throw PDI::Spectree_error{tree, "Unsupported datatype for variable: `{}'. The type should be array type.", name}; // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error throw PDI::System_error{"Unsupported datatype for variable: `{}'. 
The type should be array type.", name}; } } -void catalyst_plugin::fill_node_with_scalar_pdi_data( +void catalyst_plugin::set_value_for_pdi_scalar_datatype( conduit_node* node, PC_tree_t& tree, const std::string& name, @@ -428,7 +430,7 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); } else { // context().logger().error("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); - throw PDI::Config_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + throw PDI::Spectree_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { auto buffer_size = scalar_datatype.buffersize(); @@ -442,7 +444,7 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); } else { // context().logger().error("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); - throw PDI::Config_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + throw PDI::Spectree_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { auto buffer_size = scalar_datatype.buffersize(); @@ -452,11 +454,11 @@ void catalyst_plugin::fill_node_with_scalar_pdi_data( catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); } else { // context().logger().error("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); - throw PDI::Config_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; + throw PDI::Spectree_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; } } else { // context().logger().error("Unknown Scalar Type for variable `{}'", name); - throw PDI::Config_error{tree, "Unknown Scalar Type for variable 
`{}'", name}; + throw PDI::Spectree_error{tree, "Unknown Scalar Type for variable `{}'", name}; } } @@ -479,7 +481,7 @@ void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri // return spec_ref.scalar_value(); tmp_value = data_expression.to_long(context()); } else { - throw PDI::Config_error{ + throw PDI::Spectree_error{ spec, "Unknown Scalar Type for variable `{}'. The type must be an integer signed or unsigned)", PDI::to_string(spec) @@ -487,12 +489,12 @@ void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri // context().logger().error("Unknown Scalar Type for variable {}. The type must be an integerc (signed or unsigned)", PDI::to_string(spec)); } } else { - throw PDI::Config_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; + throw PDI::Spectree_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; // context().logger().error("The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)); } } else { // context().logger().error("Supported only YAML_SCALAR_NODE for variable `{}'", name); - throw PDI::Config_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; + throw PDI::Spectree_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; } // return value in conduit_index_t @@ -509,7 +511,7 @@ void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri } } -void catalyst_plugin::fill_node_with_array_pdi_data( +void catalyst_plugin::set_value_for_pdi_array_datatype( conduit_node* node, const std::string& name, PC_tree_t& tree, @@ -518,13 +520,14 @@ void catalyst_plugin::fill_node_with_array_pdi_data( ) { PDI::Datatype_sptr type = array_datatype.subtype(); + // ???? 
bizarre ==> while (auto&& array_type = std::dynamic_pointer_cast(type)) { type = array_type->subtype(); } auto scalar_datatype = std::dynamic_pointer_cast(type); if (!scalar_datatype) { // context().logger().error("Array subtype of variable {} should be scalar type.", name); - throw PDI::Config_error{tree, "Array subtype of variable `{}' should be scalar type.", name}; + throw PDI::Spectree_error{tree, "Array subtype of variable `{}' should be scalar type.", name}; return; } @@ -600,7 +603,7 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - // throw PDI::Config_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + // throw PDI::Spectree_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; // context().logger().error("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); throw PDI::System_error{"Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } @@ -655,7 +658,7 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - //throw PDI::Config_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; + //throw PDI::Spectree_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; // context().logger().error("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); throw PDI::System_error{"Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } @@ -686,13 +689,13 @@ void catalyst_plugin::fill_node_with_array_pdi_data( endianness ); } else { - // throw PDI::Config_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; + // throw PDI::Spectree_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; // context().logger().error("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); throw PDI::System_error{"Unknown FLOAT 
buffer size of `{}' for variable `{}'", buffer_size, name}; } } else { // context().logger().error("Unknown Scalar Type for variable `{}'", name); - // throw PDI::Config_error{tree, "Unknown Scalar Type for variable `{}'", name}; + // throw PDI::Spectree_error{tree, "Unknown Scalar Type for variable `{}'", name}; throw PDI::System_error{"Unknown Scalar Type for variable `{}'", name}; } } @@ -713,7 +716,7 @@ void catalyst_plugin::fill_node_with_array_pdi_data( // // return spec_ref.scalar_value(); // return data_expression.to_long(context()); // } else { -// throw PDI::Config_error{ +// throw PDI::Spectree_error{ // spec, // "Unknown Scalar Type for variable `{}'. The type must be an integerc (signed or unsigned)", // PDI::to_string(spec) @@ -721,13 +724,13 @@ void catalyst_plugin::fill_node_with_array_pdi_data( // // context().logger().error("Unknown Scalar Type for variable {}. The type must be an integerc (signed or unsigned)", PDI::to_string(spec)); // } // } else { -// throw PDI::Config_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; +// throw PDI::Spectree_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; // // context().logger().error("The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)); // } // return 0; // } else { // // context().logger().error("Supported only YAML_SCALAR_NODE for variable `{}'", name); -// throw PDI::Config_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; +// throw PDI::Spectree_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; // } // return 0; // } @@ -739,7 +742,7 @@ std::string catalyst_plugin::read_pdi_execute_event_name() if (PC_status(execute_spec) == PC_OK) { event_name = PDI::to_string(execute_spec); } else { - throw PDI::Config_error{execute_spec, "No event name for catalyst plugin is given"}; + throw PDI::Spectree_error{execute_spec, "No event name for catalyst 
plugin is given"}; } return event_name; diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index a12626efb..fe8f34584 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -24,7 +24,9 @@ typedef struct conduit_node_impl conduit_node; // ?? class catalyst_plugin: public PDI::Plugin { public: + /// @brief Builds a catalsyt_plugin specification tree from its yaml config catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree); + ~catalyst_plugin(); private: @@ -49,7 +51,7 @@ class catalyst_plugin: public PDI::Plugin /// @brief function running in pdi_finalize void run_catalyst_finalize(); - /// @brief TO COMPLETE + /// @brief TO COMPLET /// @param execute_node /// @param execute_spec /// @param list_vtkGhostType_to_create @@ -59,23 +61,23 @@ class catalyst_plugin: public PDI::Plugin std::vector& list_vtkGhostType_to_create ); - /// @brief TO COMPLETE - /// @param execute_node - /// @param execute_spec - void create_catalyst_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec); + /// @brief creates a conduit_node for catalyst_excute from yaml tree + /// @param execute_node conduit node that will be created + /// @param execute_spec The tree representing the execute section + void create_catalyst_execute_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec); - /// @brief TO COMPLETE - /// @param node - /// @param tree + /// @brief Fills a conduit node corresponding to array shared with pdi from a yaml tree. 
+ /// @param the node in which to operate + /// @param tree specification tree containing a PDI_data_array void fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree); - /// @brief TO COMPLETE - /// @param node - /// @param tree - /// @param name - /// @param scalar_datatype - /// @param ref_r - void fill_node_with_scalar_pdi_data( + /// @brief Sets value of a conduit node corresponding to a pdi scalar datatype from a yaml tree + /// @param node the node in which we set the value + /// @param name name of the array + /// @param tree specification tree containing a PDI_data_array + /// @param scalar_datatype type of the scalar + /// @param ref_r reference of the array + void set_value_for_pdi_scalar_datatype( conduit_node* node, PC_tree_t& tree, const std::string& name, @@ -83,13 +85,13 @@ class catalyst_plugin: public PDI::Plugin PDI::Ref_r& ref_r ); - /// @brief TO COMPLETE - /// @param node - /// @param name - /// @param tree - /// @param array_datatype - /// @param ref_r - void fill_node_with_array_pdi_data( + /// @brief Sets values of a conduit node corresponding to a pdi array datatype from a yaml tree + /// @param node the node in which we set the value + /// @param name name of the array + /// @param tree specification tree containing a PDI_data_array + /// @param array_datatype type of the array + /// @param ref_r reference of the array + void set_value_for_pdi_array_datatype( conduit_node* node, const std::string& name, PC_tree_t& tree, From 574ba6c00cd018a3f585e7ff9dba18b9306d975f Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Mon, 4 May 2026 12:19:39 +0200 Subject: [PATCH 29/31] adding python support --- CMakeLists.txt | 8 +- example/CMakeLists.txt | 23 +-- example/catalyst.yml.in | 1 - plugins/catalyst/CMakeLists.txt | 5 - .../catalyst_plugin_structured_ghost.h | 4 +- plugins/catalyst/pdi_catalyst_plugin.cxx | 141 ++++++++++++------ plugins/catalyst/pdi_catalyst_plugin.h | 2 +- 7 files changed, 109 insertions(+), 75 deletions(-) diff 
--git a/CMakeLists.txt b/CMakeLists.txt index 03e0c9017..e64660c4a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -422,6 +422,10 @@ endif() ## Catalyst if("${BUILD_CATALYST_PLUGIN}") + set(CATALYST_WRAP_PYTHON OFF) + if("${BUILD_PYTHON}") + set(CATALYST_WRAP_PYTHON ON) + endif() sbuild_add_dependency(catalyst "${USE_DEFAULT}" EMBEDDED_PATH "vendor/catalyst-2.0.0" CMAKE_CACHE_ARGS @@ -429,13 +433,11 @@ if("${BUILD_CATALYST_PLUGIN}") "-DCATALYST_BUILD_TESTING:BOOL=OFF" "-DCATALYST_USE_MPI:BOOL=ON" "-DCATALYST_WRAP_FORTRAN:BOOL=OFF" - "-DCATALYST_WRAP_PYTHON:BOOL=OFF" + "-DCATALYST_WRAP_PYTHON:BOOL=${CATALYST_WRAP_PYTHON}" VERSION 2.0.0 ) endif() - - ## JSON if("${BUILD_JSON_PLUGIN}") diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt index 01ef958d3..904ac06dc 100644 --- a/example/CMakeLists.txt +++ b/example/CMakeLists.txt @@ -91,11 +91,9 @@ end module endif() endif() - add_executable(PDI_example_C example.c) target_link_libraries(PDI_example_C PRIVATE PDI::PDI_C paraconf::paraconf MPI::MPI_C m) - if("${BUILD_FORTRAN}") add_executable(PDI_example_F example.F90) target_link_libraries(PDI_example_F PRIVATE PDI::PDI_f90 paraconf::paraconf_f90 MPI_with_mod m) @@ -173,26 +171,19 @@ set(CATALYST_SCRIPT_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}) configure_file(catalyst_serial.yml.in ${CMAKE_BINARY_DIR}/catalyst_serial.yml) ## keep serial for personal testing configure_file(catalyst.yml.in ${CMAKE_BINARY_DIR}/catalyst.yml) -add_test(NAME PDI_example_catalyst_C_serial COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 1 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst_serial.yml") +add_test(NAME PDI_example_catalyst_C_serial COMMAND "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 1 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst_serial.yml") set_property(TEST PDI_example_catalyst_C_serial PROPERTY TIMEOUT 15) set_property(TEST PDI_example_catalyst_C_serial PROPERTY PROCESSORS 1) -add_test(NAME 
PDI_example_catalyst_C COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst.yml") +add_test(NAME PDI_example_catalyst_C COMMAND "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_BINARY_DIR}/catalyst.yml") set_property(TEST PDI_example_catalyst_C PROPERTY TIMEOUT 15) set_property(TEST PDI_example_catalyst_C PROPERTY PROCESSORS 3) -## -# if("${BUILD_FORTRAN}") -# add_test(NAME PDI_example_catalyst_F COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" ${MPIEXEC_POSTFLAGS} "${CMAKE_CURRENT_SOURCE_DIR}/catalyst.yml") -# set_property(TEST PDI_example_catalyst_F PROPERTY TIMEOUT 15) -# set_property(TEST PDI_example_catalyst_F PROPERTY PROCESSORS 3) -# endif("${BUILD_FORTRAN}") - -# if("${BUILD_PYTHON}") -# add_test(NAME PDI_example_catalyst_P COMMAND "${RUNTEST_DIR}" "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 3 ${MPIEXEC_PREFLAGS} "$" "${Python3_EXECUTABLE}" ${MPIEXEC_POSTFLAGS} "${CMAKE_CURRENT_SOURCE_DIR}/example.py" "${CMAKE_CURRENT_SOURCE_DIR}/catalyst.yml") -# set_property(TEST PDI_example_catalyst_P PROPERTY TIMEOUT 15) -# set_property(TEST PDI_example_catalyst_P PROPERTY PROCESSORS 3) -# endif("${BUILD_PYTHON}") +if("${BUILD_PYTHON}" AND "${CATALYST_USE_PYTHON}") +add_test(NAME PDI_example_catalyst_P COMMAND "${MPIEXEC}" "${MPIEXEC_NUMPROC_FLAG}" 1 ${MPIEXEC_PREFLAGS} "$" "${Python3_EXECUTABLE}" ${MPIEXEC_POSTFLAGS} "${CMAKE_CURRENT_SOURCE_DIR}/example.py" "${CMAKE_BINARY_DIR}/catalyst_serial.yml") +set_property(TEST PDI_example_catalyst_P PROPERTY TIMEOUT 15) +set_property(TEST PDI_example_catalyst_P PROPERTY PROCESSORS 1) +endif("${BUILD_PYTHON}" AND "${CATALYST_USE_PYTHON}") endif("${CATALYST_USE_MPI}") endif("${BUILD_CATALYST_PLUGIN}") diff --git a/example/catalyst.yml.in b/example/catalyst.yml.in index daf777769..7cf08f6be 100644 --- a/example/catalyst.yml.in +++ b/example/catalyst.yml.in @@ 
-15,7 +15,6 @@ pdi: data: # type of values for which PDI does not keep a copy main_field: { size: [ '$dsize[0]', '$dsize[1]' ], type: array, subtype: double } - logging: debug plugins: mpi: catalyst: diff --git a/plugins/catalyst/CMakeLists.txt b/plugins/catalyst/CMakeLists.txt index 63bd111aa..4d3ca9241 100644 --- a/plugins/catalyst/CMakeLists.txt +++ b/plugins/catalyst/CMakeLists.txt @@ -26,10 +26,6 @@ find_package(Python3 REQUIRED COMPONENTS Interpreter Development) # Catalyst # find_package(catalyst REQUIRED) -# if(NOT ${CATALYST_USE_MPI}) -# message(WARNING "No MPI support in your Catalyst library, please activate MPI in your Catalyst build if you want to use in parallel.") -# endif() - find_package(catalyst REQUIRED) if("${BUILD_CATALYST_PARALLEL}" AND NOT "${CATALYST_USE_MPI}") message(FATAL_ERROR "Catalyst with MPI support required, sequential catalyst only found. Please set -DBUILD_CATALYST_PARALLEL=OFF to disable parallel Catalyst") @@ -54,7 +50,6 @@ find_package(PDI REQUIRED COMPONENTS plugins) # ${PDI_COMPONENTS}) # The Plugin add_library(pdi_catalyst_plugin MODULE pdi_catalyst_plugin.cxx) if("${CATALYST_USE_MPI}") - #target_include_directories(pdi_catalyst_plugin PRIVATE "${MPI_C_INCLUDE_DIRS}") target_compile_definitions(pdi_catalyst_plugin PUBLIC CATALYST_IS_PARALLEL=ON) endif() target_link_libraries(pdi_catalyst_plugin PDI::PDI_plugins ${CATALYST_DEPS}) diff --git a/plugins/catalyst/catalyst_plugin_structured_ghost.h b/plugins/catalyst/catalyst_plugin_structured_ghost.h index 68eff9cd9..9085bb9dc 100644 --- a/plugins/catalyst/catalyst_plugin_structured_ghost.h +++ b/plugins/catalyst/catalyst_plugin_structured_ghost.h @@ -73,7 +73,7 @@ class Catalyst_plugin_structured_ghost m_ctx.logger().info("For the uniform topology `{}', the name of coordset is `{}'", m_topology_name, value_coordset); std::string path_to_origins = "topologies/"+m_topology_name+"/origin"; - //if (value_type == "uniform" && 
conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { + if (conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { throw PDI::Spectree_error{m_ghost_tree, "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview."}; } @@ -100,7 +100,7 @@ class Catalyst_plugin_structured_ghost PDI::each(mask_ghost_spec , [&](PC_tree_t key_tree, PC_tree_t value) { std::string key = PDI::to_string(key_tree); - m_ctx.logger().info("key= {}", key); + m_ctx.logger().debug("read key= {} in ghost layers section.", key); if (key == "size") { PDI::opt_each(value, [&](PC_tree_t size) { m_size.emplace_back(PDI::to_string(size)); }); } else if (key == "start") { diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 93f8fdade..d9e4e2296 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -30,9 +30,25 @@ catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) ctx.callbacks().add_event_callback([this](const std::string& event_name) { this->process_event(event_name); }); } -catalyst_plugin::~catalyst_plugin() +catalyst_plugin::~catalyst_plugin() noexcept(false) { - run_catalyst_finalize(); + try{ + run_catalyst_finalize(); + } catch (const std::exception& e) { + if (std::uncaught_exceptions()) { + throw; + } else { + context().logger().error("When closing catalyst plugin `{}'",e.what()); + } + } catch(...) 
{ + if (std::uncaught_exceptions()) { + throw; + } + else { + context().logger().error("When closing catalyst plugin"); + } + } + context().logger().info("Closing plugin"); } void catalyst_plugin::process_pdi_init() @@ -56,6 +72,7 @@ void catalyst_plugin::process_pdi_init() void catalyst_plugin::process_event(const std::string& event_name) { if (event_name == this->m_pdi_execute_event_name) { + context().logger().info("call run_catalyst_execute in event `{}'...",event_name); run_catalyst_execute(); } } @@ -166,9 +183,9 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( auto current = remaining_tree_and_parent_node.top(); remaining_tree_and_parent_node.pop(); - context().logger().info("Read node of name`{}'", current.name); + context().logger().debug("Read node of name`{}'", current.name); if (current.name == "ghost_layers") { - context().logger().info("Ghost layer node: `{}'", current.name); + context().logger().debug("Ghost layer node: `{}'", current.name); if (current.tree.node->type == YAML_MAPPING_NODE) { int data_tree_size = PDI::len(current.tree); @@ -232,7 +249,6 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute while (!remaining_tree_and_parent_node.empty()) { auto current = remaining_tree_and_parent_node.top(); remaining_tree_and_parent_node.pop(); - context().logger().info("Read node of name`{}'", current.name); if (current.name == "ghost_layers") { context().logger().info(" ghost_layers keys will be read after"); @@ -249,6 +265,7 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute // } // } } else { + context().logger().info("Read node of name`{}'", current.name); auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; switch (current.tree.node->type) { case YAML_NO_NODE: @@ -259,39 +276,77 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute switch (current.tree.node->data.scalar.style) { case YAML_PLAIN_SCALAR_STYLE: case 
YAML_SINGLE_QUOTED_SCALAR_STYLE: + case YAML_DOUBLE_QUOTED_SCALAR_STYLE: + { // handle integer or float/double type that depend perhaps on scalar PDI data - { - std::string data_name{PDI::to_string(current.tree)}; - PDI::Expression data_expression{PDI::to_string(current.tree)}; - PDI::Ref_r spec_ref = data_expression.to_ref(context()); - auto data_type = spec_ref.type()->evaluate(context()); - - if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { - set_value_for_pdi_scalar_datatype(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); - } else { - // context().logger().error("Unsupported datatype for variable: `{}'. It should be scalar type (integer or float).", data_name); - throw PDI::Spectree_error{ - current.tree, - "Unsupported datatype for variable: `{}'. It should be scalar type (integer or float).", - data_name - }; + context().logger().info("$$ read value of an integer or float or string "); + std::string data_name{PDI::to_string(current.tree)}; + context().logger().info("data_name=`{}'",data_name); + PDI::Expression data_expression{PDI::to_string(current.tree)}; + + PDI::Ref_r spec_ref = data_expression.to_ref(context()); + if (!spec_ref) { + context().logger().info("problem !spec_ref"); + throw PDI::Value_error{ + "Error of right access for: `{}'", + data_name + }; + } + auto data_type = spec_ref.type()->evaluate(context()); + if (!data_type) { + context().logger().info("problem !data_type"); + throw PDI::Spectree_error{ + current.tree, + "Error of right access for: `{}'", + data_name + }; + } + if (auto&& scalar_datatype = std::dynamic_pointer_cast(data_type)) { + context().logger().debug("## read a scalar type `{}' ##", data_name); + set_value_for_pdi_scalar_datatype(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); + } else if(auto &&array_datatype = std::dynamic_pointer_cast(data_type)) { + context().logger().debug("## read an array type `{}' ##", data_name); + + // check 
the array_datatype is a string + PDI::Datatype_sptr type = array_datatype->subtype(); + { + auto&& array_type = std::dynamic_pointer_cast(type); + type = array_type->subtype(); + } + // case multi dimensional array ?? + // while (auto&& array_type = std::dynamic_pointer_cast(type)) { + // type = array_type->subtype(); + // } + auto array_scalar_datatype = std::dynamic_pointer_cast(type); + if (!array_scalar_datatype) { + // context().logger().error("Array subtype of variable {} should be scalar type.", name); + throw PDI::Spectree_error{current.tree, "Array subtype of variable `{}' should be scalar type.", current.name}; + } + PDI::Scalar_kind scalar_kind = array_scalar_datatype->kind(); + if (scalar_kind == PDI::Scalar_kind::SIGNED && array_scalar_datatype->buffersize() == sizeof(char)) + { + context().logger().debug("## scalar_kind is signed"); + current_node.set_string(PDI::to_string(current.tree)); + } + else if (scalar_kind == PDI::Scalar_kind::UNSIGNED && array_scalar_datatype->buffersize() == sizeof(unsigned char)) + { + context().logger().info("## scalar_kind is unsigned"); + current_node.set_string(PDI::to_string(current.tree)); + } + else { + throw PDI::Spectree_error{current.tree, "The scalar type must be a string for `{}'.", current.name}; } } - break; - case YAML_DOUBLE_QUOTED_SCALAR_STYLE: - // handle string type - current_node.set_string(PDI::to_string(current.tree)); - break; + } + break; case YAML_LITERAL_SCALAR_STYLE: case YAML_FOLDED_SCALAR_STYLE: case YAML_ANY_SCALAR_STYLE: - // context().logger().error("Unsupported YAML scalar style for variable `{}'", current.name); throw PDI::Spectree_error{current.tree, "Unsupported YAML scalar style for variable `{}'", current.name}; break; } break; case YAML_SEQUENCE_NODE: - // context().logger().error("Unsupported Sequence YAML Node for variable `{}'", current.name); throw PDI::Spectree_error{current.tree, "Unsupported Sequence YAML Node for variable `{}'", current.name}; break; case YAML_MAPPING_NODE: @@ 
-315,7 +370,6 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute auto value = PC_get(current.tree, "<%d>", index); remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node)}); } - break; } } } @@ -323,14 +377,18 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute void catalyst_plugin::run_catalyst_execute() { + context().logger().info("run_catalyst_execute()"); conduit_cpp::Node node; std::vector list_vtkGhostType_to_create; // object contain vector vtkGhostType auto execute_spec = PC_get(this->m_spec_tree, ".execute"); + context().logger().info("get m_spec_tree execute"); conduit_node* node_pointer = conduit_cpp::c_node(&node); // create the conduit node for catalyst_execute + context().logger().info("create conduit_node"); create_catalyst_execute_conduit_node(node_pointer, execute_spec); + context().logger().info("read vtk_ghost"); // read information to create the vtkGhostType for paraview (read "ghost_layers" node in the yaml file) //create_node_for_mask_ghost( node_pointer, execute_spec, list_vtkGhostType_to_create); read_info_for_creating_vtk_ghost(node_pointer, execute_spec, list_vtkGhostType_to_create); @@ -348,20 +406,17 @@ void catalyst_plugin::run_catalyst_execute() node[tmp_path + "/fields/vtkGhostType/values"].set_external(vtk_ghost_type.get_vector(), vtk_ghost_type.get_size(), 0, 1, sizeof(uint8_t)); } - context().logger().debug("Print conduit node including vtk ghost type created ..."); + context().logger().info("Print conduit node including vtk ghost type created ..."); if (context().logger().level() == spdlog::level::debug || context().logger().level() == spdlog::level::trace) { node.print(); } - context().logger().debug("catalyst_execute call..."); + context().logger().info("catalyst_execute call..."); auto result = catalyst_execute(node_pointer); if (result != catalyst_status_ok) { - // context().logger().error("catalyst_execute failure"); throw 
PDI::System_error{"catalyst_execute failure"}; } - - // clear m_current_pdi_data at each iteration - // this->m_current_pdi_data.clear(); + context().logger().info("end catalyst_execute call..."); } void catalyst_plugin::run_catalyst_finalize() @@ -394,15 +449,12 @@ void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_ context().logger().warn("Cannot read `{}' this data is not available", name); // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error throw PDI::System_error{"No \"name\" child in PDI_data_array spec `{}'.", name}; - //return; } auto data_type = ref_r.type(); if (auto array_datatype = std::dynamic_pointer_cast(data_type)) { set_value_for_pdi_array_datatype(node, name, tree, *array_datatype, ref_r); } else { - // context().logger().error("Unsupported datatype for variable: {}. The type should be array type.", name); - // throw PDI::Spectree_error{tree, "Unsupported datatype for variable: `{}'. The type should be array type.", name}; // Remark: This error can arrive outside PDI_initilialize. This implies that is not really a config error throw PDI::System_error{"Unsupported datatype for variable: `{}'. 
The type should be array type.", name}; } @@ -429,7 +481,7 @@ void catalyst_plugin::set_value_for_pdi_scalar_datatype( } else if (buffer_size == sizeof(conduit_int64)) { catalyst_conduit_node_set_int64(node, *static_cast(ref_r.get())); } else { - // context().logger().error("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); + context().logger().info("Unknown SIGNED buffer size of {} for variable `{}'", buffer_size, name); throw PDI::Spectree_error{tree, "Unknown SIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED) { @@ -443,7 +495,7 @@ void catalyst_plugin::set_value_for_pdi_scalar_datatype( } else if (buffer_size == sizeof(conduit_uint64)) { catalyst_conduit_node_set_uint64(node, *static_cast(ref_r.get())); } else { - // context().logger().error("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); + context().logger().info("Unknown UNSIGNED buffer size of {} for variable `{}'", buffer_size, name); throw PDI::Spectree_error{tree, "Unknown UNSIGNED buffer size of `{}' for variable `{}'", buffer_size, name}; } } else if (scalar_kind == PDI::Scalar_kind::FLOAT) { @@ -453,11 +505,11 @@ void catalyst_plugin::set_value_for_pdi_scalar_datatype( } else if (buffer_size == sizeof(conduit_float64)) { catalyst_conduit_node_set_float64(node, *static_cast(ref_r.get())); } else { - // context().logger().error("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); + context().logger().info("Unknown FLOAT buffer size of {} for variable `{}'", buffer_size, name); throw PDI::Spectree_error{tree, "Unknown FLOAT buffer size of `{}' for variable `{}'", buffer_size, name}; } } else { - // context().logger().error("Unknown Scalar Type for variable `{}'", name); + context().logger().info("Unknown Scalar Type for variable `{}'", name); throw PDI::Spectree_error{tree, "Unknown Scalar Type for variable `{}'", name}; } } @@ -486,14 +538,11 @@ void 
catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri "Unknown Scalar Type for variable `{}'. The type must be an integer signed or unsigned)", PDI::to_string(spec) }; - // context().logger().error("Unknown Scalar Type for variable {}. The type must be an integerc (signed or unsigned)", PDI::to_string(spec)); } } else { throw PDI::Spectree_error{spec, "The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)}; - // context().logger().error("The datatype must be a scalar datatype for variable: `{}'", PDI::to_string(spec)); } } else { - // context().logger().error("Supported only YAML_SCALAR_NODE for variable `{}'", name); throw PDI::Spectree_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; } @@ -504,7 +553,6 @@ void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri // case conduit_index_t is 32-bits value = static_cast(tmp_value); if (value != tmp_value) { - // context().logger().error("Error in cast of a type conduit_index_t in long. {} != `{}'", value, tmp_value); throw PDI::System_error{"Error in cast of a type conduit_index_t in long. `{}' != `{}'", value, tmp_value}; } } @@ -520,7 +568,6 @@ void catalyst_plugin::set_value_for_pdi_array_datatype( ) { PDI::Datatype_sptr type = array_datatype.subtype(); - // ???? 
bizarre ==> while (auto&& array_type = std::dynamic_pointer_cast(type)) { type = array_type->subtype(); } @@ -744,6 +791,6 @@ std::string catalyst_plugin::read_pdi_execute_event_name() } else { throw PDI::Spectree_error{execute_spec, "No event name for catalyst plugin is given"}; } - + context().logger().info("result: read_pdi_execute_event_name `{}'.",event_name); return event_name; } diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index fe8f34584..1b727c5e4 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -27,7 +27,7 @@ class catalyst_plugin: public PDI::Plugin /// @brief Builds a catalsyt_plugin specification tree from its yaml config catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree); - ~catalyst_plugin(); + ~catalyst_plugin() noexcept(false); private: /// @brief callback used at pdi_init From b1acca6b852ca78743e938b650b06088530696b5 Mon Sep 17 00:00:00 2001 From: "jacques.morice" Date: Mon, 4 May 2026 12:47:18 +0200 Subject: [PATCH 30/31] fix indent --- .../catalyst_plugin_structured_ghost.h | 211 ++++++++++-------- plugins/catalyst/pdi_catalyst_plugin.cxx | 85 +++---- plugins/catalyst/pdi_catalyst_plugin.h | 6 +- plugins/catalyst/tests_ghost/example.c | 93 +++++--- 4 files changed, 213 insertions(+), 182 deletions(-) diff --git a/plugins/catalyst/catalyst_plugin_structured_ghost.h b/plugins/catalyst/catalyst_plugin_structured_ghost.h index 9085bb9dc..f93b1623b 100644 --- a/plugins/catalyst/catalyst_plugin_structured_ghost.h +++ b/plugins/catalyst/catalyst_plugin_structured_ghost.h @@ -3,9 +3,9 @@ #include "catalyst.hpp" -#include +#include +#include #include -#include #include #include @@ -18,7 +18,7 @@ class Catalyst_plugin_structured_ghost /// The tree representing the ghost config PC_tree_t m_ghost_tree; /// The parent tree of m_ghost_tree for specification tree error message - PC_tree_t m_parent_tree; + PC_tree_t m_parent_tree; /// name of the mesh (It 
correspond to the topology name in the mesh blue print) std::string m_topology_name; // topology name @@ -37,10 +37,12 @@ class Catalyst_plugin_structured_ghost /// vtkGhostType vector for paraview std::vector m_vtk_ghost_type; - public: - - Catalyst_plugin_structured_ghost( PDI::Context& ctx, conduit_node * parent_node, PC_tree_t &tree, PC_tree_t &parent_tree, const int &index) - : m_ctx{ctx}, m_ghost_tree(tree), m_parent_tree(parent_tree), m_vtk_ghost_type{1,0} +public: + Catalyst_plugin_structured_ghost(PDI::Context& ctx, conduit_node* parent_node, PC_tree_t& tree, PC_tree_t& parent_tree, const int& index) + : m_ctx{ctx} + , m_ghost_tree(tree) + , m_parent_tree(parent_tree) + , m_vtk_ghost_type{1, 0} { // get the name of mesh(topology) auto m_topology_name_spec = PC_get(m_ghost_tree, "{%d}", index); @@ -49,56 +51,62 @@ class Catalyst_plugin_structured_ghost } else { throw PDI::Spectree_error{m_ghost_tree, "The name of the topology is not defined."}; } - - // A TESTER: - std::string value_type = get_name_from_parent_node( parent_node, "type"); + + // A TESTER: + std::string value_type = get_name_from_parent_node(parent_node, "type"); m_ctx.logger().info("topology type is `{}'", value_type); //=============================== // topo=structured if (value_type == "structured") { - std::string path_to_dims = "topologies/"+m_topology_name+"/elements/dims/"; - std::string PC_to_dataname = ".topologies."+m_topology_name; + std::string path_to_dims = "topologies/" + m_topology_name + "/elements/dims/"; + std::string PC_to_dataname = ".topologies." 
+ m_topology_name; - get_dimension( parent_node, path_to_dims, PC_to_dataname, "topology", m_topology_name); - } + get_dimension(parent_node, path_to_dims, PC_to_dataname, "topology", m_topology_name); + } //================ // topo=uniform - else if (value_type == "uniform") { - - // A TESTER: - std::string value_coordset = get_name_from_parent_node( parent_node, "coordset"); + else if (value_type == "uniform") + { + // A TESTER: + std::string value_coordset = get_name_from_parent_node(parent_node, "coordset"); m_ctx.logger().info("For the uniform topology `{}', the name of coordset is `{}'", m_topology_name, value_coordset); - std::string path_to_origins = "topologies/"+m_topology_name+"/origin"; + std::string path_to_origins = "topologies/" + m_topology_name + "/origin"; if (conduit_cpp::cpp_node(parent_node).has_path(path_to_origins)) { - throw PDI::Spectree_error{m_ghost_tree, "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview."}; + throw PDI::Spectree_error{ + m_ghost_tree, + "For uniform topology, we dont support origin keyword to generate vtkGhostType for paraview." + }; } - std::string path_to_dims = "coordsets/"+value_coordset+"/dims/"; - std::string PC_to_dataname = ".coordsets."+value_coordset; + std::string path_to_dims = "coordsets/" + value_coordset + "/dims/"; + std::string PC_to_dataname = ".coordsets." 
+ value_coordset; - get_dimension( parent_node, path_to_dims, PC_to_dataname, "coordset", value_coordset); + get_dimension(parent_node, path_to_dims, PC_to_dataname, "coordset", value_coordset); // The value in dims_vecGhost corresponding to the number of points in each direction // We remove 1 because we consider the number of elements in each direction when we create vtkGhostType - for (int ii=0; ii", index); - PDI::each(mask_ghost_spec , [&](PC_tree_t key_tree, PC_tree_t value) { + PDI::each(mask_ghost_spec, [&](PC_tree_t key_tree, PC_tree_t value) { std::string key = PDI::to_string(key_tree); m_ctx.logger().debug("read key= {} in ghost layers section.", key); if (key == "size") { @@ -107,23 +115,33 @@ class Catalyst_plugin_structured_ghost PDI::opt_each(value, [&](PC_tree_t start) { m_start.emplace_back(PDI::to_string(start)); }); } else if (key == "association") { m_association = PDI::to_string(value); - } - else { + } else { throw PDI::Spectree_error{key_tree, "Invalid configuration key in mask_ghost for topology `{}': `{}'", m_topology_name, key}; } }); if (m_size.size() != m_start.size()) { - throw PDI::Spectree_error{m_ghost_tree, "Invalid configuration in mask_ghost for topology `{}' the number of elements in size and in start are not the same.", m_topology_name}; + throw PDI::Spectree_error{ + m_ghost_tree, + "Invalid configuration in mask_ghost for topology `{}' the number of elements in size and in start are not the same.", + m_topology_name + }; } if (m_size.size() != m_dimensions.size()) { - throw PDI::Spectree_error{m_parent_tree, "Invalid configuration in mask_ghost for topology `{}', the dimension of the problem `{}' is not equal to `{}' the number of elements in size and in start.", m_topology_name, m_dimensions.size(), m_size.size()}; + throw PDI::Spectree_error{ + m_parent_tree, + "Invalid configuration in mask_ghost for topology `{}', the dimension of the problem `{}' is not equal to `{}' the number of " + "elements in size and in start.", + 
m_topology_name, + m_dimensions.size(), + m_size.size() + }; } - + // check size + start + dims (TODO: en dernier) m_ctx.logger().info("space dimension {}", m_dimensions.size()); - for (int ii=0; ii < m_dimensions.size(); ++ii){ + for (int ii = 0; ii < m_dimensions.size(); ++ii) { m_ctx.logger().info("`{}'-th dimensions of the mesh `{}'", ii, m_dimensions[ii]); } @@ -132,8 +150,8 @@ class Catalyst_plugin_structured_ghost } ~Catalyst_plugin_structured_ghost() {} - - /// creation of the mask ghost (VtkGhostType) need by paraview + + /// creation of the mask ghost (VtkGhostType) need by paraview void create_vtk_ghost_type() { int space_dimension = m_dimensions.size(); @@ -142,135 +160,146 @@ class Catalyst_plugin_structured_ghost std::vector start(space_dimension); for (int size_id = 0; size_id < space_dimension; ++size_id) { - last[size_id] = m_size[size_id].to_long(m_ctx); + last[size_id] = m_size[size_id].to_long(m_ctx); start[size_id] = m_start[size_id].to_long(m_ctx); } - + std::transform(start.begin(), start.end(), last.begin(), last.begin(), [](long start, long last) { return last + start; }); - for (int ii=0; ii= last[0] || ii < start[1] || ii >= last[1]) { - m_vtk_ghost_type[ii*(m_dimensions[0])+jj] = (uint8_t) 1; + m_vtk_ghost_type[ii * (m_dimensions[0]) + jj] = (uint8_t)1; } else { - m_vtk_ghost_type[ii*(m_dimensions[0])+jj] = (uint8_t) 0; + m_vtk_ghost_type[ii * (m_dimensions[0]) + jj] = (uint8_t)0; } } } - } - else if (space_dimension == 3) { + } else if (space_dimension == 3) { size_t vsize = m_dimensions[0] * m_dimensions[1] * m_dimensions[2]; - m_vtk_ghost_type.resize( vsize ); + m_vtk_ghost_type.resize(vsize); for (int ii = 0; ii < m_dimensions[2]; ++ii) { for (int jj = 0; jj < m_dimensions[1]; ++jj) { for (int kk = 0; kk < m_dimensions[0]; ++kk) { - if ( kk < start[0] || jj < start[1] || ii < start[2] ) { - m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 1; - } else if ( kk >= last[0] || jj >= last[1] || ii >= 
last[2] ) { - m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 1; + if (kk < start[0] || jj < start[1] || ii < start[2]) { + m_vtk_ghost_type[ii * (m_dimensions[1] * m_dimensions[0]) + jj * m_dimensions[0] + kk] = 1; + } else if (kk >= last[0] || jj >= last[1] || ii >= last[2]) { + m_vtk_ghost_type[ii * (m_dimensions[1] * m_dimensions[0]) + jj * m_dimensions[0] + kk] = 1; } else { - m_vtk_ghost_type[ ii*(m_dimensions[1]*m_dimensions[0]) + jj*m_dimensions[0] + kk ] = 0; + m_vtk_ghost_type[ii * (m_dimensions[1] * m_dimensions[0]) + jj * m_dimensions[0] + kk] = 0; } } } } - } - else { + } else { std::cout << " Error in the creation of the vtkGhostType for the users: The dimension for the mesh must be 2 or 3." << std::endl; } } - + /// return the pointer to the mask ghost - uint8_t * get_vector() { return m_vtk_ghost_type.data(); } + uint8_t* get_vector() { return m_vtk_ghost_type.data(); } /// get the size of the pointer of the mask ghost size_t get_size() { return m_vtk_ghost_type.size(); } /// get the name path in the conduit node - const std::string & get_node_path() const { return m_parent_node_path; } - - /// @brief get the name of topology(mesh) - const std::string & get_topology_name() const { return m_topology_name; } + const std::string& get_node_path() const { return m_parent_node_path; } + /// @brief get the name of topology(mesh) + const std::string& get_topology_name() const { return m_topology_name; } /// @brief retrieve the corresponding PC_tree for a given coordset or a given topology - PC_tree_t retrieve_pc_tree_from_parent_node( const std::string &structname, const std::string &dataname) { - std::string index_all = "."+structname+"."+m_topology_name+"."+dataname; - auto dataname_tree = PC_get(m_parent_tree,index_all.c_str()); + PC_tree_t retrieve_pc_tree_from_parent_node(const std::string& structname, const std::string& dataname) + { + std::string index_all = "." + structname + "." + m_topology_name + "." 
+ dataname; + auto dataname_tree = PC_get(m_parent_tree, index_all.c_str()); return dataname_tree; } - std::string get_name_from_parent_node( conduit_node * parent_node, const std::string &dataname) { - std::string path_to_type = "topologies/"+m_topology_name+"/"+dataname; - std::string PC_to_type = ".topologies."+m_topology_name+"."+dataname; + std::string get_name_from_parent_node(conduit_node* parent_node, const std::string& dataname) + { + std::string path_to_type = "topologies/" + m_topology_name + "/" + dataname; + std::string PC_to_type = ".topologies." + m_topology_name + "." + dataname; bool dataname_is_empty = conduit_cpp::cpp_node(parent_node)[path_to_type].dtype().is_empty(); if (!dataname_is_empty) { bool dataname_is_string = conduit_cpp::cpp_node(parent_node)[path_to_type].dtype().is_string(); if (dataname_is_string) { return conduit_cpp::cpp_node(parent_node)[path_to_type].as_string(); - } else{ + } else { PC_tree_t msg_tree = retrieve_pc_tree_from_parent_node("topologies", dataname); - throw PDI::Spectree_error{msg_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined as a string.", dataname, m_topology_name}; + throw PDI::Spectree_error{ + msg_tree, + "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined as a string.", + dataname, + m_topology_name + }; } - } - else { - throw PDI::Spectree_error{m_parent_tree, "... Vec Ghost Type catalyst ... The {} for topology `{}' is not defined.", dataname, m_topology_name}; + } else { + throw PDI::Spectree_error{ + m_parent_tree, + "... Vec Ghost Type catalyst ... 
The {} for topology `{}' is not defined.", + dataname, + m_topology_name + }; } } /// @brief Retrieve the dimension of the mask ghost - /// @param parent_node + /// @param parent_node /// @param path_to_dims // path in the conduit node where the dimensions are /// @param PC_to_dataname // path in the PC_tree to get the PC_tree for error message /// @param data_type // type of data (coordset or topology) where the dimensions are defiened for error message /// @param data_type_name // name of coordset or name of topology for error message - void get_dimension( conduit_node * parent_node, std::string & path_to_dims, std::string & PC_to_dataname, std::string data_type, std::string &data_type_name) { - + void get_dimension( + conduit_node* parent_node, + std::string& path_to_dims, + std::string& PC_to_dataname, + std::string data_type, + std::string& data_type_name + ) + { std::string msg_data = data_type + " " + data_type_name; PC_tree_t msg_tree = PC_get(m_parent_tree, PC_to_dataname.c_str()); - if ( PC_status(msg_tree)) { + if (PC_status(msg_tree)) { throw PDI::Spectree_error(msg_tree, ""); } else { if (conduit_cpp::cpp_node(parent_node).has_path(path_to_dims)) { - std::list list_dims{"i","j","k"}; - for (auto &&elem: list_dims) { + std::list list_dims{"i", "j", "k"}; + for (auto&& elem: list_dims) { // verify dims/{elem} exist in the node m_ghost_tree - std::string path_leaf = path_to_dims+elem; + std::string path_leaf = path_to_dims + elem; if (conduit_cpp::cpp_node(parent_node).has_path(path_leaf)) { auto node_path = conduit_cpp::cpp_node(parent_node)[path_leaf]; // check the variable is an integer if (node_path.dtype().is_integer()) { - int tmp_int = (int) node_path.to_int(); + int tmp_int = (int)node_path.to_int(); m_dimensions.emplace_back(tmp_int); - m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int,msg_data); - } - else if (node_path.dtype().is_long()) { - int tmp_int = (int) node_path.to_long(); + m_ctx.logger().info("dims/`{}' = `{}' for 
the `{}'.", elem, tmp_int, msg_data); + } else if (node_path.dtype().is_long()) { + int tmp_int = (int)node_path.to_long(); m_dimensions.emplace_back(tmp_int); - m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int,msg_data); - } - else { + m_ctx.logger().info("dims/`{}' = `{}' for the `{}'.", elem, tmp_int, msg_data); + } else { throw PDI::Spectree_error{msg_tree, "For `{}' the value of dims/`{}' is not an integer or a long", msg_data, elem}; - } - } - else{ + } + } else { // info message in case of dims/i, dims/j, dims/k doesn't exist. m_ctx.logger().info("No dims/`{}' is not defined for the `{}'.", elem, msg_data); } } - if (m_dimensions.size()==0) { + if (m_dimensions.size() == 0) { throw PDI::Spectree_error{msg_tree, "No dims/i , dims/j and dims/k are defined for the `{}'", msg_data}; } } else { diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index d9e4e2296..7a487ae1a 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -32,19 +32,18 @@ catalyst_plugin::catalyst_plugin(PDI::Context& ctx, PC_tree_t spec_tree) catalyst_plugin::~catalyst_plugin() noexcept(false) { - try{ + try { run_catalyst_finalize(); } catch (const std::exception& e) { if (std::uncaught_exceptions()) { throw; } else { - context().logger().error("When closing catalyst plugin `{}'",e.what()); + context().logger().error("When closing catalyst plugin `{}'", e.what()); } - } catch(...) { + } catch (...) 
{ if (std::uncaught_exceptions()) { throw; - } - else { + } else { context().logger().error("When closing catalyst plugin"); } } @@ -72,7 +71,7 @@ void catalyst_plugin::process_pdi_init() void catalyst_plugin::process_event(const std::string& event_name) { if (event_name == this->m_pdi_execute_event_name) { - context().logger().info("call run_catalyst_execute in event `{}'...",event_name); + context().logger().info("call run_catalyst_execute in event `{}'...", event_name); run_catalyst_execute(); } } @@ -252,18 +251,18 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute context().logger().info("Read node of name`{}'", current.name); if (current.name == "ghost_layers") { context().logger().info(" ghost_layers keys will be read after"); - // } else if (current.name == "elements") { - // int data_tree_size = PDI::len(current.tree); - // // Check for dynamic PDI Data array - // bool pdi_data_array = false; - // for (int index = data_tree_size - 1; index >= 0; --index) { - // auto key = PC_get(current.tree, "{%d}", index); - // if (PDI::to_string(key) == "dims") { - // PDI::Spectree_error{current.tree, "I found dims dans elements `{}'", current.name}; - // } else { - // PDI::Spectree_error{current.tree, "The key is not dims and it is `{}'", PDI::to_string(key)}; - // } - // } + // } else if (current.name == "elements") { + // int data_tree_size = PDI::len(current.tree); + // // Check for dynamic PDI Data array + // bool pdi_data_array = false; + // for (int index = data_tree_size - 1; index >= 0; --index) { + // auto key = PC_get(current.tree, "{%d}", index); + // if (PDI::to_string(key) == "dims") { + // PDI::Spectree_error{current.tree, "I found dims dans elements `{}'", current.name}; + // } else { + // PDI::Spectree_error{current.tree, "The key is not dims and it is `{}'", PDI::to_string(key)}; + // } + // } } else { context().logger().info("Read node of name`{}'", current.name); auto current_node = 
conduit_cpp::cpp_node(current.parent_node)[current.name]; @@ -276,69 +275,52 @@ void catalyst_plugin::create_catalyst_execute_conduit_node(conduit_node* execute switch (current.tree.node->data.scalar.style) { case YAML_PLAIN_SCALAR_STYLE: case YAML_SINGLE_QUOTED_SCALAR_STYLE: - case YAML_DOUBLE_QUOTED_SCALAR_STYLE: - { + case YAML_DOUBLE_QUOTED_SCALAR_STYLE: { // handle integer or float/double type that depend perhaps on scalar PDI data context().logger().info("$$ read value of an integer or float or string "); std::string data_name{PDI::to_string(current.tree)}; - context().logger().info("data_name=`{}'",data_name); + context().logger().info("data_name=`{}'", data_name); PDI::Expression data_expression{PDI::to_string(current.tree)}; PDI::Ref_r spec_ref = data_expression.to_ref(context()); if (!spec_ref) { context().logger().info("problem !spec_ref"); - throw PDI::Value_error{ - "Error of right access for: `{}'", - data_name - }; + throw PDI::Value_error{"Error of right access for: `{}'", data_name}; } auto data_type = spec_ref.type()->evaluate(context()); if (!data_type) { context().logger().info("problem !data_type"); - throw PDI::Spectree_error{ - current.tree, - "Error of right access for: `{}'", - data_name - }; + throw PDI::Spectree_error{current.tree, "Error of right access for: `{}'", data_name}; } if (auto&& scalar_datatype = std::dynamic_pointer_cast(data_type)) { context().logger().debug("## read a scalar type `{}' ##", data_name); set_value_for_pdi_scalar_datatype(conduit_cpp::c_node(¤t_node), current.tree, data_name, *scalar_datatype, spec_ref); - } else if(auto &&array_datatype = std::dynamic_pointer_cast(data_type)) { + } else if (auto&& array_datatype = std::dynamic_pointer_cast(data_type)) { context().logger().debug("## read an array type `{}' ##", data_name); // check the array_datatype is a string PDI::Datatype_sptr type = array_datatype->subtype(); - { - auto&& array_type = std::dynamic_pointer_cast(type); + // case multi dimensional array ?? 
+ while (auto&& array_type = std::dynamic_pointer_cast(type)) { type = array_type->subtype(); } - // case multi dimensional array ?? - // while (auto&& array_type = std::dynamic_pointer_cast(type)) { - // type = array_type->subtype(); - // } auto array_scalar_datatype = std::dynamic_pointer_cast(type); if (!array_scalar_datatype) { // context().logger().error("Array subtype of variable {} should be scalar type.", name); throw PDI::Spectree_error{current.tree, "Array subtype of variable `{}' should be scalar type.", current.name}; } PDI::Scalar_kind scalar_kind = array_scalar_datatype->kind(); - if (scalar_kind == PDI::Scalar_kind::SIGNED && array_scalar_datatype->buffersize() == sizeof(char)) - { + if (scalar_kind == PDI::Scalar_kind::SIGNED && array_scalar_datatype->buffersize() == sizeof(char)) { context().logger().debug("## scalar_kind is signed"); current_node.set_string(PDI::to_string(current.tree)); - } - else if (scalar_kind == PDI::Scalar_kind::UNSIGNED && array_scalar_datatype->buffersize() == sizeof(unsigned char)) - { + } else if (scalar_kind == PDI::Scalar_kind::UNSIGNED && array_scalar_datatype->buffersize() == sizeof(unsigned char)) { context().logger().info("## scalar_kind is unsigned"); current_node.set_string(PDI::to_string(current.tree)); - } - else { + } else { throw PDI::Spectree_error{current.tree, "The scalar type must be a string for `{}'.", current.name}; } } - } - break; + } break; case YAML_LITERAL_SCALAR_STYLE: case YAML_FOLDED_SCALAR_STYLE: case YAML_ANY_SCALAR_STYLE: @@ -434,7 +416,7 @@ void catalyst_plugin::run_catalyst_finalize() void catalyst_plugin::fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree) { - // check the function is called with a PC_tree containg + // check the function is called with a PC_tree containg auto name_spec = PC_get(tree, ".PDI_data_array"); if (PC_status(name_spec)) { // context().logger().error("No \"name\" child in PDI_data_array spec."); @@ -516,15 +498,14 @@ void 
catalyst_plugin::set_value_for_pdi_scalar_datatype( void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::string& name, conduit_index_t& value) { - if (!PC_status(spec)) { - long tmp_value; + if (!PC_status(spec)) { + long tmp_value; if (spec.node->type == YAML_SCALAR_NODE) { PDI::Expression data_expression{PDI::to_string(spec)}; PDI::Ref_r spec_ref = data_expression.to_ref(context()); if (!spec_ref) { // context().logger().error("The PDIData named \"{}\" is not readable.", name); throw PDI::System_error("The PDIData named \"{}\" is not readable.", name); - } auto data_type = spec_ref.type()->evaluate(context()); if (auto scalar_datatype = std::dynamic_pointer_cast(data_type)) { @@ -545,7 +526,7 @@ void catalyst_plugin::get_conduit_index_t_value(PC_tree_t& spec, const std::stri } else { throw PDI::Spectree_error{spec, "Supported only YAML_SCALAR_NODE for variable `{}'", name}; } - + // return value in conduit_index_t if (std::is_same::value) { value = tmp_value; @@ -791,6 +772,6 @@ std::string catalyst_plugin::read_pdi_execute_event_name() } else { throw PDI::Spectree_error{execute_spec, "No event name for catalyst plugin is given"}; } - context().logger().info("result: read_pdi_execute_event_name `{}'.",event_name); + context().logger().info("result: read_pdi_execute_event_name `{}'.", event_name); return event_name; } diff --git a/plugins/catalyst/pdi_catalyst_plugin.h b/plugins/catalyst/pdi_catalyst_plugin.h index 1b727c5e4..0764583b8 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.h +++ b/plugins/catalyst/pdi_catalyst_plugin.h @@ -66,7 +66,7 @@ class catalyst_plugin: public PDI::Plugin /// @param execute_spec The tree representing the execute section void create_catalyst_execute_conduit_node(conduit_node* execute_node, PC_tree_t& execute_spec); - /// @brief Fills a conduit node corresponding to array shared with pdi from a yaml tree. + /// @brief Fills a conduit node corresponding to array shared with pdi from a yaml tree. 
/// @param the node in which to operate /// @param tree specification tree containing a PDI_data_array void fill_node_with_pdi_data_array(conduit_node* node, PC_tree_t& tree); @@ -76,7 +76,7 @@ class catalyst_plugin: public PDI::Plugin /// @param name name of the array /// @param tree specification tree containing a PDI_data_array /// @param scalar_datatype type of the scalar - /// @param ref_r reference of the array + /// @param ref_r reference of the array void set_value_for_pdi_scalar_datatype( conduit_node* node, PC_tree_t& tree, @@ -90,7 +90,7 @@ class catalyst_plugin: public PDI::Plugin /// @param name name of the array /// @param tree specification tree containing a PDI_data_array /// @param array_datatype type of the array - /// @param ref_r reference of the array + /// @param ref_r reference of the array void set_value_for_pdi_array_datatype( conduit_node* node, const std::string& name, diff --git a/plugins/catalyst/tests_ghost/example.c b/plugins/catalyst/tests_ghost/example.c index 8ee681a72..3519e1928 100644 --- a/plugins/catalyst/tests_ghost/example.c +++ b/plugins/catalyst/tests_ghost/example.c @@ -32,24 +32,30 @@ #include "pdi.h" - -void create_coordinate_of_vertices( int dsize[2], int dstart[2], int local_size[2], int pcoord[2], double coords_x[dsize[0]+1][dsize[1]+1], double coords_y[dsize[0]+1][dsize[1]+1]){ - - // catalyst variables - int cells_ghost=1; - - size_t number_of_points[2]; - number_of_points[0] = dsize[0]+1; - number_of_points[1] = dsize[1]+1; - size_t total_number_of_points = number_of_points[0]*number_of_points[1]; - - // the first axis correspond to the y-coordinate. 
- for(int ix=0; ix Date: Mon, 4 May 2026 15:20:48 +0200 Subject: [PATCH 31/31] remove parent_tree in struct for ghost layers --- plugins/catalyst/pdi_catalyst_plugin.cxx | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/plugins/catalyst/pdi_catalyst_plugin.cxx b/plugins/catalyst/pdi_catalyst_plugin.cxx index 7a487ae1a..dfc51d738 100644 --- a/plugins/catalyst/pdi_catalyst_plugin.cxx +++ b/plugins/catalyst/pdi_catalyst_plugin.cxx @@ -173,11 +173,16 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( PC_tree_t tree; std::string name; conduit_node* parent_node; - PC_tree_t parent_tree; // Adding parent_tree for config error message }; + // value to keep the parent tree of the current.tree + PC_tree_t current_parent_tree; + + std::stack remaining_tree_and_parent_node; - remaining_tree_and_parent_node.push({execute_spec, "catalyst", execute_node, execute_spec}); + remaining_tree_and_parent_node.push({execute_spec, "catalyst", execute_node}); + current_parent_tree = execute_spec; // initialize parent tree as execute_spec + while (!remaining_tree_and_parent_node.empty()) { auto current = remaining_tree_and_parent_node.top(); remaining_tree_and_parent_node.pop(); @@ -196,7 +201,7 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( // loop over the meshes in the ghost layers tree for (int index = data_tree_size - 1; index >= 0; --index) { - list_vtkGhostType_to_create.emplace_back(context(), current.parent_node, current.tree, current.parent_tree, index); + list_vtkGhostType_to_create.emplace_back(context(), current.parent_node, current.tree, current_parent_tree, index); } } else { throw PDI::Spectree_error(current.tree, "ghost_layers node only support yaml mapping node."); @@ -223,7 +228,10 @@ void catalyst_plugin::read_info_for_creating_vtk_ghost( if (conduit_cpp::cpp_node(current.parent_node).has_path(current.name)) { auto current_node = conduit_cpp::cpp_node(current.parent_node)[current.name]; // Attention: creation of the 
node if doesn't exit - remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node), current.tree}); + remaining_tree_and_parent_node.push({value, PDI::to_string(key), conduit_cpp::c_node(¤t_node)}); + + current_parent_tree = current.tree; + } else { throw PDI::System_error("Error in creating vtkGhostType: a conduit node doesn't exist !!"); }