diff --git a/README.md b/README.md index 1147ce1..b289764 100644 --- a/README.md +++ b/README.md @@ -25,35 +25,69 @@ Agreement number: 956748. # Installation -To install in your pip enviroment, clone this repository and execute: -``` -pip install --editable . +There are multiple ways to install `PySEMTools` which are described below in more detail. For a quick-start, you can use: +```bash +# Install mpi4py (Assuming your mpi wrapper is cc) +env MPICC=$(which cc) python -m pip install --no-cache-dir --no-binary=mpi4py mpi4py + +# Install pytorch (Assuming you have NVIDIA GPUs) +python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu130 + +# Install PySEMTools and all dependencies +git clone https://github.com/ExtremeFLOW/pySEMTools.git +cd pySEMTools/ +pip install --editable .[all] ``` -The `--editable` flag is optional, and will allow changes in the code of the package to be used +## For minimal functionality + +To avoid cluttering clusters with many modules, the following instructions install the minimum working version of pySEMTools. +This allows reading files and performing operations with numpy in parallel. + +### For developers, + +the easiest way to install and contribute changes is by cloning the repository: +```bash +git clone https://github.com/ExtremeFLOW/pySEMTools.git +cd pySEMTools/ +pip install --editable . +``` +Note that the `--editable` flag is optional, and will allow changes in the code of the package to be used directly without reinstalling. -## Dependencies +### For users, -### Mandatory +the option to install from `PyPI` will soon be available, which will allow you to use: +```bash +pip install pysemtools +``` -You can install dependencies as follow: +## For full functionality +If the objective is to be able to run all examples and tests available in the package, then more optional dependencies are needed. 
+In this instance, the installation instruction must include the "[all]" argument, i.e.: +```bash +pip install --editable .[all] ``` -pip install numpy -pip install scipy -pip install pymech -pip install tdqm +or +```bash +pip install pysemtools[all] ``` + + +## Dependencies + +### Mandatory + #### mpi4py `mpi4py` is needed even when running in serial, as the library is built with communication in mind. It can typically be installed with: -``` +```bash pip install mpi4py ``` In some instances, such as in supercomputers, it is typically necesary that the mpi of the system is used. If `mpi4py` is not available as a module, we have found (so far) that installing it as follows works: -``` -export MPICC=$(which CC) +```bash +export MPICC=$(which cc) pip install mpi4py --no-cache-dir ``` where CC should be replaced by the correct C wrappers of the system (In a workstation you would probably need mpicc or so). It is always a good idea to contact support or check the specific documentation if things do not work. @@ -66,10 +100,10 @@ Some functionalities such as data streaming require the use of adios2. You can c #### PyTorch -Some classed are compatible with the pytorch module in case you have GPUs and want to use them in the process. We note that we only use pytorch optionally. There are versions that work exclusively with numpy on CPUs so pytorch can be avoided. +Some classes are compatible with the pytorch module in case you have GPUs and want to use them in the process. We note that we only use pytorch optionally. There are versions that work exclusively with numpy on CPUs so pytorch can be avoided. To install pytorch, you can check [here](https://pytorch.org/get-started/locally/). A simple installation for CUDA v12.1 on linux would look like this (following the instructions from the link): -``` +```bash pip3 install torch torchvision torchaudio ``` The process of installing pytorch in supercomputers is more intricate. 
In this case it is best to use the documentation of the specific cluster or contact support. @@ -83,4 +117,9 @@ To get an idea on how the codes are used, feel free to check the examples we hav You can use the provided tests to check if your installation is complete (Not all functionallities are currently tested but more to come). -The tests rely on `pytest`. To install it in your pip enviroment simply execute `pip install pytest`. To run the tests, execute the `pytest tests/` command from the root directory of the repository. +The tests rely on `pytest`. To install it in your pip enviroment simply execute `pip install pytest`. + +Tests are performed for more functionalities than those needed to use `PySEMTools` in its minimal version. To run them, make sure that you use the "[all]" or "[test]" argument when installing the package to +get all the dependencies. + +To run the tests, execute the `pytest tests/` command from the root directory of the repository. diff --git a/pyproject.toml b/pyproject.toml index ea10af1..0c56d47 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "pysemtools" -version = "0.1.0" +version = "1.1.0" requires-python = ">= 3.10" dependencies = [ "numpy", @@ -15,8 +15,8 @@ dependencies = [ ] [project.urls] -Repository = "https://github.com/me/spam.git" -Issues = "https://github.com/me/spam/issues" +Repository = "https://github.com/ExtremeFLOW/pySEMTools.git" +Issues = "https://github.com/ExtremeFLOW/pySEMTools/issues" [project.optional-dependencies] test = [ @@ -39,7 +39,10 @@ torch = [ "torchvision", "torchaudio" ] - +all = [ + "pysemtools[test]", + "pysemtools[dev]" +] [project.scripts] pysemtools_index-files = "pysemtools.cli.index_files:main" diff --git a/pysemtools/monitoring/__init__.py b/pysemtools/monitoring/__init__.py index 6f80bd7..0131b48 100644 --- a/pysemtools/monitoring/__init__.py +++ b/pysemtools/monitoring/__init__.py @@ -1,6 +1,5 @@ """ Monitoring tools 
for pySEMTools """ from .logger import Logger -from .memory_monitor import MemoryMonitor -__all__ = ["Logger", "MemoryMonitor"] \ No newline at end of file +__all__ = ["Logger"] \ No newline at end of file diff --git a/tests/test_autograd.py b/tests/test_autograd.py index bd2c587..70d2bdb 100644 --- a/tests/test_autograd.py +++ b/tests/test_autograd.py @@ -1,22 +1,36 @@ +try: + import torch + have_torch = True +except ImportError: + have_torch = False + from mpi4py import MPI -import torch import math import sys +import pytest from pysemtools.io.ppymech.neksuite import preadnek from pysemtools.datatypes.msh import Mesh -from pysemtools.interpolation.point_interpolator.multiple_point_interpolator_legendre_torch import LegendreInterpolator -from pysemtools.interpolation.point_interpolator.multiple_point_helper_functions_torch import legendre_basis_at_xtest, legendre_basis_derivative_at_xtest -from pysemtools.interpolation.point_interpolator.multiple_point_helper_functions_torch import apply_operators_3d +if have_torch: + from pysemtools.interpolation.point_interpolator.multiple_point_interpolator_legendre_torch import LegendreInterpolator + from pysemtools.interpolation.point_interpolator.multiple_point_helper_functions_torch import legendre_basis_at_xtest, legendre_basis_derivative_at_xtest + from pysemtools.interpolation.point_interpolator.multiple_point_helper_functions_torch import apply_operators_3d import numpy as np comm = MPI.COMM_WORLD -dtype = torch.float64 -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -if device == 'cuda:0': - torch.cuda.set_device(device) + +@pytest.mark.skipif(not have_torch, reason="Only test autograd if PyTorch is available") def test_autograd(): + if not have_torch: + print("PyTorch is not available, skipping autograd test.") + return + + dtype = torch.float64 + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + if device == 'cuda:0': + torch.cuda.set_device(device) + fname = 
"examples/data/rbc0.f00001" data = preadnek(fname, comm) msh = Mesh(comm, data = data, create_connectivity=False) diff --git a/tests/test_examples.py b/tests/test_examples.py index 55f7e7b..902d679 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -4,6 +4,13 @@ import pytest import glob +try: + import torch + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + have_torch = True +except ImportError: + have_torch = False + @pytest.fixture(scope="session") def sem_data_path(): @@ -50,9 +57,13 @@ def test_calculus(): examples_path = "examples/3-calculus_in_sem_mesh/" - notebook_files = ["1-differentiation.ipynb", - "1.5-differentiation_torch.ipynb", - "2-integration.ipynb"] + if have_torch: + notebook_files = ["1-differentiation.ipynb", + "1.5-differentiation_torch.ipynb", + "2-integration.ipynb"] + else: + notebook_files = ["1-differentiation.ipynb", + "2-integration.ipynb"] passed = [] for notebook in notebook_files: @@ -141,10 +152,15 @@ def test_reduced_order_modelling(): def test_statistics(sem_data_path): examples_path = "examples/6-statistics/" - notebook_files = ["1-post_processing_mean_fields.ipynb", + if have_torch: + notebook_files = ["1-post_processing_mean_fields.ipynb", "4-Budgets.ipynb", "5-homogeneous_direction_averaging.ipynb", "6-Stats_from_fld.ipynb"] + else: + notebook_files = ["1-post_processing_mean_fields.ipynb", + "4-Budgets.ipynb", + "5-homogeneous_direction_averaging.ipynb",] passed = [] for notebook in notebook_files: diff --git a/tests/test_interpolator.py b/tests/test_interpolator.py index 5aa73ef..8f1235e 100644 --- a/tests/test_interpolator.py +++ b/tests/test_interpolator.py @@ -10,8 +10,10 @@ try: import torch device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + have_torch = True except ImportError: print('could not import torch') + have_torch = False device = 'cpu' # Initialize MPI from mpi4py import MPI @@ -279,8 +281,13 @@ def test_multiple_point_interpolator_numpy(): 
#============================================================================== +@pytest.mark.skipif(not have_torch, reason="Interpolation with torch back-end requires PyTorch to be installed") def test_multiple_point_interpolator_torch(): + if not have_torch: + print("PyTorch is not available, skipping multiple point interpolator torch test.") + return + # Read the original mesh data fname = 'examples/data/rbc0.f00001' data = preadnek(fname, comm) diff --git a/tests/test_probes.py b/tests/test_probes.py index 3ee78b9..c6ccbd5 100644 --- a/tests/test_probes.py +++ b/tests/test_probes.py @@ -1,3 +1,11 @@ +try: + import torch + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + have_torch = True +except ImportError: + print('could not import torch') + have_torch = False + from mpi4py import MPI #equivalent to the use of MPI_init() in C comm = MPI.COMM_WORLD @@ -69,7 +77,10 @@ def test_probes_msh_single(): # Create the probes object tlist = [] - point_int_l = ["single_point_legendre", "multiple_point_legendre_numpy", "multiple_point_legendre_torch"] + if have_torch: + point_int_l = ["single_point_legendre", "multiple_point_legendre_numpy", "multiple_point_legendre_torch"] + else: + point_int_l = ["single_point_legendre", "multiple_point_legendre_numpy"] global_tree_type_l = ["rank_bbox", "domain_binning"] local_data_structure_l = ["kdtree", "rtree", "hashtable"] find_points_iterative = [[False], [True, 1]] @@ -173,8 +184,12 @@ def test_probes_msh_double(): # Create the probes object tlist = [] - point_int_l = ["single_point_legendre", "multiple_point_legendre_numpy", "multiple_point_legendre_torch"] - point_int_l = ["multiple_point_legendre_torch"] + #point_int_l = ["single_point_legendre", "multiple_point_legendre_numpy", "multiple_point_legendre_torch"] + if have_torch: + point_int_l = ["multiple_point_legendre_torch"] + else: + point_int_l = ["multiple_point_legendre_numpy"] + global_tree_type_l = ["rank_bbox", "domain_binning"] for point_int 
in point_int_l: