33 changes: 29 additions & 4 deletions .github/workflows/ci.yml
@@ -10,8 +10,33 @@ on:
- main

jobs:
# docker-image:
# uses: ./.github/workflows/docker.yml
docker-image:
uses: ./.github/workflows/docker.yml
api-doc:
# needs: [docker-image]
uses: ./.github/workflows/sphinx.yml
linux:
runs-on: ubuntu-latest
needs: [docker-image]
container:
image: ghcr.io/llnl/gplasdi/lasdi_env:latest
options: --user 1001 --privileged
volumes:
- /mnt:/mnt
steps:
- name: Cancel previous runs
uses: styfle/cancel-workflow-action@0.11.0
with:
access_token: ${{ github.token }}
# - name: Set Swap Space
# uses: pierotofy/set-swap-space@master
# with:
# swap-size-gb: 10
- name: Check out LaSDI
uses: actions/checkout@v3
- name: Build LaSDI
run: |
cd ${GITHUB_WORKSPACE}
pip install .
- name: Test CNN2D
run: |
pytest -vrx tests/test_CNN2D.py
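Reproducing the new test step locally amounts to the same two commands the job runs inside the `lasdi_env` container: `pip install .` followed by `pytest -vrx tests/test_CNN2D.py`. Purely as an illustration of the kind of check such a test performs, here is a minimal pytest-style shape round-trip; the layers, sizes, and assertion are assumptions, not the contents of the repository's `tests/test_CNN2D.py`.

import torch

def test_cnn2d_roundtrip_shape():
    # Encode then decode a small batch and check the spatial shape is preserved.
    x = torch.randn(4, 1, 32, 32)                                       # (batch, channels, H, W)
    encoder = torch.nn.Conv2d(1, 8, kernel_size=3, padding=1)           # stand-in encoder layer
    decoder = torch.nn.ConvTranspose2d(8, 1, kernel_size=3, padding=1)  # stand-in decoder layer
    y = decoder(encoder(x))
    assert y.shape == x.shape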
4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -23,8 +23,8 @@ RUN sudo apt-get install -yq python3-dev
RUN sudo apt-get install -yq python3-pip
RUN sudo apt-get install python-is-python3
RUN sudo python -m pip install --upgrade pip
RUN sudo python -m pip install sphinx sphinx-autoapi sphinx_rtd_theme
#RUN sudo pip3 install numpy scipy argparse tables PyYAML h5py pybind11 pytest mpi4py merlin
# RUN sudo python -m pip install sphinx sphinx-autoapi sphinx_rtd_theme
RUN sudo pip3 install torch==2.0.1 numpy==1.23.0 scikit-learn==1.3 scipy==1.10 pyyaml==6.0 matplotlib==3.8.0 argparse==1.1 h5py pytest pytest-cov
#
RUN sudo apt-get clean -q

1 change: 1 addition & 0 deletions docs/source/conf.py
@@ -25,6 +25,7 @@
]

autoapi_dirs = ['../../src']
autoapi_python_class_content = 'both'

napoleon_google_docstring = False
napoleon_use_param = False
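The added `autoapi_python_class_content = 'both'` setting makes sphinx-autoapi render a class's own docstring together with its `__init__` docstring on the generated API page. A minimal illustration of the docstring layout this rewards is below; the class is hypothetical and not part of LaSDI.

class GaussianProcess:
    """Thin wrapper around a Gaussian-process regressor.

    With autoapi_python_class_content = 'both', this class docstring and the
    __init__ docstring below appear together in the generated documentation.
    """

    def __init__(self, kernel, noise=1e-6):
        """
        kernel : callable
            Covariance function k(x, x').
        noise : float
            Diagonal jitter added for numerical stability.
        """
        self.kernel = kernel
        self.noise = noise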
30 changes: 26 additions & 4 deletions examples/burgers1d.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "d67dad03-9d76-4891-82ff-7e19d1369a24",
"metadata": {},
"outputs": [],
@@ -78,7 +78,7 @@
"outputs": [],
"source": [
"# Specify the restart file you have.\n",
"filename = 'lasdi_10_01_2024_17_09.npy'\n",
"filename = 'restarts/burgers1d.restart.npy'\n",
"\n",
"import yaml\n",
"from lasdi.workflow import initialize_trainer\n",
@@ -111,7 +111,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "dcdac0c2",
"metadata": {},
"outputs": [],
@@ -179,6 +179,28 @@
"n_coef = restart_file['latent_dynamics']['ncoefs']"
]
},
{
"cell_type": "markdown",
"id": "03a96b35",
"metadata": {},
"source": [
"# Loss history"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0163864",
"metadata": {},
"outputs": [],
"source": [
"plt.figure(1)\n",
"plt.loglog(trainer.training_loss)\n",
"plt.loglog(trainer.ae_loss)\n",
"plt.loglog(trainer.ld_loss)\n",
"plt.loglog(trainer.coef_loss)"
]
},
{
"cell_type": "markdown",
"id": "1262a0c3",
@@ -306,7 +328,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "11c629e4",
"id": "ba098b6d",
"metadata": {},
"outputs": [],
"source": []
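The new "Loss history" cell overlays the four histories tracked by the trainer on one log-log plot. The sketch below adds only labels and a legend for readability; it assumes the same `trainer` object the notebook builds earlier.

import matplotlib.pyplot as plt

plt.figure()
plt.loglog(trainer.training_loss, label='total')
plt.loglog(trainer.ae_loss, label='autoencoder')
plt.loglog(trainer.ld_loss, label='latent dynamics')
plt.loglog(trainer.coef_loss, label='coefficients')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.legend()
plt.show()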
15 changes: 10 additions & 5 deletions src/lasdi/gplasdi.py
@@ -145,6 +145,11 @@ def __init__(self, physics, autoencoder, latent_dynamics, param_space, config):
self.X_train = torch.Tensor([])
self.X_test = torch.Tensor([])

self.training_loss = []
self.ae_loss = []
self.ld_loss = []
self.coef_loss = []

return

def train(self):
@@ -163,11 +168,6 @@ def train(self):
n_train = ps.n_train()
ld = self.latent_dynamics

self.training_loss = []
self.ae_loss = []
self.ld_loss = []
self.coef_loss = []

'''
determine number of iterations.
Perform n_iter iterations until overall iterations hit max_iter.
@@ -291,4 +291,9 @@ def load(self, dict_):
self.optimizer.load_state_dict(dict_['optimizer'])
if (self.device != 'cpu'):
optimizer_to(self.optimizer, self.device)

self.training_loss = dict_['training_loss']
self.ae_loss = dict_['ae_loss']
self.ld_loss = dict_['ld_loss']
self.coef_loss = dict_['coeff_loss']
return
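`load` now restores the four loss histories alongside the optimizer state, so the matching save path is expected to write the same keys; note that the coefficient history is read from the `'coeff_loss'` key even though the attribute is `self.coef_loss`. A sketch of the dictionary shape `load` expects, assuming a trained `trainer` instance (only the entries visible in this diff are shown; the actual save/export logic in gplasdi.py presumably stores more):

dict_ = {
    'optimizer': trainer.optimizer.state_dict(),   # restored before this change as well
    'training_loss': trainer.training_loss,
    'ae_loss': trainer.ae_loss,
    'ld_loss': trainer.ld_loss,
    'coeff_loss': trainer.coef_loss,               # key spelling differs from the attribute name
}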
175 changes: 0 additions & 175 deletions src/lasdi/latent_space.py

This file was deleted.
