diff --git a/.gitignore b/.gitignore
index a2b2f4d7..0eb00f6e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -179,5 +179,10 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
-
+old/
+.bash_history
+.local/
+.viminfo
+hyperplanes_diff/
+linearly_separable/
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..50577e2a
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,29 @@
+PYTHON3 := $(shell which python3 2>/dev/null)
+
+PYTHON := python3
+
+.PHONY: help
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  clean             to delete all temporary, cache, and build files"
+	@echo "  format [check=1]  to apply black formatter; use with 'check=1' to check instead of modify (requires black)"
+	@echo "  lint              to run pylint on source files"
+
+.PHONY: clean
+clean:
+	rm -rf src/qml_benchmarks.egg-info/
+	rm -rf src/qml_benchmarks/__pycache__/
+	rm -rf src/qml_benchmarks/models/__pycache__/
+
+.PHONY: format
+format:
+ifdef check
+	black -l 100 ./src/qml_benchmarks --check
+else
+	black -l 100 ./src/qml_benchmarks
+endif
+
+.PHONY: lint
+lint:
+	pylint src/qml_benchmarks --rcfile .pylintrc
diff --git a/nersc/Dockerfile.ubu22-PennyLane b/nersc/Dockerfile.ubu22-PennyLane
new file mode 100644
index 00000000..d4849da5
--- /dev/null
+++ b/nersc/Dockerfile.ubu22-PennyLane
@@ -0,0 +1,51 @@
+FROM ubuntu:22.04
+
+# time podman-hpc build -f Dockerfile.ubu22-PennyLane -t balewski/ubu22-pennylane:p5 .
+#>>> real 6m47.959s
+
+# podman commit -a "Jan Balewski" c3d48cf15876XXX balewski/XXXkit-qml:p2ch
+# on laptop:
+# podman run -it balewski/ubu22-pennylane:p0 bash
+# time python3 -c 'import pennylane'
+
+# needed by tzdata (pulled in by some of the libs in section 2) so it does not prompt interactively
+ARG DEBIAN_FRONTEND=noninteractive
+ENV TZ=America/Los_Angeles
+
+RUN echo 1a-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA OS update && \
+    apt-get update && \
+    apt-get install -y locales autoconf automake gcc g++ make vim wget ssh openssh-server sudo git emacs aptitude build-essential xterm python3-pip python3-tk python3-scipy python3-dev iputils-ping net-tools screen feh hdf5-tools python3-bitstring mlocate graphviz tzdata x11-apps && \
+    apt-get clean all
+
+# Forbid installing qiskit-terra for qiskit 1.0
+RUN echo 2b-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA Qiskit 1.0 libs && \
+    pip3 install -c https://qisk.it/1-0-constraints qiskit[visualization] qiskit-ibm-runtime qiskit-machine-learning qiskit_ibm_experiment qiskit-aer
+
+RUN echo 2c-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA ML+QML libs && \
+    pip3 install scikit-learn pandas seaborn[stats] networkx[default] optuna
+
+RUN echo 2d-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA python libs && \
+    pip3 install --upgrade pip && \
+    pip3 install matplotlib h5py ruamel.yaml scipy jupyter notebook bitstring
+
+# note: python3-tk installs Tk so that matplotlib can display graphics
+RUN echo 2e-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA python libs && \
+    pip3 install --upgrade Pillow
+
+# based on https://pennylane.ai/install/
+# based on https://pytorch.org/get-started/locally/
+
+RUN echo 3a-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA pennylane && \
+    pip3 install pennylane --upgrade && \
+    pip3 install pennylane-lightning pennylane-lightning[gpu] pennylane-sf pennylane-qiskit pennylane-cirq pennylane-catalyst scipy==1.11.4 && \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+
+RUN echo 3b-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA JAX && \
+    pip3 install "jax[cpu]"==0.4.23 && \
+    pip3 install jaxopt optax
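+
+# Quick smoke test of the freshly built image (a minimal sketch; the tag
+# follows the build comment at the top - adjust to your own):
+#  podman run -it --rm balewski/ubu22-pennylane:p5 \
+#      python3 -c 'import pennylane, jax, torch; print(pennylane.__version__, jax.__version__, torch.__version__)'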
+
+
diff --git a/nersc/container/Dockerfile.ubu22-PennyLane b/nersc/container/Dockerfile.ubu22-PennyLane
new file mode 100644
index 00000000..ec5bd051
--- /dev/null
+++ b/nersc/container/Dockerfile.ubu22-PennyLane
@@ -0,0 +1,51 @@
+FROM ubuntu:22.04
+
+# time podman-hpc build -f Dockerfile.ubu22-PennyLane -t balewski/ubu22-pennylane:p5 .
+#>>> real 6m47.959s
+
+# podman commit -a "Jan Balewski" c3d48cf15876XXX balewski/XXXkit-qml:p2ch
+# on laptop:
+# podman run -it balewski/ubu22-pennylane:p0 bash
+# time python3 -c 'import pennylane'
+
+# needed by tzdata (pulled in by some of the libs in section 2) so it does not prompt interactively
+ARG DEBIAN_FRONTEND=noninteractive
+ENV TZ=America/Los_Angeles
+
+RUN echo 1a-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA OS update && \
+    apt-get update && \
+    apt-get install -y locales autoconf automake gcc g++ make vim wget ssh openssh-server sudo git emacs aptitude build-essential xterm python3-pip python3-tk python3-scipy python3-dev iputils-ping net-tools screen feh hdf5-tools python3-bitstring mlocate graphviz tzdata x11-apps && \
+    apt-get clean all
+
+# Forbid installing qiskit-terra for qiskit 1.0
+RUN echo 2b-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA Qiskit 1.0 libs && \
+    pip3 install -c https://qisk.it/1-0-constraints qiskit[visualization] qiskit-ibm-runtime qiskit-machine-learning qiskit_ibm_experiment qiskit-aer
+
+RUN echo 2c-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA ML+QML libs && \
+    pip3 install scikit-learn pandas seaborn[stats] networkx[default] optuna
+
+RUN echo 2d-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA python libs && \
+    pip3 install --upgrade pip && \
+    pip3 install matplotlib h5py ruamel.yaml scipy jupyter notebook bitstring
+
+# note: python3-tk installs Tk so that matplotlib can display graphics
+RUN echo 2e-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA python libs && \
+    pip3 install --upgrade Pillow
+
+# based on https://pennylane.ai/install/
+# based on https://pytorch.org/get-started/locally/
+
+RUN echo 3a-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA pennylane && \
+    pip3 install pennylane --upgrade && \
+    pip3 install pennylane-lightning pennylane-lightning[gpu] pennylane-sf pennylane-qiskit pennylane-cirq pennylane-catalyst scipy==1.11.4 && \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+
+RUN echo 3b-AAAAAAAAAAAAAAAAAAAAAAAAAAAAA JAX && \
+    pip3 install -U "jax[cpu]" && \
+    pip3 install jaxopt optax
+
+
diff --git a/nersc/container/Readme b/nersc/container/Readme
new file mode 100644
index 00000000..626b9a2d
--- /dev/null
+++ b/nersc/container/Readme
@@ -0,0 +1,135 @@
+Usage of Podman container for PennyLane
+
+Details are here:
+https://docs.google.com/document/d/1LHYlTXtOyA5vZSrJF0IdZf3L88DcguMWXyHAPLV-MvU/edit?usp=sharing
+
+
+Summary:
+These are instructions on how to build and use a Podman image.
+The example consists of three parts:
+A) Building the image, which I've already completed on PM.
+B) Running the image on PM in three modes: simple interactive, advanced interactive, and a Slurm job that executes your code inside a podman image.
+C) Running the same image on your laptop. You'll need to build it as described in A), but then running it is straightforward.
+
+
+
+= = =
+A = = = = = = = = = =  building container (one time)  = = = = = = = = =
+= = =
+Dockerfile: Dockerfile.ubu22-PennyLane
+Laptop:
+  podman build -f Dockerfile.ubu22-PennyLane -t balewski/ubu22-pennylane:p5 .
+Perlmutter:
+  podman-hpc build -f Dockerfile.ubu22-PennyLane -t balewski/ubu22-pennylane:p5 .
+  POD_PUB=/cfs/cdirs/nstaff/balewski/podman_common/
+  podman-hpc --squash-dir /global/$POD_PUB migrate balewski/ubu22-pennylane:p5
+  chmod -R a+rx /global/$POD_PUB   # to allow anyone to use this image
+
+  Note: the instructions above are for user balewski, who has access to CFS/nstaff; change the strings as needed.
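+
+To double-check the migration before moving on, list the image through the
+additional store (an illustrative check, reusing the example paths from A):
+  export PODMANHPC_ADDITIONAL_STORES=/dvs_ro/cfs/cdirs/nstaff/balewski/podman_common/
+  podman-hpc images | grep pennylane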
+
+
+
+= = =
+B = = = = = = = = = =  Perlmutter @ NERSC  = = = = = = = = =
+= = =
+
+B.1 - - - - - - simple interactive use on a worker node - - - - - - -
+
+ssh perlmutter
+export PS1="\h:\W> "   # to shorten overly long directory names in the prompt
+
+POD_PUB=/cfs/cdirs/nstaff/balewski/podman_common/
+export PODMANHPC_ADDITIONAL_STORES=/dvs_ro/$POD_PUB
+cd $SCRATCH/tmp
+salloc -q interactive -C cpu -t 4:00:00 -A nstaff
+podman-hpc run -it balewski/ubu22-pennylane:p5 bash
+  python3 -c 'import pennylane'
+exit
+
+
+B.2 - - - - - - advanced interactive use on a worker node - - - - - - -
+copy & customize https://github.com/balewski/quantumMind/blob/main/PennyLane/pm_podman.source
+to configure shortcuts to your liking
+
+ssh perlmutter
+salloc -q interactive -C cpu -t 4:00:00 -A nstaff
+source pm_podman.source
+
+[ do your work ]
+
+exit  # from podman
+exit  # from salloc
+
+
+
+B.3 - - - - - - advanced: Slurm job with multiple tasks using the Podman image - - - - - - -
+copy and customize https://github.com/balewski/quantumMind/tree/main/PennyLane/qml_intro/
+  batchPodman.slr
+  wrap_podman.sh
+
+You will edit code stored in the CFS area, but the running job will read from and write to SCRATCH using its own copy. Do NOT start the Podman image manually; a minimal sketch of such a batch script is shown below.
+
+ssh perlmutter
+cd to your CFS area
+sbatch batchPodman.slr
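+
+For orientation only: the real batchPodman.slr lives in the repo linked above;
+the sketch below just illustrates the pattern (queue, account, task count, and
+my_benchmark.py are placeholders):
+
+  #!/bin/bash
+  #SBATCH -q regular
+  #SBATCH -C cpu
+  #SBATCH -N 1
+  #SBATCH -t 00:30:00
+  #SBATCH -A nstaff                 # placeholder account
+  export PODMANHPC_ADDITIONAL_STORES=/dvs_ro/cfs/cdirs/nstaff/balewski/podman_common/
+  IMG=balewski/ubu22-pennylane:p5
+  # each task runs the user script in its own container;
+  # wrap_podman.sh plays this role in the real setup
+  srun -n 4 podman-hpc run --rm \
+      --volume $SCRATCH/qml-benchmarks:/root --workdir /root \
+      $IMG python3 -u my_benchmark.py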
+
+
+B.4 - - - - - - advanced: install project-specific software; NOT recommended, but possible - - - - - - -
+
+ssh pm
+
+IMG=jbowles/ubu22-pennylane:p5
+POD_PUB=/dvs_ro/cfs/cdirs/m4139/qml-benchmarks/nersc/podman/
+export PODMANHPC_ADDITIONAL_STORES=$POD_PUB
+CFSH=$SCRATCH/pennylane_wrk        # for Jan
+#CFSH=/global/cfs/cdirs/m4139      # for Joseph
+podman-hpc run -it --volume $CFSH/qml-benchmarks:/root --workdir /root $IMG bash
+  pip3 install --user .   # run inside the image; creates a private .local/
+exit
+
+Testing:
+podman-hpc run -it …
+  python3 -u -c "import qml_benchmarks"
+exit
+
+AND when launching the podman image you must have this volume mount, on top of all other volumes you need:
+
+  --volume $CFSH/qml-benchmarks:/root
+
+B.5 - - - - - - advanced: Jupyter notebook on the laptop, powered by the Podman PennyLane image running on PM - - - - - - -
+  Easy to do
+
+
+
+
+= = =
+C = = = = = = = = = =  Laptop  = = = = = = = = =
+= = =
+
+C.1 - - - - - - simple interactive use on a laptop - - - - - - -
+$ podman run -it balewski/ubu22-pennylane:p3 bash
+  python3 -c 'import pennylane'
+exit
+
+
+C.2 - - - - - - advanced w/ predefined volume mounts on a laptop - - - - - - -
+copy & customize https://github.com/balewski/quantumMind/blob/main/PennyLane/laptop_podman.source
+to configure shortcuts to your liking
+
+source laptop_podman.source
+
+[ do your work ]
+
+exit  # from podman
+
+If you need to reset podman, do: source restart_podman.source
+
+
+C.4 - - - - - - advanced: Jupyter notebook on the laptop, powered by the Podman PennyLane image running on the laptop - - - - - - -
+
+source laptop_podman.source jnb
+(exec inside the running image)  jupyter notebook --ip 0.0.0.0 --no-browser --allow-root --port 8833
+copy the http string and paste it into a local browser, e.g.: http://127.0.0.1:8833/tree?token=7c5cdf5e5c4f1a9d2a616a739988132d59f1a7ca3f4c0779
+
+Troubleshooting: if the Jupyter notebook (JNB) page remains blank, change the port ID in laptop_podman.source and restart the image.
diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX.csv
new file mode 100644
index 00000000..4633b5f3
--- /dev/null
+++ b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX.csv
@@ -0,0 +1,2 @@
+construct_kernel_time,training_time,predict_time,hyperparameters
+3860.4390046596527,3860.4435765743256,3755.371912240982,"{'repeats': 10, 'use_jax': True, 'vmap': True, 'jit': True}"
diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX_packages.txt
new file mode 100644
index 00000000..f5d4f0ca
--- /dev/null
+++ b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX_packages.txt
@@ -0,0 +1,236 @@
+Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2
+greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX_scontrol.txt 
b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_13d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX.csv new file mode 100644 index 00000000..f7aec58e --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +construct_kernel_time,training_time,predict_time,hyperparameters +2.4044647216796875,2.408006429672241,1.8492097854614258,"{'repeats': 10, 'use_jax': True, 'vmap': True, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 
+PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/IQPKernelClassifier_linearly_separable_2d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX.csv new file mode 100644 index 00000000..9e27eb16 --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +first_train_step,first_train_step_std,consec_train_step,consec_train_step_std,predict_time,predict_time_std,hyperparameters +182.53770942687987,9.854155175126856,24.854176211597945,0.899221214100941,191.73146567344665,13.744482148495923,"{'n_layers': 15, 'repeats': 10, 'use_jax': True, 'vmap': True, 'max_steps': 100, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..bd314f52 --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- 
-------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.2 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.26 +jaxlib 0.4.26 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.3 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.0.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 
+sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.4.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_15d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX.csv new file mode 100644 index 00000000..257a5e6e --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +first_train_step,first_train_step_std,consec_train_step,consec_train_step_std,predict_time,predict_time_std,hyperparameters +6.239000558853149,0.13502860069274902,0.0019811849401454736,9.583583985916306e-05,2.7535842657089233,0.0826483964920044,"{'n_layers': 15, 'repeats': 10, 'use_jax': True, 'vmap': True, 'max_steps': 100, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..bd314f52 --- /dev/null +++ b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.2 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 
0.4.26 +jaxlib 0.4.26 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.3 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.0.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.4.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/IQPVariationalClassifier_linearly_separable_2d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX.csv 
b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX.csv new file mode 100644 index 00000000..7d9877ce --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +construct_kernel_time,training_time,predict_time,hyperparameters +68.69405555725098,68.69814586639404,70.27257776260376,"{'trotter_steps': 5, 'use_jax': True, 'vmap': True, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 
5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_15d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX.csv new file mode 100644 index 00000000..90cfb263 --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +construct_kernel_time,training_time,predict_time,hyperparameters +143.43669366836548,143.44123721122742,146.31366968154907,"{'trotter_steps': 5, 'use_jax': True, 'vmap': True, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 
+cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu 
+tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_17d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX.csv new file mode 100644 index 00000000..10f48e95 --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +construct_kernel_time,training_time,predict_time,hyperparameters +987.022438287735,987.0266752243042,905.2389245033264,"{'trotter_steps': 5, 'use_jax': True, 'vmap': True, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 
+matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/ProjectedQuantumKernel_linearly_separable_20d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX.csv new file mode 100644 index 00000000..bb1d1bf0 --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +first_train_step,first_train_step_std,consec_train_step,consec_train_step_std,predict_time,predict_time_std,hyperparameters +118.64471855163575,13.176545198064549,5.997364590866397,0.3148851718071143,603.2157266139984,30.745927128215357,"{'n_layers': 4, 'batch_size': 16, 'use_jax': True, 'vmap': True, 'max_steps': 100, 'jit': True}" diff --git 
a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 
0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_13d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX.csv new file mode 100644 index 00000000..6ee18e68 --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +first_train_step,first_train_step_std,consec_train_step,consec_train_step_std,predict_time,predict_time_std,hyperparameters +186.66433362960817,26.55748553966961,28.853529106005272,2.0727284764603633,2363.9554340362547,173.4738127352816,"{'n_layers': 4, 'batch_size': 16, 'use_jax': True, 'vmap': True, 'max_steps': 100, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 
+executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 +notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git 
a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_16d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX.csv b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX.csv new file mode 100644 index 00000000..0f549554 --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX.csv @@ -0,0 +1,2 @@ +first_train_step,first_train_step_std,consec_train_step,consec_train_step_std,predict_time,predict_time_std,hyperparameters +18.878392934799194,0.00267791748046875,0.0069307666836362905,3.6954879760742188e-06,5.774362087249756,0.06407976150512695,"{'n_layers': 4, 'batch_size': 16, 'use_jax': True, 'vmap': True, 'max_steps': 100, 'jit': True}" diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX_packages.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX_packages.txt new file mode 100644 index 00000000..f5d4f0ca --- /dev/null +++ b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX_packages.txt @@ -0,0 +1,236 @@ +Package Version +------------------------- -------------- +absl-py 2.1.0 +alembic 1.13.1 +annotated-types 0.6.0 +antlr4-python3-runtime 4.9.2 +anyio 4.3.0 +appdirs 1.4.4 +argon2-cffi 23.1.0 +argon2-cffi-bindings 21.2.0 +arrow 1.3.0 +asttokens 2.4.1 +astunparse 1.6.3 +async-lru 2.0.4 +attrs 23.2.0 +autograd 1.6.2 +autoray 0.6.9 +Babel 2.14.0 +beautifulsoup4 4.12.3 +beniget 0.4.1 +bitstring 3.1.7 +bleach 6.1.0 +cachetools 5.3.3 +certifi 2024.2.2 +cffi 1.16.0 +charset-normalizer 3.3.2 +chex 0.1.86 +cirq-core 1.3.0 +cirq-pasqal 1.3.0 +click 8.1.7 +cloudpickle 3.0.0 +colorlog 6.8.2 +comm 0.2.2 +contourpy 1.2.1 +cryptography 42.0.5 +cycler 0.12.1 +dask 2024.4.2 +dbus-python 1.2.18 +debugpy 1.8.1 +decorator 4.4.2 +defusedxml 0.7.1 +diastatic-malt 2.15.1 +dill 0.3.8 +distro 1.7.0 +duet 0.2.9 +etils 1.7.0 +exceptiongroup 1.2.1 +executing 2.0.1 +fastdtw 0.3.4 +fastjsonschema 2.19.1 +filelock 3.13.1 +fire 0.6.0 +flax 0.8.3 +fonttools 4.51.0 +fqdn 1.5.1 +fsspec 2024.3.1 +future 1.0.0 +gast 0.5.2 +greenlet 3.0.3 +h11 0.14.0 +h5py 3.11.0 +httpcore 1.0.5 +httpx 0.27.0 +ibm-cloud-sdk-core 3.20.0 +ibm-platform-services 0.53.6 +idna 3.7 +importlib_metadata 7.1.0 +importlib_resources 6.4.0 +ipykernel 6.29.4 +ipython 8.24.0 +ipywidgets 8.1.2 +isoduration 20.11.0 +jax 0.4.23 +jaxlib 0.4.23 +jaxopt 0.8.3 +jedi 0.19.1 +Jinja2 3.1.3 +joblib 1.4.0 +json5 0.9.25 +jsonpointer 2.4 +jsonschema 4.21.1 +jsonschema-specifications 2023.12.1 +jupyter 1.0.0 +jupyter_client 8.6.1 +jupyter-console 6.6.3 +jupyter_core 5.7.2 +jupyter-events 0.10.0 +jupyter-lsp 2.2.5 +jupyter_server 2.14.0 +jupyter_server_terminals 0.5.3 +jupyterlab 4.1.8 +jupyterlab_pygments 0.3.0 +jupyterlab_server 2.27.1 +jupyterlab_widgets 3.0.10 +kiwisolver 1.4.5 +lark-parser 0.12.0 +llvmlite 0.42.0 +locket 1.0.0 +Mako 1.3.3 +markdown-it-py 3.0.0 +MarkupSafe 2.1.5 +matplotlib 3.8.4 +matplotlib-inline 0.1.7 +mdurl 0.1.2 +mistune 3.0.2 +ml-dtypes 0.4.0 +mpmath 1.3.0 +msgpack 1.0.8 +nbclient 0.10.0 +nbconvert 7.16.4 +nbformat 5.10.4 +nest-asyncio 1.6.0 +networkx 3.3 +notebook 7.1.3 
+notebook_shim 0.2.4 +numba 0.59.1 +numpy 1.26.4 +olefile 0.46 +opt-einsum 3.3.0 +optax 0.2.2 +optuna 3.6.1 +orbax-checkpoint 0.5.10 +overrides 7.7.0 +packaging 24.0 +pandas 2.2.2 +pandocfilters 1.5.1 +parso 0.8.4 +partd 1.4.1 +patsy 0.5.6 +pbr 6.0.0 +PennyLane 0.35.1 +PennyLane-Catalyst 0.5.0 +PennyLane-Cirq 0.34.0 +PennyLane_Lightning 0.35.1 +PennyLane_Lightning_GPU 0.35.1 +PennyLane-qiskit 0.35.1 +PennyLane-SF 0.29.0 +pexpect 4.9.0 +pillow 10.3.0 +pip 24.0 +platformdirs 4.2.1 +ply 3.11 +prometheus_client 0.20.0 +prompt-toolkit 3.0.43 +protobuf 5.26.1 +psutil 5.9.8 +ptyprocess 0.7.0 +pure-eval 0.2.2 +pycparser 2.22 +pydantic 2.7.1 +pydantic_core 2.18.2 +pydantic-settings 2.2.1 +pydot 2.0.0 +Pygments 2.17.2 +PyGObject 3.42.1 +PyJWT 2.8.0 +pylatexenc 2.10 +pyparsing 3.1.2 +pyspnego 0.10.2 +python-dateutil 2.9.0.post0 +python-dotenv 1.0.1 +python-json-logger 2.0.7 +pythran 0.10.0 +pytz 2024.1 +PyYAML 6.0.1 +pyzmq 26.0.2 +qiskit 1.0.2 +qiskit-aer 0.14.1 +qiskit-algorithms 0.3.0 +qiskit-ibm-experiment 0.4.7 +qiskit-ibm-provider 0.11.0 +qiskit-ibm-runtime 0.20.0 +qiskit-machine-learning 0.7.2 +qml_benchmarks 0.1 +qtconsole 5.5.1 +QtPy 2.4.1 +quantum-blackbird 0.5.0 +quantum-xir 0.2.2 +referencing 0.35.0 +requests 2.31.0 +requests-ntlm 1.2.0 +rfc3339-validator 0.1.4 +rfc3986-validator 0.1.1 +rich 13.7.1 +rpds-py 0.18.0 +ruamel.yaml 0.18.6 +ruamel.yaml.clib 0.2.8 +rustworkx 0.14.2 +scikit-learn 1.4.2 +scipy 1.11.4 +seaborn 0.13.2 +semantic-version 2.10.0 +Send2Trash 1.8.3 +setuptools 59.6.0 +six 1.16.0 +sniffio 1.3.1 +sortedcontainers 2.4.0 +soupsieve 2.5 +SQLAlchemy 2.0.29 +ssh-import-id 5.11 +stack-data 0.6.3 +statsmodels 0.14.2 +stevedore 5.2.0 +StrawberryFields 0.23.0 +symengine 0.11.0 +sympy 1.12 +tensorstore 0.1.58 +termcolor 2.4.0 +terminado 0.18.1 +thewalrus 0.21.0 +threadpoolctl 3.5.0 +tinycss2 1.3.0 +toml 0.10.2 +tomli 2.0.1 +tomlkit 0.12.4 +toolz 0.12.1 +torch 2.3.0+cpu +torchaudio 2.3.0+cpu +torchvision 0.18.0+cpu +tornado 6.4 +tqdm 4.66.2 +traitlets 5.14.3 +types-python-dateutil 2.9.0.20240316 +typing_extensions 4.11.0 +tzdata 2024.1 +uri-template 1.3.0 +urllib3 2.2.1 +wcwidth 0.2.13 +webcolors 1.13 +webencodings 0.5.1 +websocket-client 1.8.0 +websockets 12.0 +wheel 0.37.1 +widgetsnbextension 4.0.10 +xanadu-cloud-client 0.3.2 +zipp 3.18.1 diff --git a/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX_scontrol.txt b/nersc/performance_indicators/JAX/QuantumMetricLearner_linearly_separable_2d_performance_indicators_JAX_scontrol.txt new file mode 100644 index 00000000..e69de29b diff --git a/nersc/performance_indicators/README.md b/nersc/performance_indicators/README.md new file mode 100644 index 00000000..8c131e62 --- /dev/null +++ b/nersc/performance_indicators/README.md @@ -0,0 +1,58 @@ +# Instructions for running performance indicators + +To get the data for a given model we need to run the python script `perf_ind_variational.py` (for variational models) or +`perf_ind_kernel.py` (for kernel models). + +Within the scripts there are a number of settings that can be changed at the start. This should make it easy to reuse +the same code when we have updated models. **Please make sure you chose a unique choice of `perf_ind_name` when gathering results for a new +workflow** (i.e. the name of the workflow) since results will be overwritten if the same name is used twice. + +The performance indicators use the hyperparameter settings specified in `hyperparam_settings.yaml`. 
These are chosen to
+be those which require the most compute and shouldn't be changed.
+
+To run a performance indicator you need to edit a number of things in the file `nersc/submit_job.slr`:
+
+- Choose the number of features by editing `numFeatures=X`
+
+- Choose whether it is for a variational or kernel model by editing the line
+
+  `CMD=" python3 -u performance_indicators/XXX.py --numFeatures $numFeatures --inputPath performance_indicators/linearly_separable/ "`
+
+  where `XXX` is either `perf_ind_kernel` or `perf_ind_variational`
+
+- Decide the maximum job time (format HH:MM:SS):
+
+  `#SBATCH -q shared -t 1:00:00`
+
+- Decide the number of CPUs to use (see below):
+
+  `#SBATCH --cpus-per-task=X`
+
+To launch a job, run
+
+`sbatch submit_job.slr`
+
+This will add the job to the queue; when it finishes, the results are stored in `performance_indicators/perf_ind_name`.
+Add the job ID to the Google sheet for reference.
+
+## Determining the number of CPUs
+To avoid wasting resources, you should first determine how many CPUs are required. To get an idea of
+CPU usage, launch a job for a short amount of time, then kill it. Run `seff JOBID` and check the CPU usage.
+Repeat the process, decreasing the number of CPUs each time, until a reasonable number of CPUs is found (i.e. most are in use).
+
+Add this choice to the `CPUs` row of the Google sheet.
+
+## Recording memory and CPU usage
+Once the job has finished, run
+
+`seff JOBID`
+
+where JOBID is the Slurm job ID. Add the corresponding info to the Google sheet.
diff --git a/nersc/performance_indicators/check.py b/nersc/performance_indicators/check.py
new file mode 100644
index 00000000..c27dfdf3
--- /dev/null
+++ b/nersc/performance_indicators/check.py
@@ -0,0 +1,7 @@
+import numpy as np
+
+array = np.random.rand(10,10)
+
+print('testing')
+np.savetxt("check1.txt", array)
+np.savetxt("WORKDIR/check2.txt", array)
diff --git a/nersc/performance_indicators/generate_linearly_separable.py b/nersc/performance_indicators/generate_linearly_separable.py
new file mode 100644
index 00000000..477f3273
--- /dev/null
+++ b/nersc/performance_indicators/generate_linearly_separable.py
@@ -0,0 +1,40 @@
+# Copyright 2024 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Generate datasets for the LINEARLY SEPARABLE benchmark.""" + +import os +import numpy as np +from sklearn.model_selection import train_test_split +from qml_benchmarks.data import generate_linearly_separable + +np.random.seed(42) + +os.makedirs("linearly_separable", exist_ok=True) + +n_samples = 300 + +for n_features in range(2, 21): + margin = 0.02 * n_features + + X, y = generate_linearly_separable(n_samples, n_features, margin) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) + + name_train = f"linearly_separable/linearly_separable_{n_features}d_train.csv" + data_train = np.c_[X_train, y_train] + np.savetxt(name_train, data_train, delimiter=",") + + name_test = f"linearly_separable/linearly_separable_{n_features}d_test.csv" + data_test = np.c_[X_test, y_test] + np.savetxt(name_test, data_test, delimiter=",") diff --git a/nersc/performance_indicators/hyperparam_settings.yaml b/nersc/performance_indicators/hyperparam_settings.yaml new file mode 100644 index 00000000..525220d4 --- /dev/null +++ b/nersc/performance_indicators/hyperparam_settings.yaml @@ -0,0 +1,14 @@ +IQPVariationalClassifier: + n_layers: 15 + repeats: 10 + +QuantumMetricLearner: + n_layers: 4 + batch_size: 16 + +ProjectedQuantumKernel: + trotter_steps: 5 + +IQPKernelClassifier: + repeats: 10 + diff --git a/nersc/performance_indicators/perf_ind_kernel.py b/nersc/performance_indicators/perf_ind_kernel.py new file mode 100644 index 00000000..e633af24 --- /dev/null +++ b/nersc/performance_indicators/perf_ind_kernel.py @@ -0,0 +1,108 @@ +import qml_benchmarks +import pennylane as qml +import jax +import jax.numpy as jnp +import numpy as np +import time +import csv +import os +import yaml +import subprocess +from qml_benchmarks.hyperparam_search_utils import read_data + +import argparse +def get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("-v","--verbosity",type=int,choices=[0, 1, 2,3,4], help="increase output verbosity", default=1, dest='verb') + parser.add_argument("--inputPath",default='linearly_separable/',help='input data location') + parser.add_argument('-n','--numFeatures',type=int,default=2, help="dataset dimension ") + + args = parser.parse_args() + + print( 'myArg-program:',parser.prog) + for arg in vars(args): print( 'myArg:',arg, getattr(args, arg)) + + #assert os.path.exists(args.outPath) + return args + +#================================= +#================================= +# M A I N +#================================= +#================================= +if __name__=="__main__": + args=get_parser() + + ####### SETTINGS ####################### + # You only need to change this to make a different performance indicator + + #define the model + from qml_benchmarks.models.projected_quantum_kernel import ProjectedQuantumKernel as Model + + #implementation attributes of model + use_jax = False + vmap = True + jit = True + model_settings = {'use_jax': use_jax, 'vmap': vmap, 'jit': jit} + + perf_ind_name = 'CAT_CPU' #a name for the performance indicator used for naming files + + ################################# + + n_features = args.numFeatures # dataset dimension + model_name = Model().__class__.__name__ + + # get the 'worst case' hyperparameter settings for the model (those that require the most resources) + with open('performance_indicators/hyperparam_settings.yaml', "r") as file: + hp_settings = yaml.safe_load(file) + + hyperparams = {**hp_settings[model_name], **model_settings} + print(hyperparams) + assert os.path.exists(args.inputPath) + 
#inpF1=f'../../paper/benchmarks/linearly_separable/linearly_separable_{n_features}d_train.csv' + inpF1=os.path.join(args.inputPath,'linearly_separable_%dd_train.csv'%(n_features)) + inpF2=inpF1.replace('train','test') + print('M:inpF1',inpF1) + X_train,y_train = read_data(inpF1) + print('M:inpF2',inpF2) + X_test,y_test = read_data(inpF2) + + model = Model(**hyperparams) + model.fit(X_train, y_train) + + #kernel construction time + construct_kernel_time = model.construct_kernel_time_ + #full training time + training_time = model.training_time_ + #prediction time + time0 = time.time() + model.predict(X_test) + predict_time = time.time() - time0 + + + #write to csv + data = [construct_kernel_time, training_time, predict_time, hyperparams] + + filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}.csv" + packages_filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}_packages.txt" + scontrol_filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}_scontrol.txt" + + header = ['construct_kernel_time', 'training_time', 'predict_time', 'hyperparameters'] + + if not os.path.exists('performance_indicators/'+perf_ind_name): + # Create the directory + os.mkdir('performance_indicators/'+perf_ind_name) + + #write perf indicator data + with open('performance_indicators/'+perf_ind_name+'/'+filename, mode="w", newline="") as file: + writer = csv.writer(file) + writer.writerow(header) + for row in [data]: + writer.writerow(row) + + # get package list and write to file + output = subprocess.check_output(['pip', 'list']).decode('utf-8') + with open('performance_indicators/'+perf_ind_name+'/'+packages_filename, 'w') as file: + file.write(output) + + print('M:done') diff --git a/nersc/performance_indicators/perf_ind_variational.py b/nersc/performance_indicators/perf_ind_variational.py new file mode 100644 index 00000000..ba51c6b3 --- /dev/null +++ b/nersc/performance_indicators/perf_ind_variational.py @@ -0,0 +1,148 @@ +import qml_benchmarks +import pennylane as qml +import jax +import jax.numpy as jnp +import numpy as np +import time +import csv +import os +import yaml +import subprocess +from qml_benchmarks.hyperparam_search_utils import read_data + +import argparse + + +def get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2, 3, 4], help="increase output verbosity", + default=1, dest='verb') + parser.add_argument("--inputPath", default='linearly_separable/', help='input data location') + parser.add_argument('-n', '--numFeatures', type=int, default=2, help="dataset dimension ") + + args = parser.parse_args() + + print('myArg-program:', parser.prog) + for arg in vars(args): print('myArg:', arg, getattr(args, arg)) + + # assert os.path.exists(args.outPath) + return args + + +# ================================= +# ================================= +# M A I N +# ================================= +# ================================= +if __name__ == "__main__": + args = get_parser() + + ####### SETTINGS ####################### + # You only need to change this to make a different performance indicator + + #define model + # + #from qml_benchmarks.models.quantum_metric_learning import QuantumMetricLearner as Model + from qml_benchmarks.models.iqp_variational import IQPVariationalClassifier as Model + #implementation attributes of model + use_jax = True + vmap = True + jit = True + model_settings = {'use_jax': use_jax, 'vmap': 
vmap, 'jit': jit}
+
+    max_steps = 10 #the number of gradient descent steps to use to estimate the step time
+    perf_ind_name = 'JAX' #a name for the performance indicator used for naming files
+    n_trials = 1 #number of trials to average over
+    n_test = 10 #number of test set points. For full test set use n_test = -1
+
+    #################################
+
+    n_features = args.numFeatures  # dataset dimension
+    model_name = Model().__class__.__name__
+
+    # get the 'worst case' hyperparameter settings for the model (those that require the most resources)
+    with open('performance_indicators/hyperparam_settings.yaml', "r") as file:
+        hp_settings = yaml.safe_load(file)
+
+    hyperparams = {**hp_settings[model_name], **model_settings}
+    print(hyperparams)
+
+    assert os.path.exists(args.inputPath)
+    # inpF1=f'../../paper/benchmarks/linearly_separable/linearly_separable_{n_features}d_train.csv'
+    inpF1 = os.path.join(args.inputPath, 'linearly_separable_%dd_train.csv' % (n_features))
+    inpF2 = inpF1.replace('train', 'test')
+    print('M:inpF1', inpF1)
+    X_train, y_train = read_data(inpF1)
+    print('M:inpF2', inpF2)
+    X_test, y_test = read_data(inpF2)
+
+    if n_test != -1:
+        X_test = X_test[:n_test]
+        y_test = y_test[:n_test]
+
+    first_train_steps = []
+    av_consec_train_steps = []
+    predict_times = []
+
+    for trial in range(n_trials):
+        jax.clear_caches()
+
+        #note we train for max_steps only, so we won't reach convergence.
+        model = Model(**hyperparams, max_steps=max_steps)
+        model.fit(X_train, y_train)
+
+        #get step times from loss history data
+        step_times = np.array([model.loss_history_[1][i + 1] - model.loss_history_[1][i]
+                               for i in range(len(model.loss_history_[1]) - 1)])
+        step_times = np.insert(step_times, 0, [model.loss_history_[1][0]])
+
+        #first train step
+        first_train_steps.append(step_times[0])
+        #consecutive (average) train step
+        av_consec_train_steps.append(float(jnp.mean(step_times[1:])))
+        #prediction time
+        time0 = time.time()
+        model.predict(X_test)
+        predict_times.append(time.time() - time0)
+
+    #calculate means and stds
+    first_train_step = np.mean(first_train_steps)
+    first_train_step_std = np.std(first_train_steps)
+
+    consec_train_step = np.mean(av_consec_train_steps)
+    consec_train_step_std = np.std(av_consec_train_steps)
+
+    predict_time = np.mean(predict_times)
+    predict_time_std = np.std(predict_times)
+
+    #write to csv
+    data = [first_train_step, first_train_step_std, consec_train_step, consec_train_step_std, predict_time,
+            predict_time_std, hyperparams]
+
+    filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}.csv"
+    packages_filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}_packages.txt"
+    scontrol_filename = model_name+f"_linearly_separable_{n_features}d_performance_indicators_{perf_ind_name}_scontrol.txt"
+
+    header = ['first_train_step', 'first_train_step_std', 'consec_train_step', 'consec_train_step_std', 'predict_time',
+              'predict_time_std', 'hyperparameters']
+
+    if not os.path.exists('performance_indicators/'+perf_ind_name):
+        # Create the directory
+        os.mkdir('performance_indicators/'+perf_ind_name)
+
+    #write perf indicator data
+    with open('performance_indicators/'+perf_ind_name+'/'+filename, mode="w", newline="") as file:
+        writer = csv.writer(file)
+        writer.writerow(header)
+        for row in [data]:
+            writer.writerow(row)
+
+    # get package list and write to file
+    output = subprocess.check_output(['pip', 'list']).decode('utf-8')
+    with open('performance_indicators/'+perf_ind_name+'/'+packages_filename, 'w') as file:
+        file.write(output)
+
+    print('M:done')
diff --git a/nersc/performance_indicators/profiling.py b/nersc/performance_indicators/profiling.py
new file mode 100644
index 00000000..45336536
--- /dev/null
+++ b/nersc/performance_indicators/profiling.py
@@ -0,0 +1,108 @@
+import qml_benchmarks
+import pennylane as qml
+import jax
+import jax.numpy as jnp
+import numpy as np
+import time
+import csv
+import os
+import pickle
+import yaml
+import subprocess
+from qml_benchmarks.hyperparam_search_utils import read_data
+
+import argparse
+
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2, 3, 4], help="increase output verbosity",
+                        default=1, dest='verb')
+    parser.add_argument("--inputPath", default='linearly_separable/', help='input data location')
+    parser.add_argument('-n', '--numFeatures', type=int, default=2, help="dataset dimension ")
+
+    args = parser.parse_args()
+
+    print('myArg-program:', parser.prog)
+    for arg in vars(args): print('myArg:', arg, getattr(args, arg))
+
+    # assert os.path.exists(args.outPath)
+    return args
+
+
+# =================================
+# =================================
+#  M A I N
+# =================================
+# =================================
+if __name__ == "__main__":
+    args = get_parser()
+
+    ####### SETTINGS #######################
+    # You only need to change this to make a different performance indicator
+
+    #define model
+    #
+    #from qml_benchmarks.models.quantum_metric_learning import QuantumMetricLearner as Model
+    from qml_benchmarks.models.iqp_variational import IQPVariationalClassifier as Model
+    #implementation attributes of model
+    use_jax = True
+    vmap = True
+    jit = True
+    model_settings = {'use_jax': use_jax, 'vmap': vmap, 'jit': jit}
+
+    max_steps = 2 #the number of gradient descent steps to use to estimate the step time
+    profile_name = 'jax' #a name for the performance indicator used for naming files
+
+    #################################
+
+    n_features = args.numFeatures
+
+    model_name = Model().__class__.__name__
+
+    # get the 'worst case' hyperparameter settings for the model (those that require the most resources)
+    with open('performance_indicators/hyperparam_settings.yaml', "r") as file:
+        hp_settings = yaml.safe_load(file)
+
+    hyperparams = {**hp_settings[model_name], **model_settings}
+    print(hyperparams)
+
+    assert os.path.exists(args.inputPath)
+
+    first_step_times = []
+    second_step_times = []
+
+    inpF1 = os.path.join(args.inputPath, 'linearly_separable_%dd_train.csv' % (n_features))
+    print('M:inpF1', inpF1)
+    X_train, y_train = read_data(inpF1)
+
+    jax.clear_caches()
+    #train for max_steps only, to estimate the first and consecutive step times
+    model = Model(**hyperparams, max_steps=max_steps)
+    model.fit(X_train, y_train)
+
+    #get step times from loss history data
+    step_times = np.array([model.loss_history_[1][i + 1] - model.loss_history_[1][i]
+                           for i in range(len(model.loss_history_[1]) - 1)])
+    step_times = np.insert(step_times, 0, [model.loss_history_[1][0]])
+
+    dir_path = f'performance_indicators/profiling'
+    file_path = f'{dir_path}/step_times_{profile_name}_{model_name}.pkl'
+
+    if not os.path.exists(dir_path):
+        os.mkdir(dir_path)
+
+    if not os.path.exists(file_path):
+        data = {}
+        with open(file_path, 'wb') as file:
+            pickle.dump(data, file)
+
+    with open(file_path, 'rb') as file:
+        data = pickle.load(file)
+
+    data[n_features] = [step_times[0], step_times[1]]
+
+    with open(file_path, 'wb') as file:
+        pickle.dump(data, file)
+
+    print('M:done')
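Note on the profiling output format: the profiling scripts all persist their measurements the same way, as a pickled dict mapping the dataset dimension (numFeatures) to a pair [first_step_time, second_step_time], written to performance_indicators/profiling/step_times_{profile_name}_{model_name}.pkl. A minimal sketch for loading one of these files back for inspection (shown for the step_times_jax_IQPVariationalClassifier.pkl file added below; the other .pkl files follow the same layout, and the times are assumed to be in seconds since they come from time-stamp differences):

import pickle

# file name follows the step_times_{profile_name}_{model_name}.pkl pattern used by the profiling scripts
with open('performance_indicators/profiling/step_times_jax_IQPVariationalClassifier.pkl', 'rb') as f:
    step_times = pickle.load(f)

# keys: dataset dimension; values: [first step time, second step time]
for n_features in sorted(step_times):
    first, second = step_times[n_features]
    print(f'{n_features}d: first step {first:.3f}s, consecutive step {second:.3f}s')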
diff --git a/nersc/performance_indicators/profiling/iqpvar_memory_usage.txt b/nersc/performance_indicators/profiling/iqpvar_memory_usage.txt new file mode 100644 index 00000000..2afb576b --- /dev/null +++ b/nersc/performance_indicators/profiling/iqpvar_memory_usage.txt @@ -0,0 +1,2 @@ +#from 4 to .. +297144, 303920, 334832, 696136, 691800, 781188, 897556, 1003588, 1095188, 1208472, 1326364, 1464716, 1646004, 33495836 \ No newline at end of file diff --git a/nersc/performance_indicators/profiling/iqpvar_memory_usage_no_vmap.txt b/nersc/performance_indicators/profiling/iqpvar_memory_usage_no_vmap.txt new file mode 100644 index 00000000..c76cb21f --- /dev/null +++ b/nersc/performance_indicators/profiling/iqpvar_memory_usage_no_vmap.txt @@ -0,0 +1,2 @@ +#from 4 to .. +438708, 500256, 567260, 353384, 627016, 404372, 866712, 977068, 1081744, 1189500, 1312008, 1459828, 1638916, 2730588, 4112380, 6709988 \ No newline at end of file diff --git a/nersc/performance_indicators/profiling/step_times_catalyst_qjit_IQPKernelClassifier.pkl b/nersc/performance_indicators/profiling/step_times_catalyst_qjit_IQPKernelClassifier.pkl new file mode 100644 index 00000000..f82cbdb5 Binary files /dev/null and b/nersc/performance_indicators/profiling/step_times_catalyst_qjit_IQPKernelClassifier.pkl differ diff --git a/nersc/performance_indicators/profiling/step_times_catalyst_qjit_ProjectedQuantumKernel.pkl b/nersc/performance_indicators/profiling/step_times_catalyst_qjit_ProjectedQuantumKernel.pkl new file mode 100644 index 00000000..b898cd4b Binary files /dev/null and b/nersc/performance_indicators/profiling/step_times_catalyst_qjit_ProjectedQuantumKernel.pkl differ diff --git a/nersc/performance_indicators/profiling/step_times_jax_IQPVariationalClassifier.pkl b/nersc/performance_indicators/profiling/step_times_jax_IQPVariationalClassifier.pkl new file mode 100644 index 00000000..23fbe9cb Binary files /dev/null and b/nersc/performance_indicators/profiling/step_times_jax_IQPVariationalClassifier.pkl differ diff --git a/nersc/performance_indicators/profiling/step_times_jax_no_vmap_IQPVariationalClassifier.pkl b/nersc/performance_indicators/profiling/step_times_jax_no_vmap_IQPVariationalClassifier.pkl new file mode 100644 index 00000000..5b632b22 Binary files /dev/null and b/nersc/performance_indicators/profiling/step_times_jax_no_vmap_IQPVariationalClassifier.pkl differ diff --git a/nersc/performance_indicators/profiling_kernel.py b/nersc/performance_indicators/profiling_kernel.py new file mode 100644 index 00000000..49986eb2 --- /dev/null +++ b/nersc/performance_indicators/profiling_kernel.py @@ -0,0 +1,108 @@ +import qml_benchmarks +import pennylane as qml +import jax +import jax.numpy as jnp +import numpy as np +import time +import csv +import os +import pickle +import yaml +import subprocess +from qml_benchmarks.hyperparam_search_utils import read_data + +import argparse + + +def get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2, 3, 4], help="increase output verbosity", + default=1, dest='verb') + parser.add_argument("--inputPath", default='linearly_separable/', help='input data location') + parser.add_argument('-n', '--numFeatures', type=int, default=2, help="dataset dimension ") + + args = parser.parse_args() + + print('myArg-program:', parser.prog) + for arg in vars(args): print('myArg:', arg, getattr(args, arg)) + + # assert os.path.exists(args.outPath) + return args + + +# ================================= +# 
=================================
+# M A I N
+# =================================
+# =================================
+if __name__ == "__main__":
+    args = get_parser()
+
+    ####### SETTINGS #######################
+    # You only need to change this to make a different performance indicator
+
+    #define model
+    #
+    from qml_benchmarks.models.iqp_kernel import IQPKernelClassifier as Model
+    #implementation attributes of model
+    use_jax = False
+    vmap = False
+    jit = True
+    model_settings = {'use_jax': use_jax, 'vmap': vmap, 'jit': jit}
+
+    profile_name = 'catalyst_qjit' #a name for the performance indicator used for naming files
+
+    #################################
+
+    n_features = args.numFeatures
+
+    print('NUM FEATURES: ' + str(n_features))
+
+    model_name = Model().__class__.__name__
+
+    # get the 'worst case' hyperparameter settings for the model (those that require the most resources)
+    with open('performance_indicators/hyperparam_settings.yaml', "r") as file:
+        hp_settings = yaml.safe_load(file)
+
+    hyperparams = {**hp_settings[model_name], **model_settings}
+    print(hyperparams)
+
+    assert os.path.exists(args.inputPath)
+
+    av_circuit_times = [] #the average time after the first circuit has been compiled
+
+    inpF1 = os.path.join(args.inputPath, 'linearly_separable_%dd_train.csv' % (n_features))
+    print('M:inpF1', inpF1)
+    X_train, y_train = read_data(inpF1)
+
+    jax.clear_caches()
+    model = Model(**hyperparams)
+
+    #time the first circuit call (which includes compilation) and a second call on the same input
+    if model_name == 'ProjectedQuantumKernel':
+        time0 = time.time()
+        model.circuit(X_train[0])
+        first_circuit_time = time.time() - time0
+        time0 = time.time()
+        model.circuit(X_train[0])
+        second_circuit_time = time.time() - time0
+    elif model_name == 'IQPKernelClassifier':
+        time0 = time.time()
+        model.circuit(jnp.concatenate((X_train[0], X_train[1])))
+        first_circuit_time = time.time() - time0
+        time0 = time.time()
+        model.circuit(jnp.concatenate((X_train[0], X_train[1])))
+        second_circuit_time = time.time() - time0
+
+    dir_path = f'performance_indicators/profiling'
+    file_path = f'{dir_path}/step_times_{profile_name}_{model_name}.pkl'
+
+    if not os.path.exists(dir_path):
+        os.mkdir(dir_path)
+
+    if not os.path.exists(file_path):
+        data = {}
+        with open(file_path, 'wb') as file:
+            pickle.dump(data, file)
+
+    with open(file_path, 'rb') as file:
+        data = pickle.load(file)
+
+    data[n_features] = [first_circuit_time, second_circuit_time]
+
+    with open(file_path, 'wb') as file:
+        pickle.dump(data, file)
+
+    print('M:done')
diff --git a/nersc/performance_indicators/profiling_variational.py b/nersc/performance_indicators/profiling_variational.py
new file mode 100644
index 00000000..71c2ccec
--- /dev/null
+++ b/nersc/performance_indicators/profiling_variational.py
@@ -0,0 +1,111 @@
+import qml_benchmarks
+import pennylane as qml
+import jax
+import jax.numpy as jnp
+import numpy as np
+import time
+import csv
+import os
+import pickle
+import yaml
+import subprocess
+from qml_benchmarks.hyperparam_search_utils import read_data
+
+import argparse
+
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2, 3, 4], help="increase output verbosity",
+                        default=1, dest='verb')
+    parser.add_argument("--inputPath", default='linearly_separable/', help='input data location')
+    parser.add_argument('-n', '--numFeatures', type=int, default=2, help="dataset dimension ")
+
+    args = parser.parse_args()
+
+    print('myArg-program:', parser.prog)
+    for arg in vars(args): print('myArg:', arg, getattr(args, arg))
+
+    # assert os.path.exists(args.outPath)
+    return args
+
+
+# =================================
+# =================================
+# M A I N
+# =================================
+# =================================
+if __name__ ==
"__main__": + args = get_parser() + + ####### SETTINGS ####################### + # You only need to change this to make a different performance indicator + + #define model + # + #from qml_benchmarks.models.quantum_metric_learning import QuantumMetricLearner as Model + from qml_benchmarks.models.iqp_variational import IQPVariationalClassifier as Model + #implementation attributes of model + use_jax = True + vmap = False + jit = True + model_settings = {'use_jax': use_jax, 'vmap': vmap, 'jit': jit} + + max_steps = 2 #the number of gradient descent steps to use to estimate the step time + profile_name = 'jax_no_vmap_lax' #a name for the performance indicator used for naming files + + + ################################# + + n_features = args.numFeatures + + print('NUM FEATURES: ' + str(n_features)) + + model_name = Model().__class__.__name__ + + # get the 'worst case' hyperparameter settings for the model (those that require the most resources) + with open('performance_indicators/hyperparam_settings.yaml', "r") as file: + hp_settings = yaml.safe_load(file) + + hyperparams = {**hp_settings[model_name], **model_settings} + print(hyperparams) + + assert os.path.exists(args.inputPath) + + first_step_times = [] + second_step_times = [] + + inpF1 = os.path.join(args.inputPath, 'linearly_separable_%dd_train.csv' % (n_features)) + print('M:inpF1', inpF1) + X_train, y_train = read_data(inpF1) + + jax.clear_caches() + model = Model(**hyperparams, max_steps=max_steps) + model.fit(X_train, y_train) + + #get step times from loss history data + step_times = np.array([model.loss_history_[1][i + 1] - model.loss_history_[1][i] + for i in range(len(model.loss_history_[1]) - 1)]) + step_times = np.insert(step_times, 0, [model.loss_history_[1][0]]) + + dir_path = f'performance_indicators/profiling' + file_path = f'{dir_path}/step_times_{profile_name}_{model_name}.pkl' + + if not os.path.exists(dir_path): + os.mkdir(dir_path) + + if not os.path.exists(file_path): + data = {} + with open(file_path, 'wb') as file: + pickle.dump(data, file) + + with open(file_path, 'rb') as file: + data = pickle.load(file) + + data[n_features] = [step_times[0], step_times[1]] + + with open(file_path, 'wb') as file: + pickle.dump(data, file) + + print('M:done') + diff --git a/nersc/pm_podman.source b/nersc/pm_podman.source new file mode 100644 index 00000000..f267304b --- /dev/null +++ b/nersc/pm_podman.source @@ -0,0 +1,24 @@ +#!/bin/bash +# salloc -q interactive -C cpu -t 4:00:00 -A nstaff + +IMG=balewski/ubu22-pennylane:p3 +CFSH=/global/cfs/cdirs/m4139 + +# export PODMANHPC_ADDITIONAL_STORES=/global/cfs/cdirs/nstaff/balewski/podman_common + +echo launch image $IMG +echo you are launching Podman image ... 
remember to exit
+
+JNB_PORT=' '
+BASE_DIR=qml-benchmarks  # here git has home
+WORK_DIR=/$BASE_DIR/nersc
+
+podman-hpc run -it \
+   --volume $HOME:/home \
+   --volume $CFSH/$BASE_DIR:/$BASE_DIR \
+   --volume $CFSH/$WORK_DIR:$WORK_DIR \
+   -e HDF5_USE_FILE_LOCKING='FALSE' \
+   --workdir $WORK_DIR \
+   $IMG /bin/bash
+
diff --git a/nersc/submit_job.slr b/nersc/submit_job.slr
new file mode 100755
index 00000000..49a51610
--- /dev/null
+++ b/nersc/submit_job.slr
@@ -0,0 +1,62 @@
+#!/bin/bash
+#SBATCH -N 1
+#SBATCH -C cpu
+#SBATCH -q shared -t 1:00:00
+# SBATCH -q debug -t 5:00  # charged for full node
+#SBATCH -J perf_ind_qkernel2
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=6
+#SBATCH --output out1/%j.log
+#SBATCH --licenses=scratch
+# - - - E N D O F SLURM C O M M A N D S
+set -u ;  # exit if you try to use an uninitialized variable
+
+echo 'S:start'
+
+numFeatures=3
+
+# .... container
+# salloc -q interactive -C cpu -t 4:00:00 -A nstaff
+
+#IMG=balewski/ubu22-pennylane:p5  # old
+IMG=jbowles/ubu22-pennylane:p5
+echo use image $IMG
+
+POD_PUB=//dvs_ro/cfs/cdirs/m4139/qml-benchmarks/nersc/podman/
+#POD_PUB=/dvs_ro/cfs/cdirs/nstaff/balewski/podman_common/  #old
+export PODMANHPC_ADDITIONAL_STORES=$POD_PUB
+
+#.... output sandbox
+CODE_DIR=`pwd`
+myJob=${SLURM_JOBID}
+outPath=$SCRATCH/tmp_pennylane_jobs/$myJob
+echo outPath=$outPath
+mkdir -p $outPath
+cp -rp performance_indicators *.slr *.sh $outPath
+
+
+#... location of input on CFS
+CFSH=/global/cfs/cdirs/m4139  # for Joseph
+# CFSH=/pscratch/sd/b/balewski/pennylane_wrk  # for Jan
+BASE_DIR=/qml-benchmarks  # here git has home
+WORK_DIR=$BASE_DIR/nersc
+
+#... the task to be executed
+CMD=" python3 -u performance_indicators/perf_ind_kernel.py --numFeatures $numFeatures --inputPath performance_indicators/linearly_separable/ "
+#CMD=" python3 -u -c \"import pennylane \"; echo done1 "
+#CMD=" python3 -u -c \"import qml_benchmarks \"; echo done2 host "
+
+cd $outPath
+# this is a hack Jan does not like, but the 'paper' dir is not part of the pip install
+# qml-benchmarks> cp -rp paper /pscratch/sd/b/balewski/tmp_pennylane_jobs
+
+#ls -l $CFSH/$BASE_DIR/paper
+#ln -s $BASE_DIR/paper ..
+
+G=1
+echo 'S:ready to run '; pwd
+srun -n $G ./wrap_podman.sh $IMG " $CMD " $outPath $CFSH $BASE_DIR $WORK_DIR
+
+sleep 1
+echo 'S:end '; date
+
diff --git a/nersc/submit_profiling_jobs.slr b/nersc/submit_profiling_jobs.slr
new file mode 100755
index 00000000..6d1fa41a
--- /dev/null
+++ b/nersc/submit_profiling_jobs.slr
@@ -0,0 +1,52 @@
+#!/bin/bash
+#SBATCH -N 1
+#SBATCH -C cpu
+#SBATCH -q shared -t 1:00:00
+# SBATCH -q debug -t 5:00  # charged for full node
+#SBATCH -J perf_ind_qkernel2
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=6
+#SBATCH --output out1/%j.log
+#SBATCH --licenses=scratch
+# - - - E N D O F SLURM C O M M A N D S
+set -u ;  # exit if you try to use an uninitialized variable
+
+echo 'S:start'
+
+numFeaturesMin=2
+numFeaturesMax=16
+
+#IMG=balewski/ubu22-pennylane:p5  # old
+IMG=jbowles/ubu22-pennylane:p5
+echo use image $IMG
+
+POD_PUB=//dvs_ro/cfs/cdirs/m4139/qml-benchmarks/nersc/podman/
+#POD_PUB=/dvs_ro/cfs/cdirs/nstaff/balewski/podman_common/  #old
+export PODMANHPC_ADDITIONAL_STORES=$POD_PUB
+
+#.... output sandbox
+CODE_DIR=`pwd`
+myJob=${SLURM_JOBID}
+outPath=$SCRATCH/tmp_pennylane_jobs/$myJob
+echo outPath=$outPath
+mkdir -p $outPath
+cp -rp performance_indicators *.slr *.sh $outPath
+
+#... location of input on CFS
+CFSH=/global/cfs/cdirs/m4139  # for Joseph
+# CFSH=/pscratch/sd/b/balewski/pennylane_wrk  # for Jan
+BASE_DIR=/qml-benchmarks  # here git has home
+WORK_DIR=$BASE_DIR/nersc
+
+cd $outPath
+
+#... the task to be executed, once per feature dimension
+for ((numFeatures=$numFeaturesMin; numFeatures<=$numFeaturesMax; numFeatures++)); do
+    CMD=" python3 -u performance_indicators/profiling.py --numFeatures $numFeatures --inputPath performance_indicators/linearly_separable/ "
+    G=1
+    srun -n $G ./wrap_podman.sh $IMG " $CMD " $outPath $CFSH $BASE_DIR $WORK_DIR
+    sleep 1
+done
+
+echo 'S:end '; date
+
diff --git a/nersc/toy_slurm/Readme b/nersc/toy_slurm/Readme
new file mode 100644
index 00000000..472577d2
--- /dev/null
+++ b/nersc/toy_slurm/Readme
@@ -0,0 +1,33 @@
+Example of Slurm scripts using a fraction of a node (aka the shared queue)
+
+Jan's tricks:
+* write Slurm output to out/%j.log, merge stdout+stderr
+* disable a selected Slurm command with '-', e.g. this is how to change queue by just moving '-':
+#SBATCH -q shared
+#-SBATCH -q debug
+* run the task under time to see the burden on the system
+* do not hardcode paths, use $SCRATCH
+* the $ sqs command at NERSC is your friend
+* always ask for 30% more time, 30% more CPUs, and 5GB more RAM than you think you need
+
+Preparation:
+* always start Slurm jobs from CFS (Community File System) so the Slurm output is written there
+cd /global/cfs/cdirs/mXXX/yyy
+mkdir out
+
+= = = = = =  CPU shared node  = = = = =
+Inspect: submit_shared_cpu.slr
+- reads config from CFS, which controls how long the code runs
+- writes output to SCRATCH, to a sub-dir whose name is the jobId
+- the python task is rank-aware and each rank writes a different output
+
+$ sbatch submit_shared_cpu.slr
+
+$ scontrol show job
+
+
+= = = = = =  GPU shared node  = = = = =
+
+$ sbatch submit_shared_gpu.slr
+- similar workflow, but now the computation is done on the GPU
+
diff --git a/nersc/toy_slurm/input.json b/nersc/toy_slurm/input.json
new file mode 100644
index 00000000..35163625
--- /dev/null
+++ b/nersc/toy_slurm/input.json
@@ -0,0 +1,4 @@
+{"nrows": 25,
+"ncols": 55,
+"iterations": 60,
+"pause": 1.1}
diff --git a/nersc/toy_slurm/run_cpu_task.py b/nersc/toy_slurm/run_cpu_task.py
new file mode 100755
index 00000000..a0083758
--- /dev/null
+++ b/nersc/toy_slurm/run_cpu_task.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+import numpy as np
+import json,os
+import time
+import argparse
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--input", type=str, help="input file")
+    parser.add_argument("--output", type=str, help="output file")
+    args = parser.parse_args()
+
+    return args
+
+
+def main():
+    args = parse_args()
+    for arg in vars(args):
+        print( 'myArgs:',arg, getattr(args, arg))
+    myRank=os.environ['SLURM_PROCID']
+    print('I am rank:',myRank)
+    with open(args.input, 'r') as file:
+        info = json.load(file)
+    matrix = np.random.random((info['nrows'],info['ncols']))
+    for i in range(info['iterations']):
+        matrix += np.random.random((info['nrows'],info['ncols']))
+        time.sleep(info['pause'])
+
+    output=[]
+    for i in range(info['nrows']):
+        output.append(np.sum(matrix[i,:]))
+
+    outF=args.output+myRank
+    with open(outF, 'w') as file:
+        for i in range(len(output)):
+            file.write(str(output[i])+"\n")
+        file.write("---------------------")
+
+if __name__ == "__main__":
+    main()
diff --git a/nersc/toy_slurm/run_gpu_task.py b/nersc/toy_slurm/run_gpu_task.py
new file mode 100755
index 00000000..9bb540ab
--- /dev/null
+++ b/nersc/toy_slurm/run_gpu_task.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+import numpy as
np
+import cupy as cp
+import json,os
+import time
+import argparse
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--input", type=str, help="input file")
+    parser.add_argument("--output", type=str, help="output file")
+    args = parser.parse_args()
+
+    return args
+
+def main():
+    args = parse_args()
+    myRank=os.environ['SLURM_PROCID']
+    print('I am rank:',myRank)
+
+    # Get the number of available CUDA devices
+    num_gpus = cp.cuda.runtime.getDeviceCount()
+    print("Number of GPUs available: %d" % num_gpus)
+
+    # Access and print some GPU attributes
+    for i in range(num_gpus):
+        with cp.cuda.Device(i):
+            compute_capability = cp.cuda.Device().compute_capability
+            total_memory = cp.cuda.Device().mem_info[1]
+            print("GPU %d: Compute Capability: %s, Total Memory: %d bytes" % (i, compute_capability, total_memory))
+
+    assert num_gpus == 1  # make sure the GPU-rank mapping is unique
+
+    with open(args.input, 'r') as file:
+        info = json.load(file)
+    matrix = cp.random.random((info['nrows'],info['ncols']), np.float64)
+    for i in range(info['iterations']):
+        matrix += cp.random.random((info['nrows'],info['ncols']), np.float64)
+        time.sleep(info['pause'])
+
+    output=[]
+    for i in range(info['nrows']):
+        output.append(np.sum(matrix[i,:]))
+
+    outF=args.output+myRank
+    with open(outF, 'w') as file:
+        for i in range(len(output)):
+            file.write(str(output[i])+"\n")
+        file.write("---------------------")
+
+if __name__ == "__main__":
+    main()
diff --git a/nersc/toy_slurm/submit_shared_cpu.slr b/nersc/toy_slurm/submit_shared_cpu.slr
new file mode 100755
index 00000000..8c5dee07
--- /dev/null
+++ b/nersc/toy_slurm/submit_shared_cpu.slr
@@ -0,0 +1,35 @@
+#!/bin/bash
+#SBATCH -N 1
+#SBATCH -C cpu
+#SBATCH -q shared
+#-SBATCH -q debug  # charged for full node
+#SBATCH -J test_cpu12
+#SBATCH --account=nstaff
+#SBATCH -t 00:08:00
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=4
+#-SBATCH --mem=25GB  # optional, use only if needed
+#SBATCH --output out/%j.log
+#SBATCH --licenses=scratch
+# - - - E N D O F SLURM C O M M A N D S
+
+nprocspn=${SLURM_NTASKS_PER_NODE}
+#nprocspn=1  # special case for partial use of full node
+
+N=${SLURM_NNODES}
+G=$[ $N * $nprocspn ]
+jobId=${SLURM_JOBID}
+echo S: G=$G N=$N
+
+OUT_DIR=$SCRATCH/penny_tmp/$jobId
+mkdir -p $OUT_DIR
+
+CFSH=/global/cfs/cdirs/mpccc/balewski/pennylane_nesap/toy_slurm
+
+# ....
starting job proper
+module load pytorch
+time srun -n $G python ./run_cpu_task.py --input $CFSH/input.json --output $OUT_DIR/cpu_output.txt
+
+echo S:train-done
+date
+
diff --git a/nersc/toy_slurm/submit_shared_gpu.slr b/nersc/toy_slurm/submit_shared_gpu.slr
new file mode 100755
index 00000000..5abd33e7
--- /dev/null
+++ b/nersc/toy_slurm/submit_shared_gpu.slr
@@ -0,0 +1,40 @@
+#!/bin/bash
+#SBATCH -N 1
+#SBATCH -C gpu
+#SBATCH -q shared
+#-SBATCH -q debug  # charged for full node
+#SBATCH -J test_gpu12
+#SBATCH --account=nstaff
+#SBATCH -t 00:08:00
+#SBATCH --gpus 2  # total
+#SBATCH --ntasks-per-node=2  # must match # GPUs/node if full node is used
+#SBATCH --gpus-per-task=1  # assures a different GPU is given to each task
+#SBATCH --output out/%j.log
+#SBATCH --licenses=scratch
+# - - - E N D O F SLURM C O M M A N D S
+
+#env|grep SLURM
+
+nprocspn=${SLURM_NTASKS}
+#nprocspn=1  # special case for partial use of full node
+
+N=${SLURM_NNODES}
+G=$[ $N * $nprocspn ]
+jobId=${SLURM_JOBID}
+echo S: G=$G N=$N
+
+OUT_DIR=$SCRATCH/penny_tmp/$jobId
+mkdir -p $OUT_DIR
+
+CFSH=/global/cfs/cdirs/mpccc/balewski/pennylane_nesap/toy_slurm
+
+echo "S: start nvidia-smi"
+nvidia-smi
+module load python
+
+# .... starting job proper
+time srun -n $G --gpus-per-task=1 python ./run_gpu_task.py --input $CFSH/input.json --output $OUT_DIR/gpu_output.txt
+
+echo S:train-done
+date
+
diff --git a/nersc/wrap_podman.sh b/nersc/wrap_podman.sh
new file mode 100755
index 00000000..ee8ec5d0
--- /dev/null
+++ b/nersc/wrap_podman.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+echo W:myRank is $SLURM_PROCID
+IMG=$1
+CMD=$2
+outPath=$3
+CFSH=$4
+BASE_DIR=$5
+WORK_DIR=$6
+
+if [ $SLURM_PROCID -eq 0 ] ; then
+    echo W:IMG=$IMG
+    echo W:CMD=$CMD
+    #echo Q:fire $
+fi
+
+echo W:BASE_DIR=$BASE_DIR
+echo 'W:start podman'
+podman-hpc run -it \
+   --volume $CFSH/$BASE_DIR:/root \
+   --volume $CFSH/$BASE_DIR:$BASE_DIR \
+   --volume $CFSH/$BASE_DIR/nersc/performance_indicators/linearly_separable:/linearly_separable \
+   --volume $CFSH/$WORK_DIR:$WORK_DIR \
+   -e HDF5_USE_FILE_LOCKING='FALSE' \
+   --workdir $WORK_DIR \
+   $IMG < 2 * convergence_interval:  # get means of last two intervals and standard deviation of last interval
             average1 = np.mean(loss_history[-convergence_interval:])
-            average2 = np.mean(
-                loss_history[-2 * convergence_interval : -convergence_interval]
-            )
+            average2 = np.mean(loss_history[-2 * convergence_interval : -convergence_interval])
             std1 = np.std(loss_history[-convergence_interval:])
             # if the difference in averages is small compared to the statistical fluctuations, stop training.
             if np.abs(average2 - average1) <= std1 / np.sqrt(convergence_interval) / 2:
-                logging.info(
-                    f"Model {model.__class__.__name__} converged after {step} steps."
-                )
+                logging.info(f"Model {model.__class__.__name__} converged after {step} steps.")
+                converged = True
+                break
+
+    end = time.time()
+    loss_history = loss_history / np.max(np.abs(loss_history))
+    loss_history = np.array(np.vstack((loss_history, steptimes)))
+    model.loss_history_ = loss_history
+    model.training_time_ = end - start
+
+    #removed for performance profiling
+    #
+    # if not converged:
+    #     print("Loss did not converge:", loss_history)
+    #     raise ConvergenceWarning(
+    #         f"Model {model.__class__.__name__} has not converged after the maximum number of {model.max_steps} steps."
+ # ) + + return params + + +def train_without_jax( + model, loss_fn, optimizer, X, y, random_key_generator, convergence_interval=200 +): + """Trains a model using an optimizer and a loss function, using PennyLane's autograd interface.""" + + params = list(model.params_.values()) + opt = optimizer(stepsize=model.learning_rate) + + loss_history = [] + converged = False + start = time.time() + for step in range(model.max_steps): + key = random_key_generator() + X_batch, y_batch = get_batch_without_jax(X, y, key, batch_size=model.batch_size) + X_batch = pnp.array(X_batch, requires_grad=False) + y_batch = pnp.array(y_batch, requires_grad=False) + loss_val = loss_fn(*params, X_batch, y_batch) + params = opt.step(loss_fn, *params, X_batch, y_batch)[: len(params)] + loss_history.append(loss_val) + + logging.debug(f"{step} - loss: {loss_val}") + + if np.isnan(loss_val): + logging.info(f"nan encountered. Training aborted.") + break + + if step > 2 * convergence_interval: + average1 = np.mean(loss_history[-convergence_interval:]) + average2 = np.mean(loss_history[-2 * convergence_interval : -convergence_interval]) + std1 = np.std(loss_history[-convergence_interval:]) + if np.abs(average2 - average1) <= std1 / np.sqrt(convergence_interval) / 2: + logging.info(f"Model {model.__class__.__name__} converged after {step} steps.") converged = True break @@ -116,14 +171,87 @@ def update(params, opt_state, x, y): model.training_time_ = end - start if not converged: - print("Loss did not converge:", loss_history) raise ConvergenceWarning( f"Model {model.__class__.__name__} has not converged after the maximum number of {model.max_steps} steps." ) + for i, key in enumerate(model.params_.keys()): + model.params_[key] = params[i] + + return model.params_ + + +def train_with_catalyst(model, loss_fn, optimizer, X, y, random_key_generator, convergence_interval=200): + + params = model.params_ + opt = optimizer(learning_rate=model.learning_rate) + + def update(i, args): + params, opt_state, X, y, loss_history, key = args + X_batch, y_batch = get_batch(X, y, key, batch_size=model.batch_size) + loss = loss_fn(params, X_batch, y_batch) + loss_history = loss_history.at[i].set(loss) + #backprop is not supported in catalyst yet, falling back on finite diff + grads = catalyst.grad(loss_fn, method="fd")(params,X_batch, y_batch) + updates, opt_state = opt.update(grads, opt_state) + params = optax.apply_updates(params, updates) + key, subkey = jax.random.split(key) + return (params, opt_state, X, y, loss_history, subkey) + + update = qjit(update) if model.jit else update + + def optimize(params, X, y, steps, loss_history, opt_state, key): + args = (params, opt_state, X, y, loss_history, key) + (params, opt_state, _, _, loss_history, key) = catalyst.for_loop(0,steps,1)(update)(args) + return params, loss_history, opt_state + + optimize = qjit(optimize) if model.jit else optimize + + def train_until_convergence(params, X, y): + converged = False + current_steps = 0 + opt_state = opt.init(params) + loss_histories = [] + while current_steps < model.max_steps: + + key = random_key_generator() + loss_history = jnp.zeros(convergence_interval) + params, loss_history, opt_state = optimize(params, X, y, convergence_interval, loss_history, opt_state, key) + loss_histories.append(loss_history) + current_steps += convergence_interval + + if True in jnp.isnan(loss_history): + logging.info(f"nan encountered. 
Training aborted.") + break + + if len(loss_histories)>=2: + average1 = jnp.mean(loss_histories[-1]) + average2 = jnp.mean(loss_histories[-2]) + std1 = jnp.std(loss_history[-1]) + if jnp.abs(average2 - average1) <= std1 / jnp.sqrt(convergence_interval) / 2: + logging.info(f"Model {model.__class__.__name__} converged after {step} steps.") + converged = True + break + + if not converged: + print("Loss did not converge:", loss_history) + raise ConvergenceWarning( + f"Model {model.__class__.__name__} has not converged after the maximum number of {model.max_steps} steps." + ) + + return params, jnp.concatenate(loss_histories) + + start = time.time() + params, loss_history = train_until_convergence(params, X, y) + end = time.time() + loss_history = np.array(loss_history) + model.loss_history_ = loss_history / np.max(np.abs(loss_history)) + model.training_time_ = end - start + return params + def get_batch(X, y, rnd_key, batch_size=32): """ A generator to get random batches of the data (X, y) @@ -139,9 +267,26 @@ def get_batch(X, y, rnd_key, batch_size=32): array[float]: A batch of target labels shaped (batch_size,) """ all_indices = jnp.array(range(len(X))) - rnd_indices = jax.random.choice( - key=rnd_key, a=all_indices, shape=(batch_size,), replace=True - ) + rnd_indices = jax.random.choice(key=rnd_key, a=all_indices, shape=(batch_size,), replace=True) + return X[rnd_indices], y[rnd_indices] + + +def get_batch_without_jax(X, y, rnd_key, batch_size=32): + """ + A generator to get random batches of the data (X, y) + + Args: + X (array[float]): Input data with shape (n_samples, n_features). + y (array[float]): Target labels with shape (n_samples,) + rnd_key: A jax random key object + batch_size (int): Number of elements in batch + + Returns: + array[float]: A batch of input data shape (batch_size, n_features) + array[float]: A batch of target labels shaped (batch_size,) + """ + all_indices = list(range(len(X))) + rnd_indices = np.random.choice(all_indices, size=(batch_size,), replace=True) return X[rnd_indices], y[rnd_indices] @@ -224,9 +369,7 @@ def chunked_fn(*args): # jnp.concatenate needs to act on arrays with the same shape, so pad the last array if necessary if batch_len / max_vmap % 1 != 0.0: diff = max_vmap - len(res[-1]) - res[-1] = jnp.pad( - res[-1], [(0, diff), *[(0, 0)] * (len(res[-1].shape) - 1)] - ) + res[-1] = jnp.pad(res[-1], [(0, diff), *[(0, 0)] * (len(res[-1].shape) - 1)]) return jnp.concatenate(res)[:-diff] else: return jnp.concatenate(res) @@ -261,9 +404,7 @@ def chunked_grad(params, X, y): set_in_dict( grad_dict, key_list, - jnp.mean( - jnp.array([get_from_dict(grad, key_list) for grad in grads]), axis=0 - ), + jnp.mean(jnp.array([get_from_dict(grad, key_list) for grad in grads]), axis=0), ) return grad_dict @@ -286,9 +427,58 @@ def chunk_loss(loss_fn, max_vmap): def chunked_loss(params, X, y): batch_slices = list(gen_batches(len(X), max_vmap)) - res = jnp.array( - [loss_fn(params, *[X[slice], y[slice]]) for slice in batch_slices] - ) + res = jnp.array([loss_fn(params, *[X[slice], y[slice]]) for slice in batch_slices]) return jnp.mean(res) return chunked_loss + + +####### LOSS UTILS WITHOUT JAX + + +def l2_loss(pred, y): + """ + The square loss function. 0.5 is there to match optax.l2_loss. 
+ """ + return 0.5 * (pred - y) ** 2 + + +def softmax(x, axis=-1): + """ + copied from JAX: https://jax.readthedocs.io/en/latest/_modules/jax/_src/nn/functions.html#softmax + """ + x_max = pnp.max(x, axis, keepdims=True) + unnormalized = pnp.exp(x - x_max) + result = unnormalized / pnp.sum(unnormalized, axis, keepdims=True) + return result + + +def one_hot(a, num_classes=2): + """ + convert an array to a one hot encoded array. + Taken from https://stackoverflow.com/questions/29831489/convert-array-of-indices-to-one-hot-encoded-array-in-numpy + """ + b = pnp.zeros((a.size, num_classes)) + b[pnp.arange(a.size), a] = 1 + return b + + +def log_softmax(x, axis=-1): + """ + taken from jax.nn.log_softmax: + https://jax.readthedocs.io/en/latest/_modules/jax/_src/nn/functions.html#log_softmax + """ + x_arr = pnp.asarray(x) + x_max = pnp.max(x_arr, axis, keepdims=True) + x_max = pnp.array(x_max, requires_grad=False) + shifted = x_arr - x_max + shifted_logsumexp = pnp.log(pnp.sum(pnp.exp(shifted), axis, keepdims=True)) + result = shifted - shifted_logsumexp + return result + + +def softmax_cross_entropy(logits, labels): + """taken from optax source: + https: // github.com / google - deepmind / optax / blob / master / optax / losses / _classification.py # L78%23L103 + """ + return -pnp.sum(labels * log_softmax(logits, axis=-1), axis=-1) diff --git a/src/qml_benchmarks/models/__init__.py b/src/qml_benchmarks/models/__init__.py index a84e4fae..a35b4de2 100644 --- a/src/qml_benchmarks/models/__init__.py +++ b/src/qml_benchmarks/models/__init__.py @@ -80,30 +80,30 @@ class MLPClassifier(MLP): def __init__( - self, - hidden_layer_sizes=(100, 100), - activation="relu", - solver="adam", - alpha=0.0001, - batch_size="auto", - learning_rate="constant", - learning_rate_init=0.001, - power_t=0.5, - max_iter=3000, - shuffle=True, - random_state=None, - tol=1e-4, - verbose=False, - warm_start=False, - momentum=0.9, - nesterovs_momentum=True, - early_stopping=False, - validation_fraction=0.1, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-8, - n_iter_no_change=10, - max_fun=15000, + self, + hidden_layer_sizes=(100, 100), + activation="relu", + solver="adam", + alpha=0.0001, + batch_size="auto", + learning_rate="constant", + learning_rate_init=0.001, + power_t=0.5, + max_iter=3000, + shuffle=True, + random_state=None, + tol=1e-4, + verbose=False, + warm_start=False, + momentum=0.9, + nesterovs_momentum=True, + early_stopping=False, + validation_fraction=0.1, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-8, + n_iter_no_change=10, + max_fun=15000, ): super().__init__( hidden_layer_sizes=hidden_layer_sizes, @@ -134,16 +134,16 @@ def __init__( class SVC(SVC_base): def __init__( - self, - C=1.0, - degree=3, - gamma="scale", - coef0=0.0, - shrinking=True, - probability=False, - tol=0.001, - max_iter=-1, - random_state=None, + self, + C=1.0, + degree=3, + gamma="scale", + coef0=0.0, + shrinking=True, + probability=False, + tol=0.001, + max_iter=-1, + random_state=None, ): super().__init__( C=C, diff --git a/src/qml_benchmarks/models/circuit_centric.py b/src/qml_benchmarks/models/circuit_centric.py index 497a2bd6..1c51e883 100644 --- a/src/qml_benchmarks/models/circuit_centric.py +++ b/src/qml_benchmarks/models/circuit_centric.py @@ -170,12 +170,8 @@ def initialize(self, n_features, classes=None): def initialize_params(self): # initialise the trainable parameters - shape = qml.StronglyEntanglingLayers.shape( - n_layers=self.n_layers, n_wires=self.n_qubits_ - ) - weights = jax.random.uniform( - self.generate_key(), minval=0, 
maxval=2 * np.pi, shape=shape - ) + shape = qml.StronglyEntanglingLayers.shape(n_layers=self.n_layers, n_wires=self.n_qubits_) + weights = jax.random.uniform(self.generate_key(), minval=0, maxval=2 * np.pi, shape=shape) b = jnp.array(0.01) self.params_ = {"weights": weights, "b": b} @@ -194,9 +190,7 @@ def fit(self, X, y): optimizer = optax.adam def loss_fn(params, X, y): - pred = self.forward( - params, X - ) # jnp.stack([self.forward(params, x) for x in X]) + pred = self.forward(params, X) # jnp.stack([self.forward(params, x) for x in X]) return jnp.mean(optax.l2_loss(pred, y)) if self.jit: @@ -254,7 +248,5 @@ def transform(self, X, preprocess=False): padding = np.ones(shape=(len(X), n_padding)) / max_n_features X_padded = np.c_[X, padding] - X_normalised = np.divide( - X_padded, np.expand_dims(np.linalg.norm(X_padded, axis=1), axis=1) - ) + X_normalised = np.divide(X_padded, np.expand_dims(np.linalg.norm(X_padded, axis=1), axis=1)) return X_normalised diff --git a/src/qml_benchmarks/models/convolutional_neural_network.py b/src/qml_benchmarks/models/convolutional_neural_network.py index f4670d20..08f28c70 100644 --- a/src/qml_benchmarks/models/convolutional_neural_network.py +++ b/src/qml_benchmarks/models/convolutional_neural_network.py @@ -37,13 +37,9 @@ class CNN(nn.Module): @nn.compact def __call__(self, x): - x = nn.Conv( - features=output_channels[0], kernel_size=(kernel_shape, kernel_shape) - )(x) + x = nn.Conv(features=output_channels[0], kernel_size=(kernel_shape, kernel_shape))(x) x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2)) - x = nn.Conv( - features=output_channels[1], kernel_size=(kernel_shape, kernel_shape) - )(x) + x = nn.Conv(features=output_channels[1], kernel_size=(kernel_shape, kernel_shape))(x) x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2)) x = x.reshape((x.shape[0], -1)) # flatten x = nn.Dense(features=output_channels[1] * 2)(x) diff --git a/src/qml_benchmarks/models/data_reuploading.py b/src/qml_benchmarks/models/data_reuploading.py index 9d80f6ad..8466a12a 100644 --- a/src/qml_benchmarks/models/data_reuploading.py +++ b/src/qml_benchmarks/models/data_reuploading.py @@ -138,10 +138,7 @@ def circuit(params, x): x_idx = 0 # to keep track of the data index for i in range(self.n_qubits_): # scaled inputs - angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][layer, x_idx : x_idx + 3] - ) + angles = x[x_idx : x_idx + 3] * params["omegas"][layer, x_idx : x_idx + 3] qml.Rot(*angles, wires=i) # variational @@ -158,8 +155,7 @@ def circuit(params, x): x_idx = 0 for i in range(self.n_qubits_): angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][self.n_layers, x_idx : x_idx + 3] + x[x_idx : x_idx + 3] * params["omegas"][self.n_layers, x_idx : x_idx + 3] + params["thetas"][i, self.n_layers, :] ) qml.Rot(*angles, wires=i) @@ -254,8 +250,7 @@ def loss_fn(params, x, y): / 2 * ( jnp.sum( - (alpha_mat0 * probs0 - y_mat0) ** 2 - + (alpha_mat1 * probs1 - y_mat1) ** 2 + (alpha_mat0 * probs0 - y_mat0) ** 2 + (alpha_mat1 * probs1 - y_mat1) ** 2 ) ) ) # eqn 23 in plots @@ -409,20 +404,14 @@ def circuit(params, x): for layer in range(self.n_layers): x_idx = 0 # to keep track of the data index for i in range(self.n_qubits_): - angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][layer, x_idx : x_idx + 3] - ) + angles = x[x_idx : x_idx + 3] * params["omegas"][layer, x_idx : x_idx + 3] qml.Rot(*angles, wires=i) x_idx += 3 # final reupload without CZs x_idx = 0 for i in range(self.n_qubits_): - angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][self.n_layers, x_idx 
: x_idx + 3] - ) + angles = x[x_idx : x_idx + 3] * params["omegas"][self.n_layers, x_idx : x_idx + 3] qml.Rot(*angles, wires=i) x_idx += 3 @@ -521,8 +510,7 @@ def circuit(params, x): x_idx = 0 # to keep track of the data index for i in range(self.n_qubits_): angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][layer, x_idx : x_idx + 3] + x[x_idx : x_idx + 3] * params["omegas"][layer, x_idx : x_idx + 3] + params["thetas"][i, layer, :] ) qml.Rot(*angles, wires=i) @@ -532,8 +520,7 @@ def circuit(params, x): x_idx = 0 for i in range(self.n_qubits_): angles = ( - x[x_idx : x_idx + 3] - * params["omegas"][self.n_layers, x_idx : x_idx + 3] + x[x_idx : x_idx + 3] * params["omegas"][self.n_layers, x_idx : x_idx + 3] + params["thetas"][i, self.n_layers, :] ) qml.Rot(*angles, wires=i) diff --git a/src/qml_benchmarks/models/dressed_quantum_circuit.py b/src/qml_benchmarks/models/dressed_quantum_circuit.py index 6ccca60d..354d22f7 100644 --- a/src/qml_benchmarks/models/dressed_quantum_circuit.py +++ b/src/qml_benchmarks/models/dressed_quantum_circuit.py @@ -13,6 +13,8 @@ # limitations under the License. import pennylane as qml +import catalyst +from catalyst import qjit import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.base import BaseEstimator, ClassifierMixin @@ -26,6 +28,8 @@ def __init__( learning_rate=0.001, batch_size=32, max_vmap=None, + use_jax = False, + vmap = False, jit=True, max_steps=100000, convergence_interval=200, @@ -66,6 +70,8 @@ def __init__( self.convergence_interval = convergence_interval self.dev_type = dev_type self.qnode_kwargs = qnode_kwargs + self.use_jax = use_jax + self.vmap = vmap self.jit = jit self.scaling = scaling self.random_state = random_state @@ -130,9 +136,19 @@ def dressed_circuit(params, x): return x if self.jit: - dressed_circuit = jax.jit(dressed_circuit) - self.forward = jax.vmap(dressed_circuit, in_axes=(None, 0)) - self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) + if self.use_jax: + dressed_circuit = jax.jit(dressed_circuit) + else: + dressed_circuit = qjit(dressed_circuit, autograph=True) + + if self.vmap and self.use_jax: + self.forward = jax.vmap(dressed_circuit, in_axes=(None, 0)) + self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) + + else: + def forward(params,X): + return jnp.array([dressed_circuit(params,x) for x in X]) + self.forward = forward return self.forward @@ -161,19 +177,14 @@ def initialize_params(self): circuit_weights = ( 2 * jnp.pi - * jax.random.uniform( - shape=(self.n_layers, self.n_qubits_), key=self.generate_key() - ) + * jax.random.uniform(shape=(self.n_layers, self.n_qubits_), key=self.generate_key()) ) input_weights = ( - jax.random.normal( - shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key() - ) + jax.random.normal(shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key()) / self.n_features_ ) output_weights = ( - jax.random.normal(shape=(2, self.n_qubits_), key=self.generate_key()) - / self.n_features_ + jax.random.normal(shape=(2, self.n_qubits_), key=self.generate_key()) / self.n_features_ ) self.params_ = { "circuit_weights": circuit_weights, @@ -204,16 +215,25 @@ def loss_fn(params, X, y): return jnp.mean(optax.softmax_cross_entropy(vals, labels)) if self.jit: - loss_fn = jax.jit(loss_fn) - self.params_ = train( - self, - loss_fn, - optimizer, - X, - y, - self.generate_key, - convergence_interval=self.convergence_interval, - ) + if self.use_jax: + loss_fn = jax.jit(loss_fn) + else: + loss_fn = qjit(loss_fn) + + if self.use_jax: 
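+ # JAX path: hand the (possibly jitted) loss to the existing optax-based train() loop; + # the Catalyst branch below uses train_with_catalyst instead, which is assumed to + # mirror train()'s optimizer handling and convergence_interval semantics.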
+ self.params_ = train( + self, + loss_fn, + optimizer, + X, + y, + self.generate_key, + convergence_interval=self.convergence_interval, + ) + else: + self.params_ = train_with_catalyst( + self, loss_fn, optimizer, X, y, self.generate_key, + convergence_interval=self.convergence_interval) return self @@ -241,7 +261,11 @@ def predict_proba(self, X): (n_samples, n_classes) """ X = self.transform(X) - return jax.nn.softmax(self.chunked_forward(self.params_, X)) + if self.vmap: + out = self.chunked_forward(self.params_, X) + else: + out = self.forward(self.params_, X) + return jax.nn.softmax(out) def transform(self, X, preprocess=True): """ @@ -277,20 +301,15 @@ def dressed_circuit(params, x): def initialize_params(self): # initialise the trainable parameters mid_weights = ( - jax.random.normal( - shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key() - ) + jax.random.normal(shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key()) / self.n_features_ ) input_weights = ( - jax.random.normal( - shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key() - ) + jax.random.normal(shape=(self.n_qubits_, self.n_qubits_), key=self.generate_key()) / self.n_features_ ) output_weights = ( - jax.random.normal(shape=(2, self.n_qubits_), key=self.generate_key()) - / self.n_features_ + jax.random.normal(shape=(2, self.n_qubits_), key=self.generate_key()) / self.n_features_ ) self.params_ = { "mid_weights": mid_weights, diff --git a/src/qml_benchmarks/models/iqp_kernel.py b/src/qml_benchmarks/models/iqp_kernel.py index e8f5853d..e381bb72 100644 --- a/src/qml_benchmarks/models/iqp_kernel.py +++ b/src/qml_benchmarks/models/iqp_kernel.py @@ -13,6 +13,8 @@ # limitations under the License. import time + +import catalyst import pennylane as qml import numpy as np import jax @@ -21,22 +23,24 @@ from sklearn.svm import SVC from sklearn.preprocessing import MinMaxScaler from qml_benchmarks.model_utils import chunk_vmapped_fn +from catalyst import qjit jax.config.update("jax_enable_x64", True) - class IQPKernelClassifier(BaseEstimator, ClassifierMixin): def __init__( self, svm=SVC(kernel="precomputed", probability=True), repeats=2, C=1.0, - jit=False, + dev_type = None, + use_jax=False, + vmap=True, + jit=True, random_state=42, scaling=1.0, max_vmap=250, - dev_type="default.qubit.jax", - qnode_kwargs={"interface": "jax-jit", "diff_method": None}, + qnode_kwargs={}, ): r""" Kernel version of the classifier from https://arxiv.org/pdf/1804.11326v2.pdf. @@ -58,10 +62,12 @@ def __init__( svm (sklearn.svm.SVC): scikit-learn SVM class object used to fit the model from the kernel matrix repeats (int): number of times the IQP structure is repeated in the embedding circuit. C (float): regularization parameter for SVC. Lower values imply stronger regularization. + use_jax (bool): Whether to use jax. If False, no jitting and vmapping is performed either jit (bool): Whether to use just in time compilation. - dev_type (str): string specifying the pennylane device type; e.g. 'default.qubit'. + vmap (bool): Whether to use jax.vmap. max_vmap (int or None): The maximum size of a chunk to vectorise over. Lower values use less memory. must divide batch_size. + dev_type (str): string specifying the pennylane device type; e.g. 'default.qubit'. qnode_kwargs (str): the key word arguments passed to the circuit qnode. scaling (float): Factor by which to scale the input data. random_state (int): seed used for reproducibility. 
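For orientation, each kernel entry this class computes is the embedding-state overlap K(x1, x2) = |<phi(x2)|phi(x1)>|^2, read off as the probability of the all-zeros outcome after embedding x1 and applying the adjoint embedding of x2. A self-contained PennyLane sketch of that identity, with toy sizes and independent of the class above (illustrative, not part of the patch):

import numpy as np
import pennylane as qml

n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def kernel_entry(x1, x2):
    # embed x1, un-embed x2; the |0...0> probability is the squared overlap
    qml.IQPEmbedding(x1, wires=range(n_qubits), n_repeats=2)
    qml.adjoint(qml.IQPEmbedding)(x2, wires=range(n_qubits), n_repeats=2)
    return qml.probs()

x = np.array([0.1, 0.2])
print(kernel_entry(x, x)[0])  # identical inputs give overlap 1.0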
@@ -69,14 +75,20 @@ def __init__( # attributes that do not depend on data self.repeats = repeats self.C = C - self.jit = jit self.max_vmap = max_vmap self.svm = svm - self.dev_type = dev_type self.qnode_kwargs = qnode_kwargs self.scaling = scaling self.random_state = random_state self.rng = np.random.default_rng(random_state) + # device-related attributes + if dev_type is not None: + self.dev_type = dev_type + else: + self.dev_type = "default.qubit.jax" if use_jax else "lightning.qubit" + self.use_jax = use_jax + self.vmap = vmap + self.jit = jit # data-dependant attributes # which will be initialised by calling "fit" @@ -86,37 +98,46 @@ def __init__( self.circuit = None def generate_key(self): - return jax.random.PRNGKey(self.rng.integers(1000000)) + return self.rng.integers(1000000) def construct_circuit(self): dev = qml.device(self.dev_type, wires=self.n_qubits_) - @qml.qnode(dev, **self.qnode_kwargs) - def circuit(x): - """ - circuit used to precomute the kernel matrix K(x_1,x_2). - Args: - x (np.array): vector of length 2*num_feature that is the concatenation of x_1 and x_2 - - Returns: - (float) the value of the kernel fucntion K(x_1,x_2) - """ - qml.IQPEmbedding( - x[: self.n_qubits_], wires=range(self.n_qubits_), n_repeats=self.repeats - ) - qml.adjoint( + def wrapped_circuit(x): + @qml.qnode(dev, **self.qnode_kwargs) + def circuit(x): + """ + circuit used to precompute the kernel matrix K(x_1,x_2). + Args: + x (np.array): vector of length 2*num_feature that is the concatenation of x_1 and x_2 + + Returns: + (float) the value of the kernel function K(x_1,x_2) + """ qml.IQPEmbedding( - x[self.n_qubits_ :], - wires=range(self.n_qubits_), - n_repeats=self.repeats, + x[: self.n_qubits_], wires=range(self.n_qubits_), n_repeats=self.repeats + ) + qml.adjoint( + qml.IQPEmbedding( + x[self.n_qubits_ :], + wires=range(self.n_qubits_), + n_repeats=self.repeats, + ) ) - ) - return qml.probs() - self.circuit = circuit + return qml.probs() + return circuit(x)[0] + + circuit = wrapped_circuit if self.jit: - circuit = jax.jit(circuit) + if self.use_jax: + circuit = jax.jit(circuit) + else: + circuit = qjit(circuit) + + self.circuit = circuit + return circuit def precompute_kernel(self, X1, X2): @@ -131,19 +152,38 @@ def precompute_kernel(self, X1, X2): dim1 = len(X1) dim2 = len(X2) - # concatenate all pairs of vectors - Z = jnp.array( - [np.concatenate((X1[i], X2[j])) for i in range(dim1) for j in range(dim2)] - ) - circuit = self.construct_circuit() - self.batched_circuit = chunk_vmapped_fn( - jax.vmap(circuit, 0), start=0, max_vmap=self.max_vmap - ) - kernel_values = self.batched_circuit(Z)[:, 0] - # reshape the values into the kernel matrix - kernel_matrix = np.reshape(kernel_values, (dim1, dim2)) + if self.use_jax: + # concatenate all pairs of vectors + Z = np.array([np.concatenate((X1[i], X2[j])) for i in range(dim1) for j in range(dim2)]) + # if batched circuit is used + if self.vmap: + self.batched_circuit = chunk_vmapped_fn( + jax.vmap(circuit, 0), start=0, max_vmap=self.max_vmap + ) + else: + def batched_circuit(X): + return jnp.vstack([circuit(x) for x in X]) + self.batched_circuit = batched_circuit + kernel_values = self.batched_circuit(Z) + # reshape the values into the kernel matrix + kernel_matrix = np.reshape(kernel_values, (dim1, dim2)) + else: + # could also use catalyst.vmap as above, although it likely amounts to the same thing as this loop.
+ X1 = jnp.array(X1) + X2 = jnp.array(X2) + def construct_kernel(X1,X2): + dim1 = len(X1) + dim2 = len(X2) + kernel_matrix = jnp.zeros([dim1, dim2]) + for i, x in enumerate(X1): + for j, y in enumerate(X2): + kernel_matrix = kernel_matrix.at[i,j].set(circuit(jnp.concatenate((x, y)))) + return kernel_matrix + if self.jit: + construct_kernel = qjit(construct_kernel, autograph=True) + kernel_matrix = construct_kernel(X1,X2) return kernel_matrix @@ -174,12 +214,7 @@ def fit(self, X, y): y (np.ndarray): Labels of shape (n_samples,) """ - self.svm.random_state = int( - jax.random.randint( - self.generate_key(), shape=(1,), minval=0, maxval=1000000 - ) - ) - + self.svm.random_state = self.generate_key() self.initialize(X.shape[1], np.unique(y)) self.scaler = MinMaxScaler(feature_range=(-np.pi / 2, np.pi / 2)) @@ -187,15 +222,15 @@ def fit(self, X, y): X = self.transform(X) self.params_ = {"x_train": X} + start = time.time() kernel_matrix = self.precompute_kernel(X, X) + self.construct_kernel_time_ = time.time() - start - start = time.time() # we are updating this value here, in case it was # changed after initialising the model self.svm.C = self.C self.svm.fit(kernel_matrix, y) - end = time.time() - self.training_time_ = end - start + self.training_time_ = time.time() - start return self diff --git a/src/qml_benchmarks/models/iqp_variational.py b/src/qml_benchmarks/models/iqp_variational.py index 09883456..82bbd032 100644 --- a/src/qml_benchmarks/models/iqp_variational.py +++ b/src/qml_benchmarks/models/iqp_variational.py @@ -16,6 +16,7 @@ import numpy as np from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.preprocessing import MinMaxScaler +from catalyst import qjit from qml_benchmarks.model_utils import * @@ -26,14 +27,16 @@ def __init__( n_layers=10, learning_rate=0.001, batch_size=32, - max_vmap=None, + use_jax=False, jit=True, + vmap=True, + max_vmap=None, max_steps=10000, convergence_interval=200, random_state=42, scaling=1.0, - dev_type="default.qubit.jax", - qnode_kwargs={"interface": "jax"}, + dev_type=None, + qnode_kwargs={}, ): r""" Variational verison of the classifier from https://arxiv.org/pdf/1804.11326v2.pdf. @@ -54,10 +57,12 @@ def __init__( n_layers (int): Number of layers in the variational part of the circuit. learning_rate (float): Learning rate for gradient descent. batch_size (int): Size of batches used for computing paraemeter updates. + use_jax (bool): Whether to use jax. If False, no jitting and vmapping is performed either. + jit (bool): Whether to use just in time compilation. + vmap (bool): Whether to use jax.vmap. max_vmap (int or None): The maximum size of a chunk to vectorise over. Lower values use less memory. must divide batch_size. - jit (bool): Whether to use just in time compilation. - convergence_threshold (float): If loss changes less than this threshold for 10 consecutive steps we stop training. + convergence_interval (int): If loss does not change significantly in this interval, stop training. max_steps (int): Maximum number of training steps. A warning will be raised if training did not converge. dev_type (str): string specifying the pennylane device type; e.g. 'default.qubit'. qnode_kwargs (str): the keyword arguments passed to the circuit qnode. 
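To make the new switches concrete, a minimal usage sketch of this classifier (toy data and settings; the class name is as defined in this module, everything else is illustrative and not part of the patch):

import numpy as np
from qml_benchmarks.models.iqp_variational import IQPVariationalClassifier

rng = np.random.default_rng(1)
X = rng.uniform(-1, 1, size=(16, 3))
y = np.where(X.sum(axis=1) > 0, 1, -1)  # toy labels

# use_jax=False selects lightning.qubit and the qjit/Catalyst training path
clf = IQPVariationalClassifier(use_jax=False, jit=True, max_steps=200)
clf.fit(X, y)
proba = clf.predict_proba(X)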
@@ -72,17 +77,20 @@ def __init__( self.max_steps = max_steps self.convergence_interval = convergence_interval self.batch_size = batch_size - self.dev_type = dev_type self.qnode_kwargs = qnode_kwargs + self.use_jax = use_jax + self.vmap = vmap self.jit = jit self.scaling = scaling self.random_state = random_state self.rng = np.random.default_rng(random_state) - if max_vmap is None: - self.max_vmap = self.batch_size + if dev_type is not None: + self.dev_type = dev_type else: - self.max_vmap = max_vmap + self.dev_type = "default.qubit.jax" if use_jax else "lightning.qubit" + + self.max_vmap = self.batch_size if max_vmap is None else max_vmap # data-dependant attributes # which will be initialised by calling "fit" @@ -97,24 +105,64 @@ def generate_key(self): def construct_model(self): dev = qml.device(self.dev_type, wires=self.n_qubits_) - @qml.qnode(dev, **self.qnode_kwargs) - def circuit(params, x): - """ - The variational circuit from the plots. Uses an IQP data embedding. - We use the same observable as in the plots. - """ - qml.IQPEmbedding(x, wires=range(self.n_qubits_), n_repeats=self.repeats) - qml.StronglyEntanglingLayers( - params["weights"], wires=range(self.n_qubits_), imprimitive=qml.CZ - ) - return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) + if self.use_jax: + + @qml.qnode(dev, **self.qnode_kwargs) + def circuit(params, x): + """ + The variational circuit from the plots. Uses an IQP data embedding. + We use the same observable as in the plots. + """ + qml.IQPEmbedding(x, wires=range(self.n_qubits_), n_repeats=self.repeats) + qml.StronglyEntanglingLayers( + params["weights"], wires=range(self.n_qubits_), imprimitive=qml.CZ + ) + return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) + + self.circuit = circuit + + if self.jit: + circuit = jax.jit(circuit) + + if self.vmap: + # use jax and batch feed the circuit + self.forward = jax.vmap(circuit, in_axes=(None, 0)) + self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) + + else: + def apply_circuit(params, x): + result = circuit(params, x) + return params, result + + def forward(params, X): + params, results = jax.lax.scan(apply_circuit, params, X) + return results + + self.forward = forward + + else: + + @qml.qnode(dev, **self.qnode_kwargs) + def circuit(params, x): + """ + The variational circuit from the plots. Uses an IQP data embedding. + We use the same observable as in the plots. 
+ """ + qml.IQPEmbedding(x, wires=range(self.n_qubits_), n_repeats=self.repeats) + qml.StronglyEntanglingLayers( + params["weights"], wires=range(self.n_qubits_), imprimitive=qml.CZ + ) + return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - self.circuit = circuit + if self.jit: + circuit = qjit(circuit) - if self.jit: - circuit = jax.jit(circuit) - self.forward = jax.vmap(circuit, in_axes=(None, 0)) - self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) + self.circuit = circuit + + def forward(params, X): + return jnp.array([circuit(params, x) for x in X]) + + self.forward = forward return self.forward @@ -139,13 +187,19 @@ def initialize(self, n_features, classes=None): def initialize_params(self): # initialise the trainable parameters - weights = ( - 2 - * jnp.pi - * jax.random.uniform( - shape=(self.n_layers, self.n_qubits_, 3), key=self.generate_key() + + if self.use_jax: + weights = ( + 2 + * jnp.pi + * jax.random.uniform( + shape=(self.n_layers, self.n_qubits_, 3), key=self.generate_key() + ) ) - ) + else: + weights = 2 * np.pi * np.random.uniform(size=(self.n_layers, self.n_qubits_, 3)) + weights = jnp.array(weights) + self.params_ = {"weights": weights} def fit(self, X, y): @@ -162,24 +216,43 @@ def fit(self, X, y): self.scaler.fit(X) X = self.transform(X) - optimizer = optax.adam - - def loss_fn(params, X, y): - expvals = self.forward(params, X) - probs = (1 - expvals * y) / 2 # the probs of incorrect classification - return jnp.mean(probs) - - if self.jit: - loss_fn = jax.jit(loss_fn) - self.params_ = train( - self, - loss_fn, - optimizer, - X, - y, - self.generate_key, - convergence_interval=self.convergence_interval, - ) + if self.use_jax: + + optimizer = optax.adam + + def loss_fn(params, X, y): + expvals = self.forward(params, X) + probs = (1 - expvals * y) / 2 # the probs of incorrect classification + return jnp.mean(probs) + + if self.jit: + loss_fn = jax.jit(loss_fn) + + self.params_ = train( + self, + loss_fn, + optimizer, + X, + y, + self.generate_key, + convergence_interval=self.convergence_interval, + ) + + else: + X = np.array(X) + y = np.array(y) + optimizer = optax.adam + + def loss_fn(params, X, y): + expvals = self.forward(params, X) + probs = (1 - expvals * y) / 2 # the probs of incorrect classification + return jnp.mean(probs) + + if self.jit: + loss_fn = qjit(loss_fn) + + self.params_ = train_with_catalyst(self, loss_fn, optimizer, X, y, self.generate_key, + convergence_interval=self.convergence_interval) return self @@ -207,7 +280,13 @@ def predict_proba(self, X): (n_samples, n_classes) """ X = self.transform(X) - predictions = self.chunked_forward(self.params_, X) + if self.use_jax: + if self.vmap: + predictions = self.chunked_forward(self.params_, X) + else: + predictions = self.forward(self.params_, X) + else: + predictions = self.forward(self.params_, X) predictions_2d = np.c_[(1 - predictions) / 2, (1 + predictions) / 2] return predictions_2d diff --git a/src/qml_benchmarks/models/projected_quantum_kernel.py b/src/qml_benchmarks/models/projected_quantum_kernel.py index 83ae06ed..337d3dd1 100644 --- a/src/qml_benchmarks/models/projected_quantum_kernel.py +++ b/src/qml_benchmarks/models/projected_quantum_kernel.py @@ -13,6 +13,9 @@ # limitations under the License. 
import time + +import catalyst +from catalyst import qjit import pennylane as qml import numpy as np import jax @@ -24,7 +27,6 @@ jax.config.update("jax_enable_x64", True) - class ProjectedQuantumKernel(BaseEstimator, ClassifierMixin): def __init__( self, @@ -34,11 +36,13 @@ def __init__( embedding="Hamiltonian", t=1.0 / 3, trotter_steps=5, + use_jax=False, jit=True, + vmap = True, max_vmap=None, scaling=1.0, - dev_type="default.qubit.jax", - qnode_kwargs={"interface": "jax-jit", "diff_method": None}, + dev_type=None, + qnode_kwargs={}, random_state=42, ): r""" @@ -91,17 +95,18 @@ def __init__( self.embedding = embedding self.t = t self.trotter_steps = trotter_steps + self.use_jax = use_jax + self.vmap = vmap self.jit = jit - self.dev_type = dev_type self.qnode_kwargs = qnode_kwargs self.scaling = scaling self.random_state = random_state self.rng = np.random.default_rng(random_state) - - if max_vmap is None: - self.max_vmap = 50 + if dev_type is not None: + self.dev_type = dev_type else: - self.max_vmap = max_vmap + self.dev_type = "default.qubit.jax" if use_jax else "lightning.qubit" + self.max_vmap = 50 if max_vmap is None else max_vmap # data-dependant attributes # which will be initialised by calling "fit" @@ -116,19 +121,12 @@ def generate_key(self): return jax.random.PRNGKey(self.rng.integers(1000000)) def construct_circuit(self): - """ - Constructs the circuit to get the expvals of a given qubit and Pauli operator - We will use JAX to parallelize over these circuits in precompute kernel. - Args: - P: a pennylane Pauli X,Y,Z operator on a given qubit - """ - if self.embedding == "IQP": + if self.embedding == "IQP": def embedding(x): qml.IQPEmbedding(x, wires=range(self.n_qubits_), n_repeats=2) elif self.embedding == "Hamiltonian": - def embedding(x): evol_time = self.t / self.trotter_steps * (self.n_qubits_ - 1) for i in range(self.n_qubits_): @@ -146,26 +144,60 @@ def embedding(x): dev = qml.device(self.dev_type, wires=self.n_qubits_) - @qml.qnode(dev, **self.qnode_kwargs) - def circuit(x): - embedding(x) - return ( - [qml.expval(qml.PauliX(wires=i)) for i in range(self.n_qubits_)] - + [qml.expval(qml.PauliY(wires=i)) for i in range(self.n_qubits_)] - + [qml.expval(qml.PauliZ(wires=i)) for i in range(self.n_qubits_)] - ) + if self.use_jax: + @qml.qnode(dev, **self.qnode_kwargs) + def circuit(x): + embedding(x) + return ( + [qml.expval(qml.PauliX(wires=i)) for i in range(self.n_qubits_)] + + [qml.expval(qml.PauliY(wires=i)) for i in range(self.n_qubits_)] + + [qml.expval(qml.PauliZ(wires=i)) for i in range(self.n_qubits_)] + ) - self.circuit = circuit + def circuit_XYZ(x): + return jnp.array(circuit(x)) - def circuit_as_array(x): - return jnp.array(circuit(x)) + else: + # currently only support for returning expvals of qubit-wise commuting observables, + # so we split into three circuits + @qml.qnode(dev, **self.qnode_kwargs) + def circuitX(x): + embedding(x) + return [qml.expval(qml.PauliX(wires=i)) for i in range(self.n_qubits_)] + + @qml.qnode(dev, **self.qnode_kwargs) + def circuitY(x): + embedding(x) + return [qml.expval(qml.PauliY(wires=i)) for i in range(self.n_qubits_)] + + @qml.qnode(dev, **self.qnode_kwargs) + def circuitZ(x): + embedding(x) + return [qml.expval(qml.PauliZ(wires=i)) for i in range(self.n_qubits_)] + + def circuit_XYZ(x): + xvals = jnp.array(circuitX(x)) + yvals = jnp.array(circuitY(x)) + zvals = jnp.array(circuitZ(x)) + return jnp.concatenate((xvals, yvals, zvals)) if self.jit: - circuit_as_array = jax.jit(circuit_as_array) - circuit_as_array = 
jax.vmap(circuit_as_array, in_axes=(0)) - circuit_as_array = chunk_vmapped_fn(circuit_as_array, 0, self.max_vmap) + if self.use_jax: + circuit_XYZ = jax.jit(circuit_XYZ) + else: + circuit_XYZ = qjit(circuit_XYZ, autograph=True) + + if self.vmap: + if self.use_jax: + batched_circuit = jax.vmap(circuit_XYZ, in_axes=(0)) + batched_circuit = chunk_vmapped_fn(batched_circuit, 0, self.max_vmap) + else: + batched_circuit = catalyst.vmap(circuit_XYZ, in_axes=(0)) + else: + def batched_circuit(X): + return jnp.vstack([circuit_XYZ(x) for x in X]) - return circuit_as_array + return batched_circuit def precompute_kernel(self, X1, X2): """ @@ -182,48 +214,70 @@ def precompute_kernel(self, X1, X2): # get all of the Pauli expvals needed to constrcut the kernel self.circuit = self.construct_circuit() - valsX1 = np.array(self.circuit(X1)) - valsX1 = np.reshape(valsX1, (dim1, 3, -1)) - valsX2 = np.array(self.circuit(X2)) - valsX2 = np.reshape(valsX2, (dim2, 3, -1)) - - valsX_X1 = valsX1[:, 0] - valsX_X2 = valsX2[:, 0] - valsY_X1 = valsX1[:, 1] - valsY_X2 = valsX2[:, 1] - valsZ_X1 = valsX1[:, 2] - valsZ_X2 = valsX2[:, 2] - - all_vals_X1 = np.reshape(np.concatenate((valsX_X1, valsY_X1, valsZ_X1)), -1) - default_gamma = 1 / np.var(all_vals_X1) / self.n_features_ - - # construct kernel following plots - kernel_matrix = np.zeros([dim1, dim2]) - - for i in range(dim1): - for j in range(dim2): - sumX = sum( - [ - (valsX_X1[i, q] - valsX_X2[j, q]) ** 2 - for q in range(self.n_qubits_) - ] - ) - sumY = sum( - [ - (valsY_X1[i, q] - valsY_X2[j, q]) ** 2 - for q in range(self.n_qubits_) - ] - ) - sumZ = sum( - [ - (valsZ_X1[i, q] - valsZ_X2[j, q]) ** 2 - for q in range(self.n_qubits_) - ] - ) + if self.use_jax: + #not actually using JAX here but it is part of the JAX pipeline + valsX1 = np.array(self.circuit(X1)) + valsX1 = np.reshape(valsX1, (dim1, 3, -1)) + valsX2 = np.array(self.circuit(X2)) + valsX2 = np.reshape(valsX2, (dim2, 3, -1)) + + valsX_X1 = valsX1[:, 0] + valsX_X2 = valsX2[:, 0] + valsY_X1 = valsX1[:, 1] + valsY_X2 = valsX2[:, 1] + valsZ_X1 = valsX1[:, 2] + valsZ_X2 = valsX2[:, 2] + + all_vals_X1 = np.reshape(np.concatenate((valsX_X1, valsY_X1, valsZ_X1)), -1) + default_gamma = 1 / np.var(all_vals_X1) / self.n_features_ + + kernel_matrix = np.zeros([dim1, dim2]) + for i in range(dim1): + for j in range(dim2): + sumX = sum([(valsX_X1[i, q] - valsX_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + sumY = sum([(valsY_X1[i, q] - valsY_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + sumZ = sum([(valsZ_X1[i, q] - valsZ_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + + kernel_matrix[i, j] = np.exp( + -default_gamma * self.gamma_factor * (sumX + sumY + sumZ) + ) + + else: + valsX1 = jnp.array(self.circuit(X1)) + valsX1 = jnp.reshape(valsX1, (dim1, 3, -1)) + valsX2 = jnp.array(self.circuit(X2)) + valsX2 = jnp.reshape(valsX2, (dim2, 3, -1)) + + valsX_X1 = valsX1[:, 0] + valsX_X2 = valsX2[:, 0] + valsY_X1 = valsX1[:, 1] + valsY_X2 = valsX2[:, 1] + valsZ_X1 = valsX1[:, 2] + valsZ_X2 = valsX2[:, 2] + + all_vals_X1 = jnp.reshape(jnp.concatenate((valsX_X1, valsY_X1, valsZ_X1)), -1) + default_gamma = 1 / jnp.var(all_vals_X1) / self.n_features_ + + def construct_kernel(valsX_X1, valsX_X2, valsY_X1, valsY_X2, valsZ_X1, valsZ_X2): + kernel_matrix = jnp.zeros([dim1, dim2]) + for i in range(dim1): + for j in range(dim2): + kx = jnp.array([(valsX_X1[i, q] - valsX_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + ky = jnp.array([(valsY_X1[i, q] - valsY_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + kz = 
jnp.array([(valsZ_X1[i, q] - valsZ_X2[j, q]) ** 2 for q in range(self.n_qubits_)]) + + sumX = jnp.sum(kx) + sumY = jnp.sum(ky) + sumZ = jnp.sum(kz) + + kernel_matrix = kernel_matrix.at[i,j].set(jnp.exp( + -default_gamma * self.gamma_factor * (sumX + sumY + sumZ)) + ) + return kernel_matrix + + construct_kernel = qjit(construct_kernel, autograph=True) if self.jit else construct_kernel + kernel_matrix = construct_kernel(valsX_X1, valsX_X2, valsY_X1, valsY_X2, valsZ_X1, valsZ_X2) - kernel_matrix[i, j] = np.exp( - -default_gamma * self.gamma_factor * (sumX + sumY + sumZ) - ) return kernel_matrix def initialize(self, n_features, classes=None): @@ -266,15 +320,16 @@ def fit(self, X, y): self.scaler = MinMaxScaler(feature_range=(-np.pi / 2, np.pi / 2)) self.scaler.fit(X) X = self.transform(X) + X = jnp.array(X) self.params_ = {"X_train": X} + start = time.time() kernel_matrix = self.precompute_kernel(X, X) + self.construct_kernel_time_ = time.time() - start - start = time.time() self.svm.C = self.C self.svm.fit(kernel_matrix, y) - end = time.time() - self.training_time_ = end - start + self.training_time_ = time.time() - start return self diff --git a/src/qml_benchmarks/models/quantum_boltzmann_machine.py b/src/qml_benchmarks/models/quantum_boltzmann_machine.py index 843c3b97..ba394e9a 100644 --- a/src/qml_benchmarks/models/quantum_boltzmann_machine.py +++ b/src/qml_benchmarks/models/quantum_boltzmann_machine.py @@ -7,17 +7,19 @@ from sklearn.preprocessing import StandardScaler import itertools from qml_benchmarks.model_utils import chunk_vmapped_fn + jax.config.update("jax_enable_x64", True) sigmaZ = jnp.array([[1, 0], [0, -1]]) sigmaX = jnp.array([[0, 1], [1, 0]]) sigmaY = jnp.array([[0, -1j], [1j, 0]]) + def tensor_ops(ops, idxs, n_qubits): """ Returns a tensor product of two operators acting at indexes idxs in an n_qubit system """ - tensor_op = 1. + tensor_op = 1.0 for i in range(n_qubits): if i in idxs: j = idxs.index(i) @@ -26,21 +28,22 @@ def tensor_ops(ops, idxs, n_qubits): tensor_op = jnp.kron(tensor_op, jnp.eye(2)) return tensor_op + class QuantumBoltzmannMachine(BaseEstimator, ClassifierMixin): def __init__( - self, - visible_qubits='single', - observable_type='sum', - temperature=1, - learning_rate=0.001, - batch_size=32, - max_vmap=None, - jit=True, - max_steps=10000, - convergence_threshold=1e-6, - random_state=42, - scaling=1.0 + self, + visible_qubits="single", + observable_type="sum", + temperature=1, + learning_rate=0.001, + batch_size=32, + max_vmap=None, + jit=True, + max_steps=10000, + convergence_threshold=1e-6, + random_state=42, + scaling=1.0, ): """ Variational Quantum Boltzmann Machine from https://arxiv.org/abs/2006.06004 @@ -109,7 +112,7 @@ def __init__( # which will be initialised by calling "fit" self.params_ = None # Dictionary containing the trainable parameters self.n_qubits = None - self.n_visible = None #number of visible qubits + self.n_visible = None # number of visible qubits self.scaler = None # data scaler will be fitted on training data self.circuit = None @@ -121,16 +124,19 @@ def construct_model(self): doubles = list(itertools.combinations(np.arange(self.n_qubits), 2)) self.n_params_ = 2 * len(singles) + len(doubles) - if self.observable_type == 'sum': - obs = sum([tensor_ops([sigmaZ], (i,), self.n_qubits) for i in range(self.n_visible)])/self.n_visible - elif self.observable_type == 'product': - obs = 1. 
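+ # readout observable: the mean of Pauli-Z over the visible qubits ("sum") or their + # tensor product ("product"); either way its expectation value lies in [-1, 1].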
+ if self.observable_type == "sum": + obs = ( + sum([tensor_ops([sigmaZ], (i,), self.n_qubits) for i in range(self.n_visible)]) + / self.n_visible + ) + elif self.observable_type == "product": + obs = 1.0 for i in range(self.n_visible): obs = jnp.kron(obs, sigmaZ) def gibbs_state(thetas, x): - H = jnp.zeros([2 ** self.n_qubits, 2 ** self.n_qubits]) + H = jnp.zeros([2**self.n_qubits, 2**self.n_qubits]) count = 0 for idxs in singles: H = H + tensor_ops([sigmaZ], idxs, self.n_qubits) * jnp.dot(thetas[count], x) @@ -139,7 +145,9 @@ def gibbs_state(thetas, x): count = count + 1 for idxs in doubles: - H = H + tensor_ops([sigmaZ, sigmaZ], idxs, self.n_qubits) * jnp.dot(thetas[count], x) + H = H + tensor_ops([sigmaZ, sigmaZ], idxs, self.n_qubits) * jnp.dot( + thetas[count], x + ) state = jax.scipy.linalg.expm(-H / self.temperature, max_squarings=32) return state / jnp.trace(state) @@ -148,8 +156,9 @@ def model(thetas, x): state = gibbs_state(thetas, x) return jnp.trace(jnp.matmul(state, obs)) - if self.jit: model = jax.jit(model) - self.forward = jax.vmap(model,in_axes=(None,0)) + if self.jit: + model = jax.jit(model) + self.forward = jax.vmap(model, in_axes=(None, 0)) self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) return self.forward @@ -171,11 +180,11 @@ def initialize(self, n_features, classes=None): self.n_qubits = n_features - if self.visible_qubits == 'single': + if self.visible_qubits == "single": self.n_visible = 1 - elif self.visible_qubits == 'half': - self.n_visible = self.n_qubits//2 - elif self.visible_qubits == 'all': + elif self.visible_qubits == "half": + self.n_visible = self.n_qubits // 2 + elif self.visible_qubits == "all": self.n_visible = self.n_qubits self.construct_model() @@ -203,12 +212,13 @@ def fit(self, X, y): def loss_fn(params, X, y): # binary cross entropy loss - vals = self.forward(params['thetas'], X) + vals = self.forward(params["thetas"], X) probs = (1 + vals) / 2 y = jax.nn.relu(y) # convert to 0,1 - return jnp.mean(-y*jnp.log(probs) - (1-y)*jnp.log(1-probs)) + return jnp.mean(-y * jnp.log(probs) - (1 - y) * jnp.log(1 - probs)) - if self.jit: loss_fn = jax.jit(loss_fn) + if self.jit: + loss_fn = jax.jit(loss_fn) self.params_ = train(self, loss_fn, optimizer, X, y, self.generate_key) return self @@ -237,7 +247,7 @@ def predict_proba(self, X): (n_samples, n_classes) """ X = self.transform(X) - predictions = self.forward(self.params_['thetas'], X) + predictions = self.forward(self.params_["thetas"], X) predictions_2d = np.c_[(1 - predictions) / 2, (1 + predictions) / 2] return predictions_2d @@ -258,25 +268,30 @@ class QuantumBoltzmannMachineSeparable(QuantumBoltzmannMachine): def construct_model(self): def qubit_gibbs_state(thetas, x): - H = sigmaZ*jnp.dot(thetas[0],x)+sigmaX*jnp.dot(thetas[1],x) + H = sigmaZ * jnp.dot(thetas[0], x) + sigmaX * jnp.dot(thetas[1], x) state = jax.scipy.linalg.expm(-H / self.temperature, max_squarings=32) return state / jnp.trace(state) def model(thetas, x): - gibbs_states = [qubit_gibbs_state(thetas[2*i:2*i+2,:],x) for i in range(self.n_visible)] - expvals = jnp.array([jnp.trace(jnp.matmul(state, sigmaZ)) for state in gibbs_states ]) - if self.observable_type=='sum': + gibbs_states = [ + qubit_gibbs_state(thetas[2 * i : 2 * i + 2, :], x) for i in range(self.n_visible) + ] + expvals = jnp.array([jnp.trace(jnp.matmul(state, sigmaZ)) for state in gibbs_states]) + if self.observable_type == "sum": return jnp.mean(expvals) - elif self.observable_type=='product': + elif self.observable_type == "product": return 
jnp.prod(expvals) - if self.jit: model = jax.jit(model) - self.forward = jax.vmap(model,in_axes=(None,0)) + if self.jit: + model = jax.jit(model) + self.forward = jax.vmap(model, in_axes=(None, 0)) self.chunked_forward = chunk_vmapped_fn(self.forward, 1, self.max_vmap) return self.forward def initialize_params(self): # initialise the trainable parameters - params = jax.random.normal(shape=(2*self.n_qubits, self.n_qubits), key=self.generate_key()) + params = jax.random.normal( + shape=(2 * self.n_qubits, self.n_qubits), key=self.generate_key() + ) self.params_ = {"thetas": params} diff --git a/src/qml_benchmarks/models/quantum_kitchen_sinks.py b/src/qml_benchmarks/models/quantum_kitchen_sinks.py index 6dd6d34d..68c389b5 100644 --- a/src/qml_benchmarks/models/quantum_kitchen_sinks.py +++ b/src/qml_benchmarks/models/quantum_kitchen_sinks.py @@ -180,9 +180,7 @@ def initialize_params(self): betas = ( 2 * np.pi - * jax.random.uniform( - key=self.generate_key(), shape=(self.n_episodes, self.n_qubits_) - ) + * jax.random.uniform(key=self.generate_key(), shape=(self.n_episodes, self.n_qubits_)) ) self.params_ = {"omegas": np.array(omegas), "betas": np.array(betas)} @@ -241,11 +239,7 @@ def transform(self, X, preprocess=True): Args: X (np.ndarray): Data of shape (n_samples, n_features) """ - if ( - self.params_["betas"] is None - or self.params_["omegas"] is None - or self.circuit is None - ): + if self.params_["betas"] is None or self.params_["omegas"] is None or self.circuit is None: raise ValueError("Model cannot predict without fitting to data first.") if preprocess: diff --git a/src/qml_benchmarks/models/quantum_metric_learning.py b/src/qml_benchmarks/models/quantum_metric_learning.py index 2dcfda5b..0ec5fca0 100644 --- a/src/qml_benchmarks/models/quantum_metric_learning.py +++ b/src/qml_benchmarks/models/quantum_metric_learning.py @@ -14,13 +14,15 @@ import warnings import pennylane as qml +import catalyst +from catalyst import qjit import numpy as np import jax from jax import numpy as jnp import optax from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.preprocessing import MinMaxScaler -from qml_benchmarks.model_utils import chunk_vmapped_fn, train +from qml_benchmarks.model_utils import chunk_vmapped_fn, train, train_with_catalyst jax.config.update("jax_enable_x64", True) @@ -50,11 +52,13 @@ def __init__( max_steps=50000, learning_rate=0.01, batch_size=32, - max_vmap=4, + use_jax = True, + vmap = True, + max_vmap=None, jit=True, random_state=42, scaling=1.0, - dev_type="default.qubit.jax", + dev_type=None, qnode_kwargs={"interface": "jax-jit"}, ): """ @@ -100,16 +104,19 @@ def __init__( self.learning_rate = learning_rate self.batch_size = batch_size self.jit = jit - self.dev_type = dev_type + self.use_jax = use_jax + self.vmap = vmap self.qnode_kwargs = qnode_kwargs self.scaling = scaling self.random_state = random_state self.rng = np.random.default_rng(random_state) - if max_vmap is None: - self.max_vmap = self.batch_size + if dev_type is not None: + self.dev_type = dev_type else: - self.max_vmap = max_vmap + self.dev_type = "default.qubit.jax" if use_jax else "lightning.qubit" + + self.max_vmap = 4 if max_vmap is None else max_vmap # data-dependant attributes # which will be initialised by calling "fit" @@ -117,7 +124,6 @@ def __init__( self.n_qubits_ = None self.n_features_ = None self.scaler = None # data scaler will be fitted on training data - self.circuit = None def generate_key(self): return jax.random.PRNGKey(self.rng.integers(1000000)) @@ -126,33 +132,44 @@ def 
construct_model(self): dev = qml.device(self.dev_type, wires=self.n_qubits_) wires = range(self.n_qubits_) - @qml.qnode(dev, **self.qnode_kwargs) - def circuit(params, x1, x2): - qml.QAOAEmbedding(features=x1, weights=params["weights"], wires=wires) - qml.adjoint(qml.QAOAEmbedding)( - features=x2, weights=params["weights"], wires=wires - ) - return qml.expval( - qml.Projector(np.array([0] * self.n_qubits_), wires=wires) - ) + def wrapped_circuit(params, x1, x2): + @qml.qnode(dev, **self.qnode_kwargs) + def circuit(params, x1, x2): + qml.QAOAEmbedding(features=x1, weights=params["weights"], wires=wires) + qml.adjoint(qml.QAOAEmbedding(features=x2, weights=params["weights"], wires=wires)) + return qml.probs() + return circuit(params, x1, x2)[0] - self.circuit = circuit + circuit = wrapped_circuit if self.jit: - circuit = jax.jit(circuit) - overlaps = jax.vmap(circuit, in_axes=(None, 0, 0)) - chunked_overlaps = chunk_vmapped_fn(overlaps, start=1, max_vmap=self.max_vmap) + if self.use_jax: + circuit = jax.jit(circuit) + else: + circuit = qjit(circuit, autograph=True) + + # always use a batched forward pass for now + if self.use_jax: + batched_overlaps = jax.vmap(circuit, in_axes=(None, 0, 0)) + chunked_overlaps = chunk_vmapped_fn(batched_overlaps, start=1, max_vmap=self.max_vmap) + else: + def batched_overlaps(params, X1, X2): + return jnp.array([circuit(params, elem[0], elem[1]) for elem in jnp.stack((X1, X2), axis=1)]) def model(params, X1=None, X2=None): - res = overlaps(params, X1, X2) - return jnp.mean(res) - - def chunked_model(params, X1=None, X2=None): - res = chunked_overlaps(params, X1, X2) + res = batched_overlaps(params, X1, X2) return jnp.mean(res) self.forward = model - self.chunked_forward = chunked_model + + if self.use_jax: + def chunked_model(params, X1=None, X2=None): + res = chunked_overlaps(params, X1, X2) + return jnp.mean(res) + + self.chunked_forward = chunked_model + else: + self.chunked_forward = self.forward return self.forward @@ -202,9 +219,7 @@ def fit(self, X, y): B = jnp.array(X[y == 1]) if self.batch_size > min(len(A), len(B)): - warnings.warn( - "batch size too large, setting to " + str(min(len(A), len(B))) - ) + warnings.warn("batch size too large, setting to " + str(min(len(A), len(B)))) self.batch_size = min(len(A), len(B)) def loss_fn(params, A=None, B=None): @@ -216,18 +231,17 @@ def loss_fn(params, A=None, B=None): return 1 - d_hs if self.jit: - loss_fn = jax.jit(loss_fn) + loss_fn = jax.jit(loss_fn) if self.use_jax else qjit(loss_fn) optimizer = optax.adam - self.params_ = train( - self, - loss_fn, - optimizer, - A, - B, - self.generate_key, - convergence_interval=self.convergence_interval, - ) + + if self.use_jax: + self.params_ = train(self, loss_fn, optimizer, A, B, self.generate_key, + convergence_interval=self.convergence_interval) + + else: + self.params_ = train_with_catalyst(self, loss_fn, optimizer, A, B, self.generate_key, + convergence_interval=self.convergence_interval) self.params_["examples_-1"] = A self.params_["examples_+1"] = B @@ -260,13 +274,9 @@ def predict_proba(self, X): X = self.transform(X) - max_examples = min( - len(self.params_["examples_-1"]), len(self.params_["examples_+1"]) - ) + max_examples = min(len(self.params_["examples_-1"]), len(self.params_["examples_+1"])) if self.n_examples_predict > max_examples: - warnings.warn( - "n_examples_predict too large, setting to " + str(max_examples) - ) + warnings.warn("n_examples_predict too large, setting to " + str(max_examples)) self.n_examples_predict = max_examples A_examples, B_examples = get_batch( @@
-281,8 +291,12 @@ def predict_proba(self, X): # create list [x, x, x, ...] to get overlaps with A_examples = [a1, a2, a3...] and B_examples x_tiled = jnp.tile(x, (self.n_examples_predict, 1)) - pred_a = jnp.mean(self.chunked_forward(self.params_, A_examples, x_tiled)) - pred_b = jnp.mean(self.chunked_forward(self.params_, B_examples, x_tiled)) + if self.use_jax: + pred_a = jnp.mean(self.chunked_forward(self.params_, A_examples, x_tiled)) + pred_b = jnp.mean(self.chunked_forward(self.params_, B_examples, x_tiled)) + else: + pred_a = jnp.mean(self.forward(self.params_, A_examples, x_tiled)) + pred_b = jnp.mean(self.forward(self.params_, B_examples, x_tiled)) # normalise to [0,1] predictions.append([pred_a / (pred_a + pred_b), pred_b / (pred_a + pred_b)]) diff --git a/src/qml_benchmarks/models/quanvolutional_neural_network.py b/src/qml_benchmarks/models/quanvolutional_neural_network.py index d08d4f07..790417ac 100644 --- a/src/qml_benchmarks/models/quanvolutional_neural_network.py +++ b/src/qml_benchmarks/models/quanvolutional_neural_network.py @@ -174,9 +174,7 @@ def construct_random_circuit(self): jnp.pi * 2 * jnp.array( - jax.random.uniform( - self.generate_key(), shape=(self.rand_depth, self.rand_rot) - ) + jax.random.uniform(self.generate_key(), shape=(self.rand_depth, self.rand_rot)) ) ) @@ -203,14 +201,10 @@ def construct_quanvolutional_layer(self): """ construct the quantum feature map. """ - random_circuits = [ - self.construct_random_circuit() for __ in range(self.n_qchannels) - ] + random_circuits = [self.construct_random_circuit() for __ in range(self.n_qchannels)] # construct an array that specifies the indices of the 'windows' of the image used for the convolution. - idx_mat = jnp.array( - [[(i, j) for j in range(self.width)] for i in range(self.height)] - ) + idx_mat = jnp.array([[(i, j) for j in range(self.width)] for i in range(self.height)]) idxs = jnp.array( [ idx_mat[j : j + self.qkernel_shape, k : k + self.qkernel_shape] diff --git a/src/qml_benchmarks/models/separable.py b/src/qml_benchmarks/models/separable.py index 56e852f0..f2a773fe 100644 --- a/src/qml_benchmarks/models/separable.py +++ b/src/qml_benchmarks/models/separable.py @@ -101,9 +101,7 @@ def single_qubit_circuit(weights, x): below to get the full circuit """ for layer in range(self.encoding_layers): - qml.Rot( - weights[layer, 0], weights[layer, 1], weights[layer, 2], wires=0 - ) + qml.Rot(weights[layer, 0], weights[layer, 1], weights[layer, 2], wires=0) qml.RY(x, wires=0) qml.Rot( weights[self.encoding_layers, 0], @@ -341,9 +339,7 @@ def precompute_kernel(self, X1, X2): dim2 = len(X2) # concatenate all pairs of vectors - Z = np.array( - [np.concatenate((X1[i], X2[j])) for i in range(dim1) for j in range(dim2)] - ) + Z = np.array([np.concatenate((X1[i], X2[j])) for i in range(dim1) for j in range(dim2)]) self.construct_circuit() kernel_values = [self.forward(z) for z in Z] # reshape the values into the kernel matrix @@ -379,9 +375,7 @@ def fit(self, X, y): """ self.svm.random_state = int( - jax.random.randint( - self.generate_key(), shape=(1,), minval=0, maxval=1000000 - ) + jax.random.randint(self.generate_key(), shape=(1,), minval=0, maxval=1000000) ) self.initialize(X.shape[1], np.unique(y)) diff --git a/src/qml_benchmarks/models/tree_tensor.py b/src/qml_benchmarks/models/tree_tensor.py index b4199ac6..9c386d49 100644 --- a/src/qml_benchmarks/models/tree_tensor.py +++ b/src/qml_benchmarks/models/tree_tensor.py @@ -110,8 +110,7 @@ def circuit(params, x): qml.CNOT, wires=range(self.n_qubits), pattern=[ - 
((i + 2**layer), i) - for i in range(0, self.n_qubits, 2 ** (layer + 1)) + ((i + 2**layer), i) for i in range(0, self.n_qubits, 2 ** (layer + 1)) ], ) qml.RY(params["weights"][count], wires=0) @@ -162,11 +161,7 @@ def initialize(self, n_features, classes=None): def initialize_params(self): # initialise the trainable parameters weights = ( - 2 - * jnp.pi - * jax.random.uniform( - shape=(2 * self.n_qubits - 1,), key=self.generate_key() - ) + 2 * jnp.pi * jax.random.uniform(shape=(2 * self.n_qubits - 1,), key=self.generate_key()) ) bias = 0.1 * jax.random.normal(shape=(1,), key=self.generate_key()) @@ -241,9 +236,7 @@ def transform(self, X, preprocess=True): n_features = X.shape[1] X = X * self.scaling - n_qubits_ae = int( - np.ceil(np.log2(n_features)) - ) # the num qubits needed to amplitude encode + n_qubits_ae = int(np.ceil(np.log2(n_features))) # the num qubits needed to amplitude encode n_qubits = 2 ** int( np.ceil(np.log2(n_qubits_ae)) ) # the model needs 2**m qubits, for some m @@ -252,7 +245,5 @@ def transform(self, X, preprocess=True): padding = np.ones(shape=(len(X), n_padding)) / max_n_features X_padded = np.c_[X, padding] - X_normalised = np.divide( - X_padded, np.expand_dims(np.linalg.norm(X_padded, axis=1), axis=1) - ) + X_normalised = np.divide(X_padded, np.expand_dims(np.linalg.norm(X_padded, axis=1), axis=1)) return X_normalised diff --git a/src/qml_benchmarks/models/weinet.py b/src/qml_benchmarks/models/weinet.py index 57a8193b..7669df46 100644 --- a/src/qml_benchmarks/models/weinet.py +++ b/src/qml_benchmarks/models/weinet.py @@ -152,16 +152,12 @@ def circuit(x): jnp.reshape(x, -1), wires=wires, normalize=True, pad_with=0.0 ) qml.QubitUnitary( - jnp.kron( - self.unitaries[nu][nu], jnp.array(self.unitaries[nu][mu]) - ), + jnp.kron(self.unitaries[nu][nu], jnp.array(self.unitaries[nu][mu])), wires=wires, ) return [qml.expval(op) for op in operators] - self.circuit = ( - circuit # we use the last one of the circuits here as an example - ) + self.circuit = circuit # we use the last one of the circuits here as an example if self.jit: circuit = jax.jit(circuit) @@ -176,9 +172,7 @@ def forward_fn(self, params, x): is equivalent to classically sampling one of the unitaries Q_i, parameterised by params['s']. """ probs = jax.nn.softmax(params["s"]) - expvals = jnp.array( - [probs[i] * jnp.array(self.circuits[i](x)).T for i in range(9)] - ) + expvals = jnp.array([probs[i] * jnp.array(self.circuits[i](x)).T for i in range(9)]) expvals = jnp.sum(expvals, axis=0) out = jnp.sum(params["weights"] * expvals) # out = jax.nn.sigmoid(out) # convert to a probability @@ -215,15 +209,12 @@ def initialize_params(self): """ # no of expvals that are combined with weights n_expvals = int( - self.n_qubits_ - - 1 - + factorial(self.n_qubits_ - 2) / 2 / factorial(self.n_qubits_ - 4) + self.n_qubits_ - 1 + factorial(self.n_qubits_ - 2) / 2 / factorial(self.n_qubits_ - 4) ) self.params_ = { "s": jax.random.normal(self.generate_key(), shape=(9,)), - "weights": jax.random.normal(self.generate_key(), shape=(n_expvals,)) - / n_expvals, + "weights": jax.random.normal(self.generate_key(), shape=(n_expvals,)) / n_expvals, } def fit(self, X, y):
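A note on weinet's forward_fn above: it is a classical softmax mixture of the nine fixed circuits, with only the mixture logits params["s"] and the linear readout params["weights"] trained. A minimal NumPy sketch of just that mixing step (stand-in values, not part of the patch):

import numpy as np

s = np.zeros(9)                        # stand-in for params["s"], mixture logits
probs = np.exp(s) / np.sum(np.exp(s))  # softmax; uniform weights here
circuit_outputs = np.ones((9, 4))      # stand-in for the nine circuits' expvals
expvals = np.sum(probs[:, None] * circuit_outputs, axis=0)
weights = np.full(4, 0.25)             # stand-in for params["weights"]
out = np.sum(weights * expvals)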