Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

.vscode/
node_modules/

Expand Down
10,881 changes: 10,881 additions & 0 deletions Graph_viz.ipynb

Large diffs are not rendered by default.

252 changes: 252 additions & 0 deletions QML_debugging.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU",
"gpuClass": "standard"
},
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "dPIxaRoBBvWq",
"outputId": "b1ea48cb-26f4-4df9-e69e-883fe8c256b9"
},
"outputs": [],
"source": [
"# Use %pip (not !pip) so the install targets the running kernel's environment.\n",
"%pip install --quiet pennylane dm-haiku"
]
},
{
"cell_type": "code",
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import haiku as hk\n",
"import jax\n",
"import optax\n",
"import pennylane as qml\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"# Load data (pressure-sensor readings and leak labels)\n",
"x_train = pd.read_excel(\"x_train.xlsx\")\n",
"y_train = pd.read_excel(\"y_train.xlsx\")\n",
"x_test = pd.read_excel(\"x_test.xlsx\")\n",
"y_test = pd.read_excel(\"y_test.xlsx\")\n",
"\n",
"# Hyperparameters\n",
"n_qubits = 8\n",
"epochs = 200\n",
"batch_size = 32\n",
"num_layers = 8\n",
"seed = 123\n",
"\n",
"num_batches = len(x_train) // batch_size\n",
"dev = qml.device(\"default.qubit\", wires=n_qubits)\n",
"\n",
"\n",
"def quantum_layer(weights):\n",
"    \"\"\"One variational layer: RY/RZ rotations followed by a ring of CNOTs.\n",
"\n",
"    NOTE(review): weights has shape (n_qubits, 3) but only columns 0 and 1\n",
"    are applied; the third column is trained yet never used - confirm intent.\n",
"    \"\"\"\n",
"    qml.templates.AngleEmbedding(weights[:, 0], rotation=\"Y\", wires=range(n_qubits))\n",
"    qml.templates.AngleEmbedding(weights[:, 1], rotation=\"Z\", wires=range(n_qubits))\n",
"    for i in range(n_qubits):  # was hard-coded 8; use n_qubits consistently\n",
"        qml.CNOT(wires=[i, (i + 1) % n_qubits])\n",
"\n",
"\n",
"@qml.qnode(dev, interface=\"jax\")\n",
"def quantum_circuit(x, circuit_weights):\n",
"    \"\"\"Data re-uploading circuit: re-embed x before every variational layer.\"\"\"\n",
"    for weights in circuit_weights:\n",
"        qml.templates.AngleEmbedding(x, wires=range(n_qubits))\n",
"        quantum_layer(weights)\n",
"    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]\n",
"\n",
"\n",
"@hk.without_apply_rng\n",
"@hk.transform\n",
"def forward(x):\n",
"    \"\"\"Hybrid model: linear+tanh encoder -> quantum circuit -> linear logit.\"\"\"\n",
"    x = jax.nn.tanh(hk.Linear(n_qubits)(x))\n",
"    W = hk.get_parameter(\n",
"        \"W\", (num_layers, n_qubits, 3), init=hk.initializers.RandomNormal(stddev=0.25)\n",
"    )\n",
"    x = jax.vmap(quantum_circuit, in_axes=(0, None))(x, W)\n",
"    # Returns a raw logit; apply a sigmoid to obtain a leak probability.\n",
"    # (Removed unused parameter \"s\", which was created but never applied.)\n",
"    return hk.Linear(1)(x)\n",
"\n",
"\n",
"rng = jax.random.PRNGKey(seed)\n",
"params = forward.init(rng, x_train.values)\n",
"opt = optax.radam(learning_rate=1e-3)\n",
"opt_state = opt.init(params)\n",
"\n",
"\n",
"def loss_fn(params, x, y):\n",
"    \"\"\"Mean binary cross-entropy computed on logits.\"\"\"\n",
"    pred = forward.apply(params, x)\n",
"    return optax.sigmoid_binary_cross_entropy(pred, y).mean()\n",
"\n",
"\n",
"@jax.jit\n",
"def update(params, opt_state, x, y):\n",
"    \"\"\"One optimiser step; returns (new_params, new_opt_state, batch_loss).\"\"\"\n",
"    loss, grads = jax.value_and_grad(loss_fn)(params, x, y)\n",
"    updates, new_opt_state = opt.update(grads, opt_state)\n",
"    new_params = optax.apply_updates(params, updates)\n",
"    return new_params, new_opt_state, loss\n",
"\n",
"\n",
"def predict_labels(params, x):\n",
"    \"\"\"Hard 0/1 predictions from logits.\n",
"\n",
"    The model outputs logits (the loss is sigmoid_binary_cross_entropy),\n",
"    so the decision boundary is 0, since sigmoid(0) == 0.5. Thresholding\n",
"    raw logits at 0.5 (the previous behaviour) was a bug.\n",
"    \"\"\"\n",
"    logits = forward.apply(params, x)\n",
"    return (logits > 0).astype(int)\n",
"\n",
"\n",
"shuffle_rng = np.random.default_rng(seed)  # seeded shuffling for reproducibility\n",
"loss_list = []\n",
"test_acc = []\n",
"for epoch in range(epochs):\n",
"    # Shuffle the training data each epoch\n",
"    shuffled_indices = shuffle_rng.permutation(len(x_train))\n",
"    x_train_shuffled = x_train.values[shuffled_indices]\n",
"    y_train_shuffled = y_train.values[shuffled_indices]\n",
"\n",
"    # Training\n",
"    epoch_loss = 0.0\n",
"    for batch_idx in range(num_batches):\n",
"        start = batch_idx * batch_size\n",
"        end = start + batch_size\n",
"        x_batch = x_train_shuffled[start:end]\n",
"        y_batch = y_train_shuffled[start:end]\n",
"        params, opt_state, batch_loss = update(params, opt_state, x_batch, y_batch)\n",
"        epoch_loss += batch_loss\n",
"\n",
"    epoch_loss /= num_batches\n",
"    loss_list.append(epoch_loss)\n",
"\n",
"    # Per-epoch test accuracy (compare arrays, not a DataFrame vs. an array)\n",
"    test_accuracy = accuracy_score(y_test.values, predict_labels(params, x_test.values))\n",
"    test_acc.append(test_accuracy)\n",
"    print(f\"Epoch {epoch + 1}, Loss: {epoch_loss:.4f}\")\n",
"\n",
"# Final evaluation\n",
"test_accuracy = accuracy_score(y_test.values, predict_labels(params, x_test.values))\n",
"print(f\"Test Accuracy: {test_accuracy:.4f}\")"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "6taEfGwayspN",
"outputId": "76de6059-37b2-4cb6-8ed7-557a55048d63"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "A_9lwsX70I9L"
},
"execution_count": null,
"outputs": []
}
]
}
50 changes: 5 additions & 45 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,11 +1,7 @@
[<img src="https://qbraid-static.s3.amazonaws.com/logos/Launch_on_qBraid_white.png" width="150">](https://account.qbraid.com?gitHubUrl=https://github.com/qBraid/NYUAD-2023.git)

# NYUAD Hackathon for Social Good in the Arab World: Focusing on Quantum Computing (QC)
# NYUAD Hackathon for Social Good in the Arab World: Focusing on Quantum Computing (QC) and UN Sustainable Development Goals (SDGs).

https://nyuad.nyu.edu/en/events/2022/march/nyuad-hackathon-event.html

March 30 - April 1, 2022

## Technical challenge

_Create a program that applies one or more quantum algorithms to a social good
Expand All @@ -18,48 +14,12 @@ problem of your choice._
- Variational Quantum Eigensolver (VQE)
- Quantum Approximate Optimization Algorithm (QAOA)

**Social good topic examples**:

- Healthcare
- Science (e.g. AI, cryptography, biochemistry)
- Environment (climate)
- Education & Literacy
- Food Securities
- Crisis & Public Safety
- Financial Modeling
- Gaming

**Implementation requirements**:

- Must utilize quantum hardware, preferably available through IBM. However, we
  also have AWS devices available as well.
- You are also free to use any technology which allows you to solve the
challenge.

**Bonus requirements**:

- Incorporate noisy simulation through IBM
- Incorporate a hybrid quantum-classical task through IBM

# qBraid Tutorials

Here, we provide useful tutorials on how to use qBraid-Lab, along with tutorials
on quantum computing, using IBM or Amazon Braket. The latter were provided by
the [qiskit-tutorials](https://github.com/qiskit/qiskit-tutorials) and the
[amazon-braket-examples](https://github.com/aws/amazon-braket-examples) github
repositories respectively.

The repository is structured as follows:

- [Setting up Qiskit environment](qbraid_qiskit_setup/accessing_ibm_hardware.ipynb)

---

## <a name="qbraid">Setting up Qiskit environment in qBraid</a>

- [**Install Qiskit in qBraid-Lab**](qbraid_qiskit_setup/accessing_ibm_hardware.ipynb)
## Leak Detection and Localization

- [**Enable Qiskit QPU access through qBraid-CLI**](qbraid_qiskit_setup/accessing_ibm_hardware.ipynb)
- [**Example environent setup on qBraid Youtube video**](https://www.youtube.com/watch?v=LyavbzSkvRo) (Please use the code EHNU6626)
### Using Quantum Machine Learning
Existing classical literature suggests the use of machine learning to predict leakage and localize it to a particular pipe using the data from pressure sensors in the WDN at any given point of time. We attempt to solve the same using a quantum machine learning based model.

---
Specifically, we collect the pressure data from the optimally-placed sensors in a water distribution network to predict leakage in the WDN using a quantum neural network. It is implemented in the Pennylane framework using Jax. The data is fed into the model using angle encoding. The model is composed of a parametrised quantum circuit with RY, RZ and CNOT gates which are trained over a total of 500 epochs. We use a train to test-set ratio of 4:1 and optimise the model using Rectified Adam (RAdam) over the binary cross-entropy loss. At the end we obtain a test accuracy of 87.02% over the dataset of size 650.
Binary file added WDN animation.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading