From 90d50d0699cb004a44d020fbe1321704a1b54536 Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Mon, 6 Oct 2025 16:12:35 -0400 Subject: [PATCH 01/31] Add quantum kernel pre-screening demo based on Huang et al. --- .../huang_geometric_kernel_difference/demo.py | 940 ++++++++++++++++++ .../metadata.json | 40 + .../requirements.in | 5 + 3 files changed, 985 insertions(+) create mode 100644 demonstrations_v2/huang_geometric_kernel_difference/demo.py create mode 100644 demonstrations_v2/huang_geometric_kernel_difference/metadata.json create mode 100644 demonstrations_v2/huang_geometric_kernel_difference/requirements.in diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py new file mode 100644 index 0000000000..4d1fee8b60 --- /dev/null +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -0,0 +1,940 @@ +r"""Is a Quantum Kernel Worth Exploring? A ‘:math:`g`-first’ Reality Check +====================================================================== + +Can we predict—*before* investing a ton of research hours- whether a quantum kernel has the +potential to beat a classical one across all kernel methods? + +From a practitioner’s perspective, such a **pre-screening test** is invaluable: it lets us rule out +quantum kernels that don’t offer any potential quantum advantage right from the start. + +Huang *et al.* (https://arxiv.org/abs/2011.01938) introduced exactly this test. Their proposed +**geometric difference :math:`g`** metric is a single scalar that quantifies how differently the +geometries defined by two kernels represent your data. The formula for :math:`g` is: + +:math:`g = \sqrt{\|\sqrt{K_q} K_c^{-1} \sqrt{K_q}\|_\infty}` + +where :math:`K_q` and :math:`K_c` are quantum and classical Gram matrices, respectively. + +Kernel Refresher +---------------- + +A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without +explicitly computing their feature representations in high-dimensional spaces, thus lowering the +computational cost. + +Classical Kernels +~~~~~~~~~~~~~~~~~ + +- Example: RBF (Radial Basis Function) kernel +- Formula: :math:`k(x, x') = \exp(-\gamma \|x - x'\|^2)` +- Implicitly computes: :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle` +- The feature map :math:`\phi(x)` projects to infinite dimensions but is never calculated directly + +Quantum Kernels +~~~~~~~~~~~~~~~ + +- Formula: :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2` +- :math:`|\psi(x)\rangle` is the quantum state encoding the classical data :math:`x` +- For :math:`n` qubits, the quantum state lives in a :math:`2^n`-dimensional Hilbert space that is + implicitly manipulated + +Key Concepts +~~~~~~~~~~~~ + +The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store +all pairwise similarities between data points. + +What :math:`g` tells us: +------------------------ + +- **If :math:`g` ≈ 1**: The quantum kernel’s geometry is essentially the same as a good classical + kernel’s → the quantum kernel offers no geometric advantage, making it unlikely to outperform the + classical kernel **in any kernel-based learning algorithm** (e.g., SVM, Gaussian Processes). Huang + et al. proved this concept in a rigorous mathematical way in their paper. + +- **If :math:`g` ≫ 1**: The quantum geometry is genuinely different → **a kernel method using the + quantum kernel** *might* offer an advantage. 
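
To make the formula concrete, here is a minimal sketch of the computation on toy
:math:`2 \times 2` Gram matrices (assuming only ``numpy`` and ``scipy``; the full,
regularized implementation appears later in this demo):

.. code-block:: python

    import numpy as np
    from scipy.linalg import sqrtm

    K_c = np.array([[1.0, 0.5], [0.5, 1.0]])  # toy classical Gram matrix
    K_q = np.array([[1.0, 0.1], [0.1, 1.0]])  # toy quantum Gram matrix

    M = sqrtm(K_q) @ np.linalg.inv(K_c) @ sqrtm(K_q)
    g = np.sqrt(np.max(np.linalg.eigvalsh(M)))  # spectral norm of M, then square root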
+ +Why this matters: +----------------- + +This approach focuses on ruling out underperforming quantum kernels before investing in training. +From a complexity theory point of view, computing :math:`g` scales as :math:`O(n^3)` due to the +matrix inversion, and the most expensive training algorithms such as Gaussian Processes also scale +as :math:`O(n^3)` so we might think we are not saving any computational time. However, from a +practical perspective, the real savings come from avoiding wasted researcher effort. + +When a quantum kernel performs poorly, researchers often spend days exploring different algorithm +hyperparameters, cross-validation strategies, and implementation debugging. If :math:`g \approx 1`, +you immediately know the quantum kernel’s geometry offers no advantage—it’s not your implementation, +not your algorithm choice, and not a hyperparameter issue. The kernel is fundamentally limited +compared to classical kernels on this specific dataset. + +Demonstration setup: +-------------------- + +1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn`` +2. **Five kernels to compare**: + + - **Classical baseline**: Gaussian-RBF kernel + - **Quantum kernels**: + + - Separable-rotation embedding (E1) + - IQP-style embedding (E2) + - Projected kernel from E1 (maximizing :math:`g` as proposed by Huang et al.) + - Projected kernel from E2 + +3. **Our approach**: Calculate :math:`g` values between the classical kernel and each quantum + kernel—giving us an immediate assessment of which quantum approaches might be worth pursuing + across any kernel-based method +""" + +# We first start by generating and visualizing the artificial data +import numpy as np +import matplotlib.pyplot as plt +import scipy + +from sklearn.datasets import make_moons +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import StandardScaler + +np.random.seed(422) + +X_raw, y = make_moons(n_samples=300, noise=0.10, random_state=0) + +# train/test split BEFORE any scaling (avoid data leakage) ------------ +X_train_raw, X_test_raw, y_train, y_test = train_test_split( + X_raw, y, test_size=0.30, random_state=0, stratify=y +) + +# standardize the data +scaler = StandardScaler().fit(X_train_raw) # statistics from train only +X_train = scaler.transform(X_train_raw) +X_test = scaler.transform(X_test_raw) + +print(f"Train size: {X_train.shape[0]} Test size: {X_test.shape[0]}") + +# visualize it using a scatter plot +plt.figure(figsize=(4, 4)) +plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], s=15, alpha=0.8, label="class 0") +plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], s=15, alpha=0.8, label="class 1") +plt.axis("equal") +plt.title("Two‑moons — training split (standardised)") +plt.legend(frameon=False) +plt.show() + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# Train size: 210 Test size: 90 +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_1_1.png +# :align: center +# :width: 80% + +###################################################################### +# Quantum kernels: fidelity-based and projected variants +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# | We consider **five different kernels**, derived from three sources: a classical RBF kernel and two +# quantum embedding circuits — **E1** and **E2**. 
+# | Each kernel defines a different geometry for measuring similarity between data points. +# +# - | **RBF – Classical radial basis function kernel** +# | A classical baseline defined as: +# | +# +# .. math:: +# +# +# k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# +# This maps data into an infinite-dimensional space where closer inputs remain close, and distant +# ones become nearly orthogonal. +# | It captures a **geometric**, distance-based notion of similarity in input space. +# +# - | **E1 – Separable RX rotations** +# | Each input feature :math:`x_j` is encoded into a single qubit using an :math:`RX(x_j)` gate. +# | The circuit is fully separable (no entanglement), producing the quantum state +# :math:`\lvert \psi_{\text{E1}}(x) \rangle`. +# +# - | **E2 – IQP embedding** +# | PennyLane’s ``qml.IQPEmbedding`` applies Hadamards, parameterized :math:`RZ(x_j)` rotations, and +# entangling ZZ gates. +# | This creates an entangled quantum state :math:`\lvert \psi_{\text{E2}}(x) \rangle`, inspired by +# Instantaneous Quantum Polynomial (IQP) circuits. +# +# - | **QK – Standard quantum kernels** +# | For both E1 and E2, the kernel is defined by the **fidelity** between quantum states: +# +# .. math:: +# +# +# k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 +# +# .. math:: +# +# +# k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 +# +# where :math:`\psi_{\text{E1}}(x)` and :math:`\psi_{\text{E2}}(x)` are the quantum states generated +# by E1 and E2 respectively. These kernels reflect how aligned two quantum feature states are in +# Hilbert space. +# +# - | **PQK – Projected quantum kernels (PQK-E1 / PQK-E2)** +# | For a projected quantum kernel, instead of computing fidelity, the output quantum state +# :math:`|\psi(x)\rangle` +# | is **measured** to extract the expectation values of Pauli operators: +# | +# +# .. math:: +# +# +# v(x) = \left[ \langle X_0 \rangle, \langle Y_0 \rangle, \langle Z_0 \rangle, \dots, \langle Z_{n-1} \rangle \right] +# +# A classical **RBF kernel** is then applied to these real-valued vectors: +# | +# +# .. math:: +# +# +# k_{\text{PQK}}(x, x') = \exp\left( -\gamma \| v(x) - v(x') \|^2 \right) +# +# We obtain two different projected quantum kernels from E1 and E1: +# +# .. math:: +# +# +# k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) +# +# .. math:: +# +# +# k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) +# +# where :math:`v_{\text{E1 }}(x)` and :math:`v_{\text{E2}}(x)` are the Pauli expectation vector from +# E1 and E2 respectively. +# + +# We define the embedding circuits E1 and E2, and we visualize them. +import numpy as np +import pennylane as qml +import matplotlib.pyplot as plt + +n_features = X_train.shape[1] +n_qubits = n_features + + +# -- E1: separable RX rotations --------------------------------------------- +def embedding_E1(features): + for j, xj in enumerate(features): + qml.RX(np.pi * xj, wires=j) + + +# -- E2: IQP embedding via PennyLane template -------------------------------- +def embedding_E2(features): + qml.IQPEmbedding(features, wires=range(n_features)) + + +print("E1 Embedding Circuit:") +fig, ax = qml.draw_mpl(embedding_E1)(np.zeros(n_qubits)) +plt.show() + +print("\nE2 Embedding Circuit:") +fig, ax = qml.draw_mpl(embedding_E2)(np.zeros(n_qubits)) +plt.show() + +###################################################################### +# .. 
rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# E1 Embedding Circuit: +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_1.png +# :align: center +# :width: 80% + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# +# E2 Embedding Circuit: +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_2.png +# :align: center +# :width: 80% + +###################################################################### +# Gram Matrix Computation +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# Using the kernels defined above, we now build the **Gram (kernel) matrices** required to compute the +# practitioner’s metric :math:`g`. +# +# For a dataset of :math:`N` samples and a kernel function :math:`k(\cdot, \cdot)`, the Gram matrix +# :math:`K \in \mathbb{R}^{N \times N}` is defined entrywise as: +# +# .. math:: +# +# +# K_{ij} = k(x_i, x_j) +# +# Each entry :math:`K_{ij}` measures how similar two data points are, and the full matrix :math:`K` +# provides a **global view** of the data in the kernel’s feature space. +# +# We compute five such matrices, one for each kernel defined above: +# +# :math:`K_{\text{RBF}}` obtained from: +# +# .. math:: +# +# +# k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# +# :math:`K_{\text{QK-E1}}` obtained from: +# +# .. math:: +# +# +# k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 +# +# :math:`K_{\text{QK-E2}}` obtained from: +# +# .. math:: +# +# +# k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 +# +# :math:`K_{\text{PQK-E1}}` obtained from: +# +# .. math:: +# +# +# k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) +# +# :math:`K_{\text{PQK-E2}}` obtained from: +# +# .. math:: +# +# +# k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) +# +# The gram matrices will be used in downstream evaluations to compare kernel geometries and analyze +# expressivity and generalization metrics like :math:`g`. 
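#
# A note on how the fidelity entries are evaluated in practice: because
#
# .. math:: |\langle \psi(x) \mid \psi(x') \rangle|^2 = |\langle 0 \cdots 0 \mid U^\dagger(x') U(x) \mid 0 \cdots 0 \rangle|^2,
#
# we can prepare :math:`U(x)`, apply the adjoint circuit :math:`U^\dagger(x')`, and
# read off the probability of the all-zeros outcome. The ``overlap_prob`` helper in
# the code below implements exactly this adjoint trick.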
+# + +# The following code builds all five Gram (kernel) matrices: Classical, QK-E1, QK-E2, PQK-E1, PQK-E2 +from sklearn.metrics.pairwise import rbf_kernel + + +# ---------------------------------------------------------------------------# +# Classical RBF Gram matrix # +# ---------------------------------------------------------------------------# +def classical_rbf_kernel(X, gamma=1.0): + return rbf_kernel(X, gamma=gamma) + + +K_classical = classical_rbf_kernel(X_train) +print(f"K_RBF shape: {K_classical.shape}") + +# ---------------------------------------------------------------------------# +# Quantum fidelity-based Gram matrices # +# ---------------------------------------------------------------------------# +dev = qml.device("default.qubit", wires=n_qubits, shots=None) + + +def overlap_prob(x, y, embed): + """Probability of measuring |0…0⟩ after U(x) U†(y).""" + + @qml.qnode(dev) + def circuit(): + embed(x) + qml.adjoint(embed)(y) + return qml.probs(wires=range(n_qubits)) + + return circuit()[0] + + +def quantum_kernel_matrix(X, embed): + return qml.kernels.kernel_matrix(X, X, lambda v1, v2: overlap_prob(v1, v2, embed)) + + +print("Computing QK-E1 (fidelity)...") +K_quantum_E1 = quantum_kernel_matrix(X_train, embed=embedding_E1) + +print("Computing QK-E2 (fidelity)...") +K_quantum_E2 = quantum_kernel_matrix(X_train, embed=embedding_E2) + +print(f"K_QK_E1 shape: {K_quantum_E1.shape}") +print(f"K_QK_E2 shape: {K_quantum_E2.shape}") + + +# ---------------------------------------------------------------------------# +# Projected quantum kernels (Pauli vectors + classical RBF) # +# ---------------------------------------------------------------------------# +def get_pauli_vectors(embedding_func, X): + """Returns Pauli expectation vectors for each input using the given embedding.""" + observables = [] + for i in range(n_qubits): + observables.extend([qml.PauliX(i), qml.PauliY(i), qml.PauliZ(i)]) + + @qml.qnode(dev) + def pauli_qnode(x): + embedding_func(x) + return [qml.expval(obs) for obs in observables] + + vectors = [pauli_qnode(x) for x in X] + return np.array(vectors) + + +def calculate_gamma(vectors): + """Use heuristic gamma = 1 / (d * var) for RBF kernel on Pauli space.""" + d = vectors.shape[1] + var = np.var(vectors) + return 1.0 / (d * var) if var > 1e-8 else 1.0 + + +def pqk_kernel_matrix(X, embedding_func): + """Computes PQK kernel matrix from Pauli vectors + RBF kernel.""" + pauli_vecs = get_pauli_vectors(embedding_func, X) + gamma = calculate_gamma(pauli_vecs) + return rbf_kernel(pauli_vecs, gamma=gamma) + + +print("Computing PQK-E1 (Pauli + RBF)...") +K_pqk_E1 = pqk_kernel_matrix(X_train, embedding_E1) + +print("Computing PQK-E2 (Pauli + RBF)...") +K_pqk_E2 = pqk_kernel_matrix(X_train, embedding_E2) + +print(f"K_PQK_E1 shape: {K_pqk_E1.shape}") +print(f"K_PQK_E2 shape: {K_pqk_E2.shape}") + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# K_RBF shape: (210, 210) +# Computing QK-E1 (fidelity)... +# Computing QK-E2 (fidelity)... +# K_QK_E1 shape: (210, 210) +# K_QK_E2 shape: (210, 210) +# Computing PQK-E1 (Pauli + RBF)... +# Computing PQK-E2 (Pauli + RBF)... 
+# K_PQK_E1 shape: (210, 210) +# K_PQK_E2 shape: (210, 210) + +# Visualizing the Gram Matrices +import matplotlib.pyplot as plt + +# Visualize first 20x20 subset of each Gram matrix for clarity +subset_size = 20 +fig, axes = plt.subplots(1, 5, figsize=(20, 4)) + +matrices = [K_classical, K_quantum_E1, K_quantum_E2, K_pqk_E1, K_pqk_E2] +titles = ["Classical RBF", "QK-E1", "QK-E2", "PQK-E1", "PQK-E2"] + +for i, (K, title) in enumerate(zip(matrices, titles)): + im = axes[i].imshow(K[:subset_size, :subset_size], cmap="viridis", aspect="equal") + axes[i].set_title(title) + axes[i].set_xlabel("Sample index") + if i == 0: + axes[i].set_ylabel("Sample index") + plt.colorbar(im, ax=axes[i], fraction=0.046) + +plt.tight_layout() +plt.suptitle(f"Gram Matrix Visualizations (first {subset_size}×{subset_size} entries)", y=1.02) +plt.show() + +print("Each matrix shows how similar data points are to each other:") +print("- Brighter colors = higher similarity") +print("- Different patterns indicate different geometries") + +###################################################################### +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_6_1.png +# :align: center +# :width: 80% + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# Each matrix shows how similar data points are to each other: +# - Brighter colors = higher similarity +# - Different patterns indicate different geometries + +# We then compute the practitioner’s metric g for each quantum kernel, according to the formula used in the paper by Huang et al. +from scipy.linalg import sqrtm + + +def compute_g(K_classical, K_quantum, eps=1e-7): + """ + Compute geometric difference g between classical and quantum kernels. + Formula: g = sqrt( || sqrt(K_quantum) @ inv(K_classical) @ sqrt(K_quantum) || ) + """ + N = K_classical.shape[0] + + # Regularize and invert K_classical + Kc_reg = K_classical + eps * np.eye(N) + Kc_inv = np.linalg.inv(Kc_reg) + + # Compute the square root of the quantum kernel + sqrt_Kq = sqrtm(K_quantum) + # Construct M = sqrt(Kq) @ Kc⁻¹ @ sqrt(Kq) + M = sqrt_Kq @ Kc_inv @ sqrt_Kq + + # g = sqrt(max eigenvalue of M) + max_eigval = np.max(np.linalg.eigvalsh(M)) + return np.sqrt(np.maximum(max_eigval, 0.0)) + + +# Compute g for all four quantum kernels +g_QK_E1 = compute_g(K_classical, K_quantum_E1) +g_QK_E2 = compute_g(K_classical, K_quantum_E2) +g_PQK_E1 = compute_g(K_classical, K_pqk_E1) +g_PQK_E2 = compute_g(K_classical, K_pqk_E2) + +# Display results +print("\n--- Geometric Difference (g) ---") +print(f"g (RBF vs QK‑E1): {g_QK_E1:.4f}") +print(f"g (RBF vs QK‑E2): {g_QK_E2:.4f}") +print(f"g (RBF vs PQK‑E1): {g_PQK_E1:.4f}") +print(f"g (RBF vs PQK‑E2): {g_PQK_E2:.4f}") + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# +# --- Geometric Difference (g) --- +# g (RBF vs QK‑E1): 8.3359 +# g (RBF vs QK‑E2): 2.1493 +# g (RBF vs PQK‑E1): 894.0699 +# g (RBF vs PQK‑E2): 194.6228 + +###################################################################### +# What does a high :math:`g` really tell us? +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# We can see that in terms of :math:`g`: +# +# PQK-E1 > PQK-E2 > QK-E1 > QK-E2. 
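#
# As a quick sanity check of ``compute_g`` (a static snippet, not executed in this
# demo): comparing any kernel against itself should return :math:`g \approx 1`,
# since :math:`\sqrt{K}\,K^{-1}\sqrt{K} \approx I` up to the regularization.
#
# .. code-block:: python
#
#     print(compute_g(K_classical, K_classical))  # expect a value close to 1.0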
+# +# A common misconception is that a higher geometric difference :math:`g` automatically means better +# classification performance, which might lead us to believe, for example, that in terms of final +# accuracy, the ranking will also be PQK-E1 > PQK-E2 > QK-E1 > QK-E2. +# +# | This intuition is understandable — after all, a larger :math:`g` suggests that the quantum kernel +# perceives the data very differently from a classical one. +# | But as we’ll see, **a higher :math:`g` doesn’t always translate into better accuracy, it just +# means there’s higher potential for an improvement over the classical model**. +# +# In fact, a higher :math:`g` can sometimes correspond to worse performance on the original task. +# +# Let’s see this in action. +# + +# We train SVMs using each kernel and compare test accuracy +from sklearn.svm import SVC +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import rbf_kernel + + +def train_evaluate_svm(K_train, K_test, y_train, y_test, name): + print(f"Training SVM with {name} kernel...") + clf = SVC(kernel="precomputed") + clf.fit(K_train, y_train) + acc = accuracy_score(y_test, clf.predict(K_test)) + print(f" Test accuracy: {acc:.4f}") + return acc + + +results = {} + +# Classical RBF +K_rbf_test = rbf_kernel(X_test, X_train) +results["Classical RBF"] = train_evaluate_svm( + K_classical, K_rbf_test, y_train, y_test, "Classical RBF" +) + +# Quantum Kernel E1 +K_qk_e1_test = qml.kernels.kernel_matrix( + X_test, X_train, lambda x, y: overlap_prob(x, y, embedding_E1) +) +results["QK-E1"] = train_evaluate_svm(K_quantum_E1, K_qk_e1_test, y_train, y_test, "Quantum E1") + +# Quantum Kernel E2 +K_qk_e2_test = qml.kernels.kernel_matrix( + X_test, X_train, lambda x, y: overlap_prob(x, y, embedding_E2) +) +results["QK-E2"] = train_evaluate_svm(K_quantum_E2, K_qk_e2_test, y_train, y_test, "Quantum E2") + +# PQK E1 +pauli_test_E1 = get_pauli_vectors(embedding_E1, X_test) +gamma_E1 = calculate_gamma(np.vstack((get_pauli_vectors(embedding_E1, X_train), pauli_test_E1))) +K_pqk_e1_test = rbf_kernel(pauli_test_E1, get_pauli_vectors(embedding_E1, X_train), gamma=gamma_E1) +results["PQK-E1"] = train_evaluate_svm(K_pqk_E1, K_pqk_e1_test, y_train, y_test, "PQK E1") + +# PQK E2 +pauli_test_E2 = get_pauli_vectors(embedding_E2, X_test) +gamma_E2 = calculate_gamma(np.vstack((get_pauli_vectors(embedding_E2, X_train), pauli_test_E2))) +K_pqk_e2_test = rbf_kernel(pauli_test_E2, get_pauli_vectors(embedding_E2, X_train), gamma=gamma_E2) +results["PQK-E2"] = train_evaluate_svm(K_pqk_E2, K_pqk_e2_test, y_train, y_test, "PQK E2") + +# Summary +print("\n--- Accuracy Comparison ---") +for model, acc in results.items(): + print(f"{model:>15}: {acc:.4f}") + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# Training SVM with Classical RBF kernel... +# Test accuracy: 0.9111 +# Training SVM with Quantum E1 kernel... +# Test accuracy: 0.8333 +# Training SVM with Quantum E2 kernel... +# Test accuracy: 0.8444 +# Training SVM with PQK E1 kernel... +# Test accuracy: 0.8333 +# Training SVM with PQK E2 kernel... 
+# Test accuracy: 1.0000 +# +# --- Accuracy Comparison --- +# Classical RBF: 0.9111 +# QK-E1: 0.8333 +# QK-E2: 0.8444 +# PQK-E1: 0.8333 +# PQK-E2: 1.0000 + +# Accuracy Comparison + +import matplotlib.pyplot as plt + +# Extract model names and accuracies +model_names = list(results.keys()) +accuracies = [results[name] for name in model_names] + +# Create bar chart +plt.figure(figsize=(10, 4)) +bars = plt.bar(model_names, accuracies) +plt.ylim(0, 1.10) +plt.ylabel("Test Accuracy") +plt.title("SVM Accuracy by Kernel") +plt.xticks(rotation=15) +plt.grid(axis="y", linestyle="--", alpha=0.4) + +# Annotate values +for bar, acc in zip(bars, accuracies): + yval = bar.get_height() + plt.text( + bar.get_x() + bar.get_width() / 2, yval + 0.015, f"{acc:.2f}", ha="center", va="bottom" + ) + +plt.show() + +###################################################################### +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_10_1.png +# :align: center +# :width: 80% + +###################################################################### +# | Our test results reveal an important subtlety: +# | **A higher geometric difference :math:`g` does not guarantee better classification accuracy.** +# +# For instance: - **PQK‑E2** achieved perfect test accuracy (:math:`100\%`), despite having a lower +# :math:`g` than PQK‑E1. +# +# | This highlights a key message from the paper: +# | > The role of :math:`g` is *not* to predict which kernel will perform best on a given task — but +# rather to obtain a collection of kernels that have the *potential* to offer an advantage. +# +# Here, PQK-E1 and PQK-E2 both had the potential for an advantage over classical, but PQK-E2 is the +# only one that actually achieved the advantage. As a simple practical rule, if :math:`g` is low, then +# we can immediately discard a quantum kernel, whereas if :math:`g` is high, we keep the kernel as a +# potential solution because it offers a potential for an improvement on our classification problem. +# This way, we have an important diagnostic tool to filter out bad quantum kernels for our data. +# + +###################################################################### +# 🧠 Conclusion: A Practical Perspective on the Geometric Difference :math:`g` +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# In this notebook, we explored a fundamental question in quantum machine learning: +# +# **Can we anticipate, before training, whether a quantum kernel might outperform a classical +# one?** +# +# To address this, we used the **geometric difference :math:`g`**, a pre-training metric introduced by +# Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a +# classical kernel. +# +# -------------- +# +# 🔑 Key takeaways: +# ~~~~~~~~~~~~~~~~~ +# +# - | **:math:`g` is a diagnostic, not a performance predictor.** +# | A large :math:`g` indicates that the quantum kernel induces a very different geometry from the +# classical one — a *necessary*, but not *sufficient*, condition for quantum advantage. +# +# - | **Higher :math:`g` does not imply higher accuracy.** +# | In our results, **PQK‑E2** had a high :math:`g` and achieved perfect accuracy — but **PQK‑E1**, +# with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that +# :math:`g` measures *potential*, not realized performance. 
+# +# - | **:math:`g`\ ’s value is in ruling out unpromising kernels.** +# | Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical +# methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a +# classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may +# be possible* — not that it will be realized. +# + +###################################################################### +# Appendix: What if We Take the Labels Into Account? +# -------------------------------------------------- +# +# The cells above explore the importance of :math:`g` in a practical setting. However, as an appendix, +# we also present a construction from the paper that’s pretty fun to play around with. We mentioned +# that a high :math:`g` does not necessarily mean that a quantum kernel will outperform a classical +# kernel, such as the case of PQK-E1. This is because :math:`g` does not take the dataset *labels* +# into account in a supervised learning setting (such as the SVM we are exploring in the paper), and +# it could be that the specific labels we have are not a good fit for the geometry of the kernel. The +# authors proposed a method to artificially construct new labels that align with a quantum kernel’s +# geometry. This guarantees that if :math:`g` is large enough, we will get an improvement since both +# the input features and labels now match the geometry of the kernel. This is a toy construction; it’s +# not practical because we usually care about the actual dataset labels we have rather than the fake +# labels, but it’s good for getting more intuition about the role of :math:`g` and why it sometimes +# fails in predicting performance. +# +# How Label Re-engineering Works: +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Given :math:`K_Q` and :math:`K_C`, the process generates new labels that maximize quantum kernel +# advantage: +# +# 1. Compute the matrix :math:`M = \sqrt{K_Q}(K_C)^{-1}\sqrt{K_Q}` +# 2. Find the eigenvector :math:`v` corresponding to the largest eigenvalue :math:`g^2` of :math:`M` +# 3. Project this eigenvector through :math:`\sqrt{K_Q}` to get new continuous labels: +# :math:`y = \sqrt{K_Q}v` +# 4. Binarize using the median as threshold to create classification labels +# +# The new labels match the geometry of :math:`K_Q` and provide an advantage which is proportional to +# :math:`g`. +# +# Let’s do this! 
+# + +# Engineer labels for PQK-E1 +# ---------------------------------------------------------------------------# +# Rebuild full X, y used in kernel computations # +# ---------------------------------------------------------------------------# +X_all = np.vstack([X_train, X_test]) +y_all = np.concatenate([y_train, y_test]) +n_samples = X_all.shape[0] +# ---------------------------------------------------------------------------# +# Recompute kernels on full dataset # +# ---------------------------------------------------------------------------# +print("Recomputing QK‑E1 and classical kernel on full dataset...") +K_qk_E1_full = quantum_kernel_matrix(X_all, embed=embedding_E1) +K_classical_full = rbf_kernel(X_all) +# ---------------------------------------------------------------------------# +# Generate engineered labels using eigendecomposition of quantum kernel +# ---------------------------------------------------------------------------# +print("Generating engineered labels to favor QK‑E1...") +# Compute matrix square roots and inverse +KQ_sqrt = sqrtm(K_qk_E1_full) +KC_reg = K_classical_full + 1e-7 * np.eye(K_classical_full.shape[0]) +KC_inv = np.linalg.inv(KC_reg) + +# Solve eigenvalue problem: √KQ(KC)^(-1)√KQ +M = KQ_sqrt @ KC_inv @ KQ_sqrt +eigvals, eigvecs = np.linalg.eigh(M) +v_max = eigvecs[:, -1] # Largest eigenvalue's eigenvector + +# Apply square root transformation: y = √KQ v +y_engineered_continuous = KQ_sqrt @ v_max + +# Threshold at median to create binary labels +median_val = np.median(y_engineered_continuous) +label_low, label_high = np.min(y_all), np.max(y_all) +y_engineered = np.where(y_engineered_continuous > median_val, label_high, label_low).astype( + y_all.dtype +) +print("✅ Engineered labels generated successfully.") +print(f"Class {label_low}: {np.sum(y_engineered == label_low)}") +print(f"Class {label_high}: {np.sum(y_engineered == label_high)}") +# -----------------------------------------------------------------------# +# Resplit into train/test using original index split # +# -----------------------------------------------------------------------# +y_train_eng = y_engineered[: len(y_train)] +y_test_eng = y_engineered[len(y_train) :] + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# Recomputing QK‑E1 and classical kernel on full dataset... +# Generating engineered labels to favor QK‑E1... +# ✅ Engineered labels generated successfully. +# Class 0: 150 +# Class 1: 150 + +# We plot the newly re-engineered dataset for PQK-E1 + +plt.figure(figsize=(4, 4)) +plt.scatter( + X_train[y_train_eng == 0, 0], + X_train[y_train_eng == 0, 1], + s=15, + alpha=0.8, + label="Engineered Class 0", +) +plt.scatter( + X_train[y_train_eng == 1, 0], + X_train[y_train_eng == 1, 1], + s=15, + alpha=0.8, + label="Engineered Class 1", +) +plt.axis("equal") +plt.title("QK‑E1 engineered labels (training set)") +plt.legend(frameon=False) +plt.show() + +###################################################################### +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_15_1.png +# :align: center +# :width: 80% + +# We train SVMs using each kernel and compare test accuracy on the new engineered labels. 
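# Note: both the training and the test labels below are the engineered ones
# (y_train_eng / y_test_eng), so accuracy now measures how well each kernel fits
# the re-engineered geometry rather than the original two-moons classes.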
+from sklearn.svm import SVC +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import rbf_kernel + +results_engineered = {} + +# Classical RBF +K_rbf_test = rbf_kernel(X_test, X_train) +results_engineered["Classical RBF"] = train_evaluate_svm( + K_classical, K_rbf_test, y_train_eng, y_test_eng, "Classical RBF" +) + +# Quantum Kernel E1 +K_qk_e1_test = qml.kernels.kernel_matrix( + X_test, X_train, lambda x, y: overlap_prob(x, y, embedding_E1) +) +results_engineered["QK-E1"] = train_evaluate_svm( + K_quantum_E1, K_qk_e1_test, y_train_eng, y_test_eng, "Quantum E1" +) + +# Quantum Kernel E2 +K_qk_e2_test = qml.kernels.kernel_matrix( + X_test, X_train, lambda x, y: overlap_prob(x, y, embedding_E2) +) +results_engineered["QK-E2"] = train_evaluate_svm( + K_quantum_E2, K_qk_e2_test, y_train_eng, y_test_eng, "Quantum E2" +) + +# PQK E1 +pauli_test_E1 = get_pauli_vectors(embedding_E1, X_test) +gamma_E1 = calculate_gamma(np.vstack((get_pauli_vectors(embedding_E1, X_train), pauli_test_E1))) +K_pqk_e1_test = rbf_kernel(pauli_test_E1, get_pauli_vectors(embedding_E1, X_train), gamma=gamma_E1) +results_engineered["PQK-E1"] = train_evaluate_svm( + K_pqk_E1, K_pqk_e1_test, y_train_eng, y_test_eng, "PQK E1" +) + +# PQK E2 +pauli_test_E2 = get_pauli_vectors(embedding_E2, X_test) +gamma_E2 = calculate_gamma(np.vstack((get_pauli_vectors(embedding_E2, X_train), pauli_test_E2))) +K_pqk_e2_test = rbf_kernel(pauli_test_E2, get_pauli_vectors(embedding_E2, X_train), gamma=gamma_E2) +results_engineered["PQK-E2"] = train_evaluate_svm( + K_pqk_E2, K_pqk_e2_test, y_train_eng, y_test_eng, "PQK E2" +) + +# Summary +print("\n--- Accuracy Comparison (Engineered Labels) ---") +for model, acc in results_engineered.items(): + print(f"{model:>15}: {acc:.4f}") + +###################################################################### +# .. rst-class:: sphx-glr-script-out +# +# .. code-block:: none +# +# Training SVM with Classical RBF kernel... +# Test accuracy: 0.6111 +# Training SVM with Quantum E1 kernel... +# Test accuracy: 0.8667 +# Training SVM with Quantum E2 kernel... +# Test accuracy: 0.6222 +# Training SVM with PQK E1 kernel... +# Test accuracy: 0.8667 +# Training SVM with PQK E2 kernel... 
+# Test accuracy: 0.7444 +# +# --- Accuracy Comparison (Engineered Labels) --- +# Classical RBF: 0.6111 +# QK-E1: 0.8667 +# QK-E2: 0.6222 +# PQK-E1: 0.8667 +# PQK-E2: 0.7444 + +# Accuracy Comparison — Side-by-Side Bars (Original vs Engineered) for PQK-E1 + +import matplotlib.pyplot as plt +import numpy as np + +model_names = list(results.keys()) +x = np.arange(len(model_names)) # the label locations +width = 0.35 # width of each bar + +# Get accuracy values +acc_orig = [results[name] for name in model_names] +acc_eng = [results_engineered[name] for name in model_names] + +# Plot +plt.figure(figsize=(10, 5)) +bars1 = plt.bar(x - width / 2, acc_orig, width, label="Original", color="tab:blue") +bars2 = plt.bar(x + width / 2, acc_eng, width, label="Engineered", color="tab:orange") + +# Labels and ticks +plt.ylabel("Test Accuracy") +plt.title("SVM Accuracy by Kernel (Original vs Engineered Labels)") +plt.xticks(x, model_names, rotation=15) +plt.ylim(0, 1.05) +plt.grid(axis="y", linestyle="--", alpha=0.4) +plt.legend() + +# Annotate bars +for bar in bars1: + yval = bar.get_height() + plt.text(bar.get_x() + bar.get_width() / 2, yval + 0.015, f"{yval:.2f}", ha="center") + +for bar in bars2: + yval = bar.get_height() + plt.text(bar.get_x() + bar.get_width() / 2, yval + 0.015, f"{yval:.2f}", ha="center") + +plt.tight_layout() +plt.show() + + +###################################################################### +# +# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_17_1.png +# :align: center +# :width: 80% diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json new file mode 100644 index 0000000000..7a25f5faab --- /dev/null +++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json @@ -0,0 +1,40 @@ +{ + "title": "Before You Train: Pre-screening Quantum Kernels with Geometric Difference", + "authors": [ + { + "username": "andynader" + } + ], + "executable_stable": true, + "executable_latest": true, + "dateOfPublication": "", + "dateOfLastModification": "", + "categories": [ + "Quantum Machine Learning" + ], + "tags": [], + "previewImages": [ + { + "type": "thumbnail", + "uri": "/_static/demo_thumbnails/regular_demo_thumbnails/thumbnail_placeholder.png" + } + ], + "seoDescription": "Learn how to pre-screen quantum kernels using the geometric difference metric (g) before training. This tutorial implements the approach from Huang et al. (2021) to assess whether a quantum kernel offers any geometric advantage over classical kernels. Using synthetic data and multiple quantum embeddings, we demonstrate how to calculate g values and interpret them to identify promising quantum kernels worth pursuing. This saves valuable research time by ruling out fundamentally limited approaches early.", + "doi": "", + "references": [ + { + "id": "Huang2021", + "type": "preprint", + "title": "Power of data in quantum machine learning", + "authors": "Hsin-Yuan Huang, Michael Broughton, Masoud Mohseni, Ryan Babbush, Sergio Boixo, Hartmut Neven, Jarrod R. 
McClean", + "year": "2021", + "journal": "arXiv", + "url": "https://arxiv.org/abs/2011.01938" + } + ], + "basedOnPapers": [ + "2011.01938" + ], + "referencedByPapers": [], + "relatedContent": [] +} diff --git a/demonstrations_v2/huang_geometric_kernel_difference/requirements.in b/demonstrations_v2/huang_geometric_kernel_difference/requirements.in new file mode 100644 index 0000000000..2e44f41301 --- /dev/null +++ b/demonstrations_v2/huang_geometric_kernel_difference/requirements.in @@ -0,0 +1,5 @@ +pennylane +scikit-learn +numpy +matplotlib +scipy From c228923b31d4a75ab22993c8edf9024f081ce4ee Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 6 Oct 2025 16:32:13 -0400 Subject: [PATCH 02/31] Update metadata.json this is for validation and make a preview --- .../huang_geometric_kernel_difference/metadata.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json index 7a25f5faab..8536543e57 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json +++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json @@ -7,8 +7,8 @@ ], "executable_stable": true, "executable_latest": true, - "dateOfPublication": "", - "dateOfLastModification": "", + "dateOfPublication": "2025-10-06T10:00:00+00:00", + "dateOfLastModification": "2025-10-06T10:00:00+00:01", "categories": [ "Quantum Machine Learning" ], From 6b2b61d72b294b3d19e5edf09a8b4071da30447d Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 10:08:27 -0400 Subject: [PATCH 03/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 32 ++++++++----------- 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 4d1fee8b60..5866160d24 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -1,17 +1,17 @@ r"""Is a Quantum Kernel Worth Exploring? A ‘:math:`g`-first’ Reality Check ====================================================================== -Can we predict—*before* investing a ton of research hours- whether a quantum kernel has the +Can we predict---*before* investing a ton of research hours---whether a quantum kernel has the potential to beat a classical one across all kernel methods? From a practitioner’s perspective, such a **pre-screening test** is invaluable: it lets us rule out quantum kernels that don’t offer any potential quantum advantage right from the start. Huang *et al.* (https://arxiv.org/abs/2011.01938) introduced exactly this test. Their proposed -**geometric difference :math:`g`** metric is a single scalar that quantifies how differently the +**geometric difference** :math:`g` metric is a single scalar that quantifies how differently the geometries defined by two kernels represent your data. The formula for :math:`g` is: -:math:`g = \sqrt{\|\sqrt{K_q} K_c^{-1} \sqrt{K_q}\|_\infty}` +.. math:: g = \sqrt{\|\sqrt{K_q} K_c^{-1} \sqrt{K_q}\|_\infty}, where :math:`K_q` and :math:`K_c` are quantum and classical Gram matrices, respectively. 
@@ -47,12 +47,12 @@ What :math:`g` tells us: ------------------------ -- **If :math:`g` ≈ 1**: The quantum kernel’s geometry is essentially the same as a good classical +- **If** :math:`g \approx 1:` The quantum kernel’s geometry is essentially the same as a good classical kernel’s → the quantum kernel offers no geometric advantage, making it unlikely to outperform the classical kernel **in any kernel-based learning algorithm** (e.g., SVM, Gaussian Processes). Huang - et al. proved this concept in a rigorous mathematical way in their paper. + et al. proved this concept in a rigorous mathematical way in their paper. -- **If :math:`g` ≫ 1**: The quantum geometry is genuinely different → **a kernel method using the +- **If** :math:`g >> 1:` The quantum geometry is genuinely different → **a kernel method using the quantum kernel** *might* offer an advantage. Why this matters: @@ -66,7 +66,7 @@ When a quantum kernel performs poorly, researchers often spend days exploring different algorithm hyperparameters, cross-validation strategies, and implementation debugging. If :math:`g \approx 1`, -you immediately know the quantum kernel’s geometry offers no advantage—it’s not your implementation, +you immediately know the quantum kernel’s geometry offers no advantage---it’s not your implementation, not your algorithm choice, and not a hyperparameter issue. The kernel is fundamentally limited compared to classical kernels on this specific dataset. @@ -133,26 +133,22 @@ # .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_1_1.png # :align: center # :width: 80% - -###################################################################### +# # Quantum kernels: fidelity-based and projected variants # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# | We consider **five different kernels**, derived from three sources: a classical RBF kernel and two -# quantum embedding circuits — **E1** and **E2**. -# | Each kernel defines a different geometry for measuring similarity between data points. +# We consider **five different kernels**, derived from three sources: a classical RBF kernel and two +# quantum embedding circuits — **E1** and **E2**. +# Each kernel defines a different geometry for measuring similarity between data points. # # - | **RBF – Classical radial basis function kernel** # | A classical baseline defined as: # | # -# .. math:: -# -# -# k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) # -# This maps data into an infinite-dimensional space where closer inputs remain close, and distant -# ones become nearly orthogonal. +# | This maps data into an infinite-dimensional space where closer inputs remain close, and distant +# | ones become nearly orthogonal. # | It captures a **geometric**, distance-based notion of similarity in input space. 
# # - | **E1 – Separable RX rotations** From 4b6ddb945a79b2403095adca676239367e07773e Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 10:43:03 -0400 Subject: [PATCH 04/31] render quickly --- .../huang_geometric_kernel_difference/demo.py | 108 ++++++------------ .../metadata.json | 4 +- 2 files changed, 35 insertions(+), 77 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 5866160d24..1733d47755 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -44,7 +44,7 @@ The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store all pairwise similarities between data points. -What :math:`g` tells us: +What g tells us: ------------------------ - **If** :math:`g \approx 1:` The quantum kernel’s geometry is essentially the same as a good classical @@ -119,16 +119,11 @@ plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], s=15, alpha=0.8, label="class 0") plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], s=15, alpha=0.8, label="class 1") plt.axis("equal") -plt.title("Two‑moons — training split (standardised)") +plt.title("Two‑moons— training split (standardised)") plt.legend(frameon=False) plt.show() ###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# -# Train size: 210 Test size: 90 # # .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_1_1.png # :align: center @@ -141,74 +136,53 @@ # quantum embedding circuits — **E1** and **E2**. # Each kernel defines a different geometry for measuring similarity between data points. # -# - | **RBF – Classical radial basis function kernel** -# | A classical baseline defined as: -# | +# - **RBF – Classical radial basis function kernel** +# A classical baseline defined as: # # .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) # -# | This maps data into an infinite-dimensional space where closer inputs remain close, and distant -# | ones become nearly orthogonal. -# | It captures a **geometric**, distance-based notion of similarity in input space. -# -# - | **E1 – Separable RX rotations** -# | Each input feature :math:`x_j` is encoded into a single qubit using an :math:`RX(x_j)` gate. -# | The circuit is fully separable (no entanglement), producing the quantum state -# :math:`\lvert \psi_{\text{E1}}(x) \rangle`. +# This maps data into an infinite-dimensional space where closer inputs remain close, and distant +# ones become nearly orthogonal. +# It captures a **geometric**, distance-based notion of similarity in input space. # -# - | **E2 – IQP embedding** -# | PennyLane’s ``qml.IQPEmbedding`` applies Hadamards, parameterized :math:`RZ(x_j)` rotations, and -# entangling ZZ gates. -# | This creates an entangled quantum state :math:`\lvert \psi_{\text{E2}}(x) \rangle`, inspired by -# Instantaneous Quantum Polynomial (IQP) circuits. +# - **E1 – Separable RX rotations** +# Each input feature :math:`x_j` is encoded into a single qubit using an :math:`RX(x_j)` gate. +# The circuit is fully separable (no entanglement), producing the quantum state +# :math:`\lvert \psi_{\text{E1}}(x) \rangle`. 
# -# - | **QK – Standard quantum kernels** -# | For both E1 and E2, the kernel is defined by the **fidelity** between quantum states: +# - **E2 – IQP embedding** +# PennyLane’s ``qml.IQPEmbedding`` applies Hadamards, parameterized :math:`RZ(x_j)` rotations, and +# entangling ZZ gates. +# This creates an entangled quantum state :math:`\lvert \psi_{\text{E2}}(x) \rangle`, inspired by +# Instantaneous Quantum Polynomial (IQP) circuits. # -# .. math:: +# - **QK – Standard quantum kernels** +# For both E1 and E2, the kernel is defined by the **fidelity** between quantum states: # +# .. math:: k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 # -# k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 -# -# .. math:: -# -# -# k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 +# .. math:: k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 # # where :math:`\psi_{\text{E1}}(x)` and :math:`\psi_{\text{E2}}(x)` are the quantum states generated # by E1 and E2 respectively. These kernels reflect how aligned two quantum feature states are in # Hilbert space. # -# - | **PQK – Projected quantum kernels (PQK-E1 / PQK-E2)** -# | For a projected quantum kernel, instead of computing fidelity, the output quantum state -# :math:`|\psi(x)\rangle` -# | is **measured** to extract the expectation values of Pauli operators: -# | -# -# .. math:: +# - **PQK – Projected quantum kernels (PQK-E1 / PQK-E2)** +# For a projected quantum kernel, instead of computing fidelity, the output quantum state +# :math:`|\psi(x)\rangle` +# is **measured** to extract the expectation values of Pauli operators: # +# .. math:: v(x) = \left[ \langle X_0 \rangle, \langle Y_0 \rangle, \langle Z_0 \rangle, \dots, \langle Z_{n-1} \rangle \right] # -# v(x) = \left[ \langle X_0 \rangle, \langle Y_0 \rangle, \langle Z_0 \rangle, \dots, \langle Z_{n-1} \rangle \right] +# A classical **RBF kernel** is then applied to these real-valued vectors: # -# A classical **RBF kernel** is then applied to these real-valued vectors: -# | +# .. math:: k_{\text{PQK}}(x, x') = \exp\left( -\gamma \| v(x) - v(x') \|^2 \right) # -# .. math:: +# We obtain two different projected quantum kernels from E1 and E1: # +# .. math:: k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) # -# k_{\text{PQK}}(x, x') = \exp\left( -\gamma \| v(x) - v(x') \|^2 \right) -# -# We obtain two different projected quantum kernels from E1 and E1: -# -# .. math:: -# -# -# k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) -# -# .. math:: -# -# -# k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) +# .. math:: k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) # # where :math:`v_{\text{E1 }}(x)` and :math:`v_{\text{E2}}(x)` are the Pauli expectation vector from # E1 and E2 respectively. @@ -243,29 +217,19 @@ def embedding_E2(features): plt.show() ###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none # # E1 Embedding Circuit: # # .. 
figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_1.png # :align: center # :width: 80% - -###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# # # E2 Embedding Circuit: # # .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_2.png # :align: center # :width: 80% - -###################################################################### +# # Gram Matrix Computation # ~~~~~~~~~~~~~~~~~~~~~~~ # @@ -275,10 +239,7 @@ def embedding_E2(features): # For a dataset of :math:`N` samples and a kernel function :math:`k(\cdot, \cdot)`, the Gram matrix # :math:`K \in \mathbb{R}^{N \times N}` is defined entrywise as: # -# .. math:: -# -# -# K_{ij} = k(x_i, x_j) +# .. math:: K_{ij} = k(x_i, x_j) # # Each entry :math:`K_{ij}` measures how similar two data points are, and the full matrix :math:`K` # provides a **global view** of the data in the kernel’s feature space. @@ -287,10 +248,7 @@ def embedding_E2(features): # # :math:`K_{\text{RBF}}` obtained from: # -# .. math:: -# -# -# k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) # # :math:`K_{\text{QK-E1}}` obtained from: # diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json index 8536543e57..21de723e27 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json +++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json @@ -5,8 +5,8 @@ "username": "andynader" } ], - "executable_stable": true, - "executable_latest": true, + "executable_stable": false, + "executable_latest": false, "dateOfPublication": "2025-10-06T10:00:00+00:00", "dateOfLastModification": "2025-10-06T10:00:00+00:01", "categories": [ From b5aee266691ddf61caa014c9311dbd0ac89603ac Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:12:01 -0400 Subject: [PATCH 05/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 125 ++++-------------- 1 file changed, 28 insertions(+), 97 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 1733d47755..5188bf89c5 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -139,7 +139,7 @@ # - **RBF – Classical radial basis function kernel** # A classical baseline defined as: # -# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) # # This maps data into an infinite-dimensional space where closer inputs remain close, and distant # ones become nearly orthogonal. @@ -172,7 +172,7 @@ # :math:`|\psi(x)\rangle` # is **measured** to extract the expectation values of Pauli operators: # -# .. math:: v(x) = \left[ \langle X_0 \rangle, \langle Y_0 \rangle, \langle Z_0 \rangle, \dots, \langle Z_{n-1} \rangle \right] +# .. 
math:: v(x) = \left[ \langle X_0 \rangle, \langle Y_0 \rangle, \langle Z_0 \rangle, \dots, \langle Z_{n-1} \rangle \right] # # A classical **RBF kernel** is then applied to these real-valued vectors: # @@ -252,31 +252,19 @@ def embedding_E2(features): # # :math:`K_{\text{QK-E1}}` obtained from: # -# .. math:: -# -# -# k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 +# .. math:: k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 # # :math:`K_{\text{QK-E2}}` obtained from: # -# .. math:: -# -# -# k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 +# .. math:: k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 # # :math:`K_{\text{PQK-E1}}` obtained from: # -# .. math:: -# -# -# k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) +# .. math:: k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) # # :math:`K_{\text{PQK-E2}}` obtained from: # -# .. math:: -# -# -# k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) +# .. math:: k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) # # The gram matrices will be used in downstream evaluations to compare kernel geometries and analyze # expressivity and generalization metrics like :math:`g`. @@ -370,19 +358,7 @@ def pqk_kernel_matrix(X, embedding_func): print(f"K_PQK_E2 shape: {K_pqk_E2.shape}") ###################################################################### -# .. rst-class:: sphx-glr-script-out # -# .. code-block:: none -# -# K_RBF shape: (210, 210) -# Computing QK-E1 (fidelity)... -# Computing QK-E2 (fidelity)... -# K_QK_E1 shape: (210, 210) -# K_QK_E2 shape: (210, 210) -# Computing PQK-E1 (Pauli + RBF)... -# Computing PQK-E2 (Pauli + RBF)... -# K_PQK_E1 shape: (210, 210) -# K_PQK_E2 shape: (210, 210) # Visualizing the Gram Matrices import matplotlib.pyplot as plt @@ -417,15 +393,9 @@ def pqk_kernel_matrix(X, embedding_func): # :width: 80% ###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# -# Each matrix shows how similar data points are to each other: -# - Brighter colors = higher similarity -# - Different patterns indicate different geometries +# +# We then compute the practitioner’s metric :math:`g` for each quantum kernel, according to the formula used in the paper by Huang et al. -# We then compute the practitioner’s metric g for each quantum kernel, according to the formula used in the paper by Huang et al. from scipy.linalg import sqrtm @@ -464,19 +434,7 @@ def compute_g(K_classical, K_quantum, eps=1e-7): print(f"g (RBF vs PQK‑E2): {g_PQK_E2:.4f}") ###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# -# -# --- Geometric Difference (g) --- -# g (RBF vs QK‑E1): 8.3359 -# g (RBF vs QK‑E2): 2.1493 -# g (RBF vs PQK‑E1): 894.0699 -# g (RBF vs PQK‑E2): 194.6228 - -###################################################################### -# What does a high :math:`g` really tell us? +# What does a high g really tell us? 
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We can see that in terms of :math:`g`: @@ -487,10 +445,10 @@ def compute_g(K_classical, K_quantum, eps=1e-7): # classification performance, which might lead us to believe, for example, that in terms of final # accuracy, the ranking will also be PQK-E1 > PQK-E2 > QK-E1 > QK-E2. # -# | This intuition is understandable — after all, a larger :math:`g` suggests that the quantum kernel -# perceives the data very differently from a classical one. -# | But as we’ll see, **a higher :math:`g` doesn’t always translate into better accuracy, it just -# means there’s higher potential for an improvement over the classical model**. +# This intuition is understandable — after all, a larger :math:`g` suggests that the quantum kernel +# perceives the data very differently from a classical one. +# But as we’ll see, **a higher :math:`g` doesn’t always translate into better accuracy, it just +# means there’s higher potential for an improvement over the classical model**. # # In fact, a higher :math:`g` can sometimes correspond to worse performance on the original task. # @@ -549,29 +507,6 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): for model, acc in results.items(): print(f"{model:>15}: {acc:.4f}") -###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# -# Training SVM with Classical RBF kernel... -# Test accuracy: 0.9111 -# Training SVM with Quantum E1 kernel... -# Test accuracy: 0.8333 -# Training SVM with Quantum E2 kernel... -# Test accuracy: 0.8444 -# Training SVM with PQK E1 kernel... -# Test accuracy: 0.8333 -# Training SVM with PQK E2 kernel... -# Test accuracy: 1.0000 -# -# --- Accuracy Comparison --- -# Classical RBF: 0.9111 -# QK-E1: 0.8333 -# QK-E2: 0.8444 -# PQK-E1: 0.8333 -# PQK-E2: 1.0000 - # Accuracy Comparison import matplotlib.pyplot as plt @@ -605,13 +540,13 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # :width: 80% ###################################################################### -# | Our test results reveal an important subtlety: -# | **A higher geometric difference :math:`g` does not guarantee better classification accuracy.** +# Our test results reveal an important subtlety: +# **A higher geometric difference :math:`g` does not guarantee better classification accuracy.** # # For instance: - **PQK‑E2** achieved perfect test accuracy (:math:`100\%`), despite having a lower # :math:`g` than PQK‑E1. # -# | This highlights a key message from the paper: +# This highlights a key message from the paper: # | > The role of :math:`g` is *not* to predict which kernel will perform best on a given task — but # rather to obtain a collection of kernels that have the *potential* to offer an advantage. # @@ -621,8 +556,6 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # potential solution because it offers a potential for an improvement on our classification problem. # This way, we have an important diagnostic tool to filter out bad quantum kernels for our data. 
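# A side note for readers reproducing the SVM comparison above: the body of
# train_evaluate_svm is only partially visible in these hunks (its signature
# appears in the hunk headers), so the following is a minimal sketch of the
# scikit-learn precomputed-kernel pattern it relies on. The function name and
# matrix shapes here are illustrative assumptions, not the demo's exact code.

from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

def train_evaluate_precomputed(K_train, K_test, y_train, y_test):
    # kernel="precomputed" accepts Gram matrices in place of raw features:
    # K_train is (n_train, n_train); K_test is (n_test, n_train), i.e. the
    # kernel evaluated between every test point and every training point
    clf = SVC(kernel="precomputed")
    clf.fit(K_train, y_train)
    return accuracy_score(y_test, clf.predict(K_test))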
# - -###################################################################### # 🧠 Conclusion: A Practical Perspective on the Geometric Difference :math:`g` # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # @@ -640,23 +573,21 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # 🔑 Key takeaways: # ~~~~~~~~~~~~~~~~~ # -# - | **:math:`g` is a diagnostic, not a performance predictor.** -# | A large :math:`g` indicates that the quantum kernel induces a very different geometry from the -# classical one — a *necessary*, but not *sufficient*, condition for quantum advantage. +# - :math:`g` **is a diagnostic, not a performance predictor.** +# A large :math:`g` indicates that the quantum kernel induces a very different geometry from the +# classical one — a *necessary*, but not *sufficient*, condition for quantum advantage. # -# - | **Higher :math:`g` does not imply higher accuracy.** -# | In our results, **PQK‑E2** had a high :math:`g` and achieved perfect accuracy — but **PQK‑E1**, -# with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that -# :math:`g` measures *potential*, not realized performance. +# - **Higher** :math:`g` **does not imply higher accuracy.** +# In our results, **PQK‑E2** had a high :math:`g` and achieved perfect accuracy — but **PQK‑E1**, +# with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that +# :math:`g` measures *potential*, not realized performance. # -# - | **:math:`g`\ ’s value is in ruling out unpromising kernels.** -# | Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical -# methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a -# classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may -# be possible* — not that it will be realized. +# - :math:`g`**’s value is in ruling out unpromising kernels.** +# Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical +# methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a +# classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may +# be possible* — not that it will be realized. # - -###################################################################### # Appendix: What if We Take the Labels Into Account? # -------------------------------------------------- # From 4e87ef07b6cd1c2502016a1aabd34b910a347929 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:56:54 -0400 Subject: [PATCH 06/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 5188bf89c5..ec1357f714 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -538,8 +538,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # .. 
figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_10_1.png # :align: center # :width: 80% - -###################################################################### +# # Our test results reveal an important subtlety: # **A higher geometric difference :math:`g` does not guarantee better classification accuracy.** # @@ -547,7 +546,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # :math:`g` than PQK‑E1. # # This highlights a key message from the paper: -# | > The role of :math:`g` is *not* to predict which kernel will perform best on a given task — but +# > The role of :math:`g` is *not* to predict which kernel will perform best on a given task, but # rather to obtain a collection of kernels that have the *potential* to offer an advantage. # # Here, PQK-E1 and PQK-E2 both had the potential for an advantage over classical, but PQK-E2 is the @@ -582,7 +581,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that # :math:`g` measures *potential*, not realized performance. # -# - :math:`g`**’s value is in ruling out unpromising kernels.** +# - :math:`g`**'s value is in ruling out unpromising kernels.** # Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical # methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a # classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may From 0c88e88614eda6ea88f24b6744a9201028225a24 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:50:17 -0400 Subject: [PATCH 07/31] execute it again --- .../huang_geometric_kernel_difference/demo.py | 14 +++++++------- .../metadata.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index ec1357f714..2244126a13 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -540,13 +540,13 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # :width: 80% # # Our test results reveal an important subtlety: -# **A higher geometric difference :math:`g` does not guarantee better classification accuracy.** +# **A higher geometric difference** :math:`g` **does not guarantee better classification accuracy.** # -# For instance: - **PQK‑E2** achieved perfect test accuracy (:math:`100\%`), despite having a lower +# For instance, **PQK‑E2** achieved perfect test accuracy (:math:`100\%`), despite having a lower # :math:`g` than PQK‑E1. # # This highlights a key message from the paper: -# > The role of :math:`g` is *not* to predict which kernel will perform best on a given task, but +# - The role of :math:`g` is *not* to predict which kernel will perform best on a given task, but # rather to obtain a collection of kernels that have the *potential* to offer an advantage. 
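# One quick way to internalize the "g close to 1 means ruled out" reading is a
# sanity check in which the "quantum" Gram matrix is literally the classical
# one, so the metric should collapse to one. This is an illustrative sketch
# with an assumed jitter value, not part of the demo's own pipeline.

import numpy as np
from scipy.linalg import sqrtm
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
X_toy = rng.normal(size=(25, 2))
K = rbf_kernel(X_toy) + 1e-7 * np.eye(25)  # jitter keeps the inverse well conditioned
sqrt_K = np.real(sqrtm(K))
g_same = np.sqrt(np.linalg.norm(sqrt_K @ np.linalg.inv(K) @ sqrt_K, ord=2))
print(f"g for identical kernels: {g_same:.4f}")  # expected to be very close to 1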
# # Here, PQK-E1 and PQK-E2 both had the potential for an advantage over classical, but PQK-E2 is the @@ -560,11 +560,11 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # # In this notebook, we explored a fundamental question in quantum machine learning: # -# **Can we anticipate, before training, whether a quantum kernel might outperform a classical -# one?** +# **Can we anticipate, before training, whether a quantum kernel might outperform a classical +# one?** # # To address this, we used the **geometric difference :math:`g`**, a pre-training metric introduced by -# Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a +# Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a # classical kernel. # # -------------- @@ -581,7 +581,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that # :math:`g` measures *potential*, not realized performance. # -# - :math:`g`**'s value is in ruling out unpromising kernels.** +# - :math:`g` **value is in ruling out unpromising kernels.** # Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical # methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a # classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json index 21de723e27..d4c29d53f9 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json +++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json @@ -5,7 +5,7 @@ "username": "andynader" } ], - "executable_stable": false, + "executable_stable": true, "executable_latest": false, "dateOfPublication": "2025-10-06T10:00:00+00:00", "dateOfLastModification": "2025-10-06T10:00:00+00:01", From 441edb1dbdfc275f84eff25f1efd2db8561e088d Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 13:31:54 -0400 Subject: [PATCH 08/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 73 ++----------------- 1 file changed, 5 insertions(+), 68 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 2244126a13..ccec51151a 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -125,10 +125,6 @@ ###################################################################### # -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_1_1.png -# :align: center -# :width: 80% -# # Quantum kernels: fidelity-based and projected variants # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # @@ -218,18 +214,6 @@ def embedding_E2(features): ###################################################################### # -# E1 Embedding Circuit: -# -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_1.png -# :align: center -# :width: 80% -# -# E2 Embedding Circuit: -# -# .. 
figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_3_2.png -# :align: center -# :width: 80% -# # Gram Matrix Computation # ~~~~~~~~~~~~~~~~~~~~~~~ # @@ -386,12 +370,6 @@ def pqk_kernel_matrix(X, embedding_func): print("- Brighter colors = higher similarity") print("- Different patterns indicate different geometries") -###################################################################### -# -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_6_1.png -# :align: center -# :width: 80% - ###################################################################### # # We then compute the practitioner’s metric :math:`g` for each quantum kernel, according to the formula used in the paper by Huang et al. @@ -535,10 +513,6 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): ###################################################################### # -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_10_1.png -# :align: center -# :width: 80% -# # Our test results reveal an important subtlety: # **A higher geometric difference** :math:`g` **does not guarantee better classification accuracy.** # @@ -667,17 +641,9 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): y_test_eng = y_engineered[len(y_train) :] ###################################################################### -# .. rst-class:: sphx-glr-script-out # -# .. code-block:: none -# -# Recomputing QK‑E1 and classical kernel on full dataset... -# Generating engineered labels to favor QK‑E1... -# ✅ Engineered labels generated successfully. -# Class 0: 150 -# Class 1: 150 - # We plot the newly re-engineered dataset for PQK-E1 +# plt.figure(figsize=(4, 4)) plt.scatter( @@ -701,11 +667,9 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): ###################################################################### # -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_15_1.png -# :align: center -# :width: 80% - # We train SVMs using each kernel and compare test accuracy on the new engineered labels. +# + from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.metrics.pairwise import rbf_kernel @@ -756,29 +720,9 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): print(f"{model:>15}: {acc:.4f}") ###################################################################### -# .. rst-class:: sphx-glr-script-out -# -# .. code-block:: none -# -# Training SVM with Classical RBF kernel... -# Test accuracy: 0.6111 -# Training SVM with Quantum E1 kernel... -# Test accuracy: 0.8667 -# Training SVM with Quantum E2 kernel... -# Test accuracy: 0.6222 -# Training SVM with PQK E1 kernel... -# Test accuracy: 0.8667 -# Training SVM with PQK E2 kernel... 
-# Test accuracy: 0.7444 -# -# --- Accuracy Comparison (Engineered Labels) --- -# Classical RBF: 0.6111 -# QK-E1: 0.8667 -# QK-E2: 0.6222 -# PQK-E1: 0.8667 -# PQK-E2: 0.7444 - +# # Accuracy Comparison — Side-by-Side Bars (Original vs Engineered) for PQK-E1 +# import matplotlib.pyplot as plt import numpy as np @@ -815,10 +759,3 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): plt.tight_layout() plt.show() - - -###################################################################### -# -# .. figure:: ../_static/demonstration_assets/Quantum_Kernel_Geometric_Difference_Post_Feedback/Quantum_Kernel_Geometric_Difference_Post_Feedback_c_17_1.png -# :align: center -# :width: 80% From 42767179cfa67d53465821d321623eb449428578 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 14:20:50 -0400 Subject: [PATCH 09/31] this should fix all the formatting issues --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index ccec51151a..5eb3af3309 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -520,6 +520,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # :math:`g` than PQK‑E1. # # This highlights a key message from the paper: +# # - The role of :math:`g` is *not* to predict which kernel will perform best on a given task, but # rather to obtain a collection of kernels that have the *potential* to offer an advantage. # @@ -555,7 +556,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that # :math:`g` measures *potential*, not realized performance. # -# - :math:`g` **value is in ruling out unpromising kernels.** +# - :math:`g`\ ’s **value is in ruling out unpromising kernels.** # Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical # methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a # classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may From d7ae27bb1af02ae41952caf4d78b94c2da4f9e01 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Tue, 7 Oct 2025 15:46:16 -0400 Subject: [PATCH 10/31] Update demo.py --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 5eb3af3309..45a5b5ca31 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -542,8 +542,6 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a # classical kernel. 
# -# -------------- -# # 🔑 Key takeaways: # ~~~~~~~~~~~~~~~~~ # From 6e9401b5d0b66538baf80e94e75803f04db04cf6 Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Thu, 9 Oct 2025 15:11:23 -0400 Subject: [PATCH 11/31] Address reviewer feedback: add circuit titles and improve Gram matrix layout --- .../huang_geometric_kernel_difference/demo.py | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 45a5b5ca31..bb0bc782cd 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -204,14 +204,18 @@ def embedding_E2(features): qml.IQPEmbedding(features, wires=range(n_features)) -print("E1 Embedding Circuit:") +# E1 circuit with a visible title fig, ax = qml.draw_mpl(embedding_E1)(np.zeros(n_qubits)) +fig.suptitle("E1 Embedding Circuit", fontsize=12, y=0.98) plt.show() -print("\nE2 Embedding Circuit:") +# E2 circuit with a visible title fig, ax = qml.draw_mpl(embedding_E2)(np.zeros(n_qubits)) +fig.suptitle("E2 Embedding Circuit", fontsize=12, y=0.98) plt.show() + + ###################################################################### # # Gram Matrix Computation @@ -349,23 +353,32 @@ def pqk_kernel_matrix(X, embedding_func): # Visualize first 20x20 subset of each Gram matrix for clarity subset_size = 20 -fig, axes = plt.subplots(1, 5, figsize=(20, 4)) - matrices = [K_classical, K_quantum_E1, K_quantum_E2, K_pqk_E1, K_pqk_E2] titles = ["Classical RBF", "QK-E1", "QK-E2", "PQK-E1", "PQK-E2"] +rows, cols = 2, 3 +fig, axes = plt.subplots(rows, cols, figsize=(14, 8)) + +# Flatten axes array to loop over +axes = axes.flatten() + for i, (K, title) in enumerate(zip(matrices, titles)): im = axes[i].imshow(K[:subset_size, :subset_size], cmap="viridis", aspect="equal") axes[i].set_title(title) axes[i].set_xlabel("Sample index") - if i == 0: + if i % cols == 0: # only first column gets a y-label axes[i].set_ylabel("Sample index") plt.colorbar(im, ax=axes[i], fraction=0.046) +# Hide any unused subplots (since we have 5 matrices, but a 2x3 grid = 6 spots) +for j in range(len(matrices), len(axes)): + axes[j].axis("off") + plt.tight_layout() plt.suptitle(f"Gram Matrix Visualizations (first {subset_size}×{subset_size} entries)", y=1.02) plt.show() + print("Each matrix shows how similar data points are to each other:") print("- Brighter colors = higher similarity") print("- Different patterns indicate different geometries") From 3538ccccf4d9928f29424da46887d2f5fc745a80 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 20 Oct 2025 17:34:21 -0400 Subject: [PATCH 12/31] formatting --- .../huang_geometric_kernel_difference/demo.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index bb0bc782cd..6ce305b155 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -15,14 +15,14 @@ where :math:`K_q` and :math:`K_c` are quantum and classical Gram matrices, respectively. 
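# To make the geometric-difference formula above concrete, here is a tiny
# worked example with purely illustrative numbers: for diagonal Gram matrices
# the expression reduces to the square root of the largest ratio of quantum to
# classical kernel eigenvalues.

import numpy as np
from scipy.linalg import sqrtm

K_c_toy = np.diag([1.0, 0.5])
K_q_toy = np.diag([1.0, 2.0])
M_toy = np.real(sqrtm(K_q_toy)) @ np.linalg.inv(K_c_toy) @ np.real(sqrtm(K_q_toy))
print(np.sqrt(np.linalg.norm(M_toy, ord=2)))  # sqrt(2.0 / 0.5) = 2.0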
-Kernel Refresher +Kernel refresher ---------------- A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without explicitly computing their feature representations in high-dimensional spaces, thus lowering the computational cost. -Classical Kernels +Classical kernels ~~~~~~~~~~~~~~~~~ - Example: RBF (Radial Basis Function) kernel @@ -30,7 +30,7 @@ - Implicitly computes: :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle` - The feature map :math:`\phi(x)` projects to infinite dimensions but is never calculated directly -Quantum Kernels +Quantum kernels ~~~~~~~~~~~~~~~ - Formula: :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2` @@ -38,7 +38,7 @@ - For :math:`n` qubits, the quantum state lives in a :math:`2^n`-dimensional Hilbert space that is implicitly manipulated -Key Concepts +Key concepts ~~~~~~~~~~~~ The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store @@ -218,7 +218,7 @@ def embedding_E2(features): ###################################################################### # -# Gram Matrix Computation +# Gram matrix computation # ~~~~~~~~~~~~~~~~~~~~~~~ # # Using the kernels defined above, we now build the **Gram (kernel) matrices** required to compute the @@ -543,7 +543,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # potential solution because it offers a potential for an improvement on our classification problem. # This way, we have an important diagnostic tool to filter out bad quantum kernels for our data. # -# 🧠 Conclusion: A Practical Perspective on the Geometric Difference :math:`g` +# 🧠 Conclusion: A practical perspective on the Geometric difference :math:`g` # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # In this notebook, we explored a fundamental question in quantum machine learning: @@ -573,7 +573,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may # be possible* — not that it will be realized. # -# Appendix: What if We Take the Labels Into Account? +# Appendix: What if we take the labels into account? # -------------------------------------------------- # # The cells above explore the importance of :math:`g` in a practical setting. However, as an appendix, @@ -589,7 +589,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # labels, but it’s good for getting more intuition about the role of :math:`g` and why it sometimes # fails in predicting performance. 
# -# How Label Re-engineering Works: +# How label re-engineering works: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Given :math:`K_Q` and :math:`K_C`, the process generates new labels that maximize quantum kernel @@ -733,7 +733,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): ###################################################################### # -# Accuracy Comparison — Side-by-Side Bars (Original vs Engineered) for PQK-E1 +# Accuracy comparison — side-by-side bars (Original vs Engineered) for PQK-E1 # import matplotlib.pyplot as plt From 82b72fbcedc9591f71634144bd0edbe9d248bd0c Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:06:13 -0400 Subject: [PATCH 13/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 6ce305b155..0170f1ee50 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -86,7 +86,7 @@ 3. **Our approach**: Calculate :math:`g` values between the classical kernel and each quantum kernel—giving us an immediate assessment of which quantum approaches might be worth pursuing - across any kernel-based method + across any kernel-based method. """ # We first start by generating and visualizing the artificial data From d4708dcd72dcc96a429ca6b7e51f1e7206a13223 Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:06:35 -0400 Subject: [PATCH 14/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 0170f1ee50..a4793cce0f 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -73,7 +73,7 @@ Demonstration setup: -------------------- -1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn`` +1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn``. 2. 
**Five kernels to compare**: - **Classical baseline**: Gaussian-RBF kernel From adf19b51a238888d13e3717368f2863763c343f9 Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:10:40 -0400 Subject: [PATCH 15/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index a4793cce0f..30bb26a426 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -232,7 +232,7 @@ def embedding_E2(features): # Each entry :math:`K_{ij}` measures how similar two data points are, and the full matrix :math:`K` # provides a **global view** of the data in the kernel’s feature space. # -# We compute five such matrices, one for each kernel defined above: +# We compute five such matrices, one for each kernel defined above. # # :math:`K_{\text{RBF}}` obtained from: # From f92751a37db21a986d20aa7382df246c9a884c9b Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:11:21 -0400 Subject: [PATCH 16/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 30bb26a426..e509aefb09 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -438,7 +438,7 @@ def compute_g(K_classical, K_quantum, eps=1e-7): # # This intuition is understandable — after all, a larger :math:`g` suggests that the quantum kernel # perceives the data very differently from a classical one. -# But as we’ll see, **a higher :math:`g` doesn’t always translate into better accuracy, it just +# But as we’ll see, **a higher** :math:`g` **doesn’t always translate into better accuracy, it just # means there’s higher potential for an improvement over the classical model**. # # In fact, a higher :math:`g` can sometimes correspond to worse performance on the original task. From d781bfe6dd05a45978659fef4c52052c2315c13d Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:11:41 -0400 Subject: [PATCH 17/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index e509aefb09..4e1a459f87 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -543,7 +543,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # potential solution because it offers a potential for an improvement on our classification problem. # This way, we have an important diagnostic tool to filter out bad quantum kernels for our data. 
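# Since compute_g appears in these hunks only through its signature
# (def compute_g(K_classical, K_quantum, eps=1e-7)), here is one possible
# standalone sketch of the screening step, written directly from the formula
# g = sqrt(||sqrt(K_q) K_c^{-1} sqrt(K_q)||_inf). How the demo applies its eps
# regularization internally is an assumption on our part.

import numpy as np
from scipy.linalg import sqrtm

def geometric_difference(K_c, K_q, eps=1e-7):
    # regularize the classical Gram matrix so that the inverse is well defined
    K_c_reg = K_c + eps * np.eye(K_c.shape[0])
    sqrt_Kq = np.real(sqrtm(K_q))  # matrix square root of the quantum Gram matrix
    M = sqrt_Kq @ np.linalg.inv(K_c_reg) @ sqrt_Kq
    # ord=2 is the spectral norm, i.e. the largest singular value of M
    return np.sqrt(np.linalg.norm(M, ord=2))

# Reading the result: a value near 1 rules the quantum kernel out up front,
# while a value much larger than 1 flags it as worth a closer look.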
# -# 🧠 Conclusion: A practical perspective on the Geometric difference :math:`g` +# 🧠 Conclusion: A practical perspective on the Geometric difference g # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # In this notebook, we explored a fundamental question in quantum machine learning: From 11ce2f85ea1e810fa89ef83ef1ac34cb6d7edb87 Mon Sep 17 00:00:00 2001 From: Andrew Nader Date: Wed, 29 Oct 2025 15:11:54 -0400 Subject: [PATCH 18/31] Update demonstrations_v2/huang_geometric_kernel_difference/demo.py Co-authored-by: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 4e1a459f87..f78ca7c1a8 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -551,7 +551,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # **Can we anticipate, before training, whether a quantum kernel might outperform a classical # one?** # -# To address this, we used the **geometric difference :math:`g`**, a pre-training metric introduced by +# To address this, we used the **geometric difference** :math:`g`, a pre-training metric introduced by # Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a # classical kernel. # From 1c54d6d5065ee6af57e58dd257b246114c9654eb Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:37:36 -0500 Subject: [PATCH 19/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 31 ++++++++----------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index f78ca7c1a8..eee9b99e7a 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -15,28 +15,23 @@ where :math:`K_q` and :math:`K_c` are quantum and classical Gram matrices, respectively. -Kernel refresher ----------------- -A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without -explicitly computing their feature representations in high-dimensional spaces, thus lowering the -computational cost. +.. admonition:: Kernel refresher + :class: note -Classical kernels -~~~~~~~~~~~~~~~~~ + A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without + explicitly computing their feature representations in high-dimensional spaces, thus lowering the + computational cost. -- Example: RBF (Radial Basis Function) kernel -- Formula: :math:`k(x, x') = \exp(-\gamma \|x - x'\|^2)` -- Implicitly computes: :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle` -- The feature map :math:`\phi(x)` projects to infinite dimensions but is never calculated directly + An example of a classical kernel is the Radial Basis Function (RBF) kernel given by + :math:`k(x, x') = \exp(-\gamma \|x - x'\|^2)`. It implicitly computes the inner product + :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle`. The feature map :math:`\phi(x)` projects + to infinite dimensions, but it is never calculated directly. 
-Quantum kernels -~~~~~~~~~~~~~~~ - -- Formula: :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2` -- :math:`|\psi(x)\rangle` is the quantum state encoding the classical data :math:`x` -- For :math:`n` qubits, the quantum state lives in a :math:`2^n`-dimensional Hilbert space that is - implicitly manipulated + Quantum kernels are similar but leverage the Hilbert space of a quantum computer. It is defined by + :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2`, where :math:`|\psi(x)\rangle` is the quantum + state encoding the classical data :math:`x`. For :math:`n` qubits, the quantum state lives in a + :math:`2^n`-dimensional Hilbert space that is implicitly manipulated. Key concepts ~~~~~~~~~~~~ From 576dba6838b8dc0b21bf4151303260c379ddf5d6 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Thu, 27 Nov 2025 17:04:10 -0500 Subject: [PATCH 20/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index eee9b99e7a..a3e0d83f9e 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -28,30 +28,25 @@ :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle`. The feature map :math:`\phi(x)` projects to infinite dimensions, but it is never calculated directly. - Quantum kernels are similar but leverage the Hilbert space of a quantum computer. It is defined by + Quantum kernels are similar but leverage the Hilbert space of a quantum computer. A quantum kernel is defined by :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2`, where :math:`|\psi(x)\rangle` is the quantum state encoding the classical data :math:`x`. For :math:`n` qubits, the quantum state lives in a :math:`2^n`-dimensional Hilbert space that is implicitly manipulated. -Key concepts -~~~~~~~~~~~~ - -The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store +**Key concepts**: The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store all pairwise similarities between data points. -What g tells us: ------------------------- - -- **If** :math:`g \approx 1:` The quantum kernel’s geometry is essentially the same as a good classical - kernel’s → the quantum kernel offers no geometric advantage, making it unlikely to outperform the - classical kernel **in any kernel-based learning algorithm** (e.g., SVM, Gaussian Processes). Huang - et al. proved this concept in a rigorous mathematical way in their paper. +What g tells us +--------------- -- **If** :math:`g >> 1:` The quantum geometry is genuinely different → **a kernel method using the - quantum kernel** *might* offer an advantage. +When :math:`g \approx 1:`, the quantum kernel’s geometry is essentially the same as a good classical +kernel’s. The quantum kernel offers no geometric advantage, making it unlikely to outperform the +classical kernel **in any kernel-based learning algorithm** (e.g., SVM, Gaussian Processes). Huang +et al. proved this concept in a rigorous mathematical way in their paper. Conversely, if :math:`g >> 1:`, +the quantum geometry is genuinely different. A kernel method using the quantum kernel *might* offer an advantage. 
-Why this matters: ------------------ +Why this matters +---------------- This approach focuses on ruling out underperforming quantum kernels before investing in training. From a complexity theory point of view, computing :math:`g` scales as :math:`O(n^3)` due to the From 72a9c563db407bff44409a3f713f09526f8094bc Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Sun, 30 Nov 2025 16:32:19 -0500 Subject: [PATCH 21/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 55 ++++++------------- 1 file changed, 17 insertions(+), 38 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index a3e0d83f9e..8bceb0a36e 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -118,11 +118,11 @@ # Quantum kernels: fidelity-based and projected variants # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# We consider **five different kernels**, derived from three sources: a classical RBF kernel and two -# quantum embedding circuits — **E1** and **E2**. +# We consider **five different kernels** derived from three sources: a classical RBF kernel and two +# quantum embedding circuits—**E1** and **E2**. # Each kernel defines a different geometry for measuring similarity between data points. # -# - **RBF – Classical radial basis function kernel** +# - **RBF – Classical radial basis function kernel.** # A classical baseline defined as: # # .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) @@ -131,29 +131,29 @@ # ones become nearly orthogonal. # It captures a **geometric**, distance-based notion of similarity in input space. # -# - **E1 – Separable RX rotations** +# - **E1 – Separable RX rotations.** # Each input feature :math:`x_j` is encoded into a single qubit using an :math:`RX(x_j)` gate. # The circuit is fully separable (no entanglement), producing the quantum state # :math:`\lvert \psi_{\text{E1}}(x) \rangle`. # -# - **E2 – IQP embedding** +# - **E2 – IQP embedding.** # PennyLane’s ``qml.IQPEmbedding`` applies Hadamards, parameterized :math:`RZ(x_j)` rotations, and # entangling ZZ gates. # This creates an entangled quantum state :math:`\lvert \psi_{\text{E2}}(x) \rangle`, inspired by # Instantaneous Quantum Polynomial (IQP) circuits. # -# - **QK – Standard quantum kernels** +# - **QK – Standard quantum kernels.** # For both E1 and E2, the kernel is defined by the **fidelity** between quantum states: # -# .. math:: k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 +# .. math:: k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2, # -# .. math:: k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 +# .. math:: k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2, # # where :math:`\psi_{\text{E1}}(x)` and :math:`\psi_{\text{E2}}(x)` are the quantum states generated # by E1 and E2 respectively. These kernels reflect how aligned two quantum feature states are in # Hilbert space. 
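# For readers who want to see one concrete way such a fidelity can be
# evaluated, here is a sketch using the standard adjoint trick on an E1-style
# RX embedding. The two-qubit device and the sample inputs are illustrative
# assumptions; this is not necessarily how the demo's own kernel-matrix
# helpers compute it.

import numpy as np
import pennylane as qml

n_wires = 2
dev = qml.device("default.qubit", wires=n_wires)

@qml.qnode(dev)
def fidelity_circuit(x1, x2):
    # prepare |psi(x1)>, then undo the x2 embedding; the probability of
    # measuring |0...0> is exactly |<psi(x2)|psi(x1)>|^2
    for j in range(n_wires):
        qml.RX(x1[j], wires=j)
    for j in range(n_wires):
        qml.adjoint(qml.RX)(x2[j], wires=j)
    return qml.probs(wires=range(n_wires))

k_entry = fidelity_circuit(np.array([0.1, 0.2]), np.array([0.3, 0.4]))[0]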
# -# - **PQK – Projected quantum kernels (PQK-E1 / PQK-E2)** +# - **PQK – Projected quantum kernels (PQK-E1 / PQK-E2).** # For a projected quantum kernel, instead of computing fidelity, the output quantum state # :math:`|\psi(x)\rangle` # is **measured** to extract the expectation values of Pauli operators: @@ -162,16 +162,16 @@ # # A classical **RBF kernel** is then applied to these real-valued vectors: # -# .. math:: k_{\text{PQK}}(x, x') = \exp\left( -\gamma \| v(x) - v(x') \|^2 \right) +# .. math:: k_{\text{PQK}}(x, x') = \exp\left( -\gamma \| v(x) - v(x') \|^2 \right). # # We obtain two different projected quantum kernels from E1 and E1: # -# .. math:: k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) +# .. math:: k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right), # -# .. math:: k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) +# .. math:: k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right), # # where :math:`v_{\text{E1 }}(x)` and :math:`v_{\text{E2}}(x)` are the Pauli expectation vector from -# E1 and E2 respectively. +# E1 and E2, respectively. # # We define the embedding circuits E1 and E2, and we visualize them. @@ -224,27 +224,7 @@ def embedding_E2(features): # # We compute five such matrices, one for each kernel defined above. # -# :math:`K_{\text{RBF}}` obtained from: -# -# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) -# -# :math:`K_{\text{QK-E1}}` obtained from: -# -# .. math:: k_{\text{QK-E1}}(x, x') = |\langle \psi_{\text{E1}}(x) \mid \psi_{\text{E1}}(x') \rangle|^2 -# -# :math:`K_{\text{QK-E2}}` obtained from: -# -# .. math:: k_{\text{QK-E2}}(x, x') = |\langle \psi_{\text{E2}}(x) \mid \psi_{\text{E2}}(x') \rangle|^2 -# -# :math:`K_{\text{PQK-E1}}` obtained from: -# -# .. math:: k_{\text{PQK-E1}}(x, x') = \exp\left( -\gamma \|v_{\text{E1}}(x) - v_{\text{E1}}(x')\|^2 \right) -# -# :math:`K_{\text{PQK-E2}}` obtained from: -# -# .. math:: k_{\text{PQK-E2}}(x, x') = \exp\left( -\gamma \|v_{\text{E2}}(x) - v_{\text{E2}}(x')\|^2 \right) -# -# The gram matrices will be used in downstream evaluations to compare kernel geometries and analyze +# The Gram matrices will be used in downstream evaluations to compare kernel geometries and analyze # expressivity and generalization metrics like :math:`g`. # @@ -426,7 +406,7 @@ def compute_g(K_classical, K_quantum, eps=1e-7): # classification performance, which might lead us to believe, for example, that in terms of final # accuracy, the ranking will also be PQK-E1 > PQK-E2 > QK-E1 > QK-E2. # -# This intuition is understandable — after all, a larger :math:`g` suggests that the quantum kernel +# This intuition is understandable—after all, a larger :math:`g` suggests that the quantum kernel # perceives the data very differently from a classical one. # But as we’ll see, **a higher** :math:`g` **doesn’t always translate into better accuracy, it just # means there’s higher potential for an improvement over the classical model**. @@ -518,7 +498,6 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # # Our test results reveal an important subtlety: # **A higher geometric difference** :math:`g` **does not guarantee better classification accuracy.** -# # For instance, **PQK‑E2** achieved perfect test accuracy (:math:`100\%`), despite having a lower # :math:`g` than PQK‑E1. 
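# Because the projected kernels keep coming up in this comparison, here is a
# compact sketch of the measure-then-RBF pipeline they are built on: embed each
# sample, read out single-qubit Pauli expectations, and feed those classical
# vectors to an RBF kernel. The demo's own helper is pqk_kernel_matrix(X,
# embedding_func); the embedding choice, gamma, and names below are simplified
# assumptions for illustration.

import numpy as np
import pennylane as qml
from sklearn.metrics.pairwise import rbf_kernel

n_wires = 2
dev = qml.device("default.qubit", wires=n_wires)

@qml.qnode(dev)
def pauli_expectations(x):
    # E1-style separable embedding, then <X>, <Y>, <Z> on every wire
    for j in range(n_wires):
        qml.RX(x[j], wires=j)
    obs = []
    for j in range(n_wires):
        obs += [qml.expval(qml.PauliX(j)), qml.expval(qml.PauliY(j)), qml.expval(qml.PauliZ(j))]
    return obs

def projected_kernel_matrix(X, gamma=1.0):
    V = np.array([pauli_expectations(x) for x in X])  # one feature vector per row
    return rbf_kernel(V, gamma=gamma)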
# @@ -534,7 +513,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # This way, we have an important diagnostic tool to filter out bad quantum kernels for our data. # # 🧠 Conclusion: A practical perspective on the Geometric difference g -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# -------------------------------------------------------------------- # # In this notebook, we explored a fundamental question in quantum machine learning: # @@ -546,7 +525,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # classical kernel. # # 🔑 Key takeaways: -# ~~~~~~~~~~~~~~~~~ +# ----------------- # # - :math:`g` **is a diagnostic, not a performance predictor.** # A large :math:`g` indicates that the quantum kernel induces a very different geometry from the From 4d89a8c351c4202103a9ff26c466859302357ba2 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Sun, 30 Nov 2025 16:36:28 -0500 Subject: [PATCH 22/31] Update demo.py --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 8bceb0a36e..cb8a3219d4 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -515,10 +515,8 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # 🧠 Conclusion: A practical perspective on the Geometric difference g # -------------------------------------------------------------------- # -# In this notebook, we explored a fundamental question in quantum machine learning: -# -# **Can we anticipate, before training, whether a quantum kernel might outperform a classical -# one?** +# In this notebook, we explored a fundamental question in quantum machine learning: **Can we anticipate, +# before training, whether a quantum kernel might outperform a classical one?** # # To address this, we used the **geometric difference** :math:`g`, a pre-training metric introduced by # Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a From a3927db577cbf768f1cd22bd161e809485d52e47 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 1 Dec 2025 10:24:07 -0500 Subject: [PATCH 23/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index cb8a3219d4..7eef24f5ce 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -60,8 +60,8 @@ not your algorithm choice, and not a hyperparameter issue. The kernel is fundamentally limited compared to classical kernels on this specific dataset. -Demonstration setup: --------------------- +Demonstration setup +------------------- 1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn``. 2. 
**Five kernels to compare**: @@ -105,7 +105,7 @@ print(f"Train size: {X_train.shape[0]} Test size: {X_test.shape[0]}") # visualize it using a scatter plot -plt.figure(figsize=(4, 4)) +plt.figure(figsize=(3, 3)) plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], s=15, alpha=0.8, label="class 0") plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], s=15, alpha=0.8, label="class 1") plt.axis("equal") @@ -396,7 +396,7 @@ def compute_g(K_classical, K_quantum, eps=1e-7): ###################################################################### # What does a high g really tell us? -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We can see that in terms of :math:`g`: # @@ -522,8 +522,8 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a # classical kernel. # -# 🔑 Key takeaways: -# ----------------- +# 🔑 Key takeaways +# ---------------- # # - :math:`g` **is a diagnostic, not a performance predictor.** # A large :math:`g` indicates that the quantum kernel induces a very different geometry from the From 23efe33573d2026df62c3bceb31c85ee93b49ee8 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 1 Dec 2025 11:01:49 -0500 Subject: [PATCH 24/31] Update demo.py --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 7eef24f5ce..ddb430b39f 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -105,7 +105,7 @@ print(f"Train size: {X_train.shape[0]} Test size: {X_test.shape[0]}") # visualize it using a scatter plot -plt.figure(figsize=(3, 3)) +plt.figure(figsize=(4, 4)) plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], s=15, alpha=0.8, label="class 0") plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], s=15, alpha=0.8, label="class 1") plt.axis("equal") From 103654f0abad6e429beef98968534b7044d91eea Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 1 Dec 2025 14:21:08 -0500 Subject: [PATCH 25/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index ddb430b39f..6533e7a084 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -39,10 +39,10 @@ What g tells us --------------- -When :math:`g \approx 1:`, the quantum kernel’s geometry is essentially the same as a good classical +When :math:`g \approx 1`, the quantum kernel’s geometry is essentially the same as a good classical kernel’s. The quantum kernel offers no geometric advantage, making it unlikely to outperform the classical kernel **in any kernel-based learning algorithm** (e.g., SVM, Gaussian Processes). Huang -et al. proved this concept in a rigorous mathematical way in their paper. Conversely, if :math:`g >> 1:`, +et al. proved this concept in a rigorous mathematical way in their paper. 
Conversely, if :math:`g >> 1`, the quantum geometry is genuinely different. A kernel method using the quantum kernel *might* offer an advantage. Why this matters @@ -63,6 +63,9 @@ Demonstration setup ------------------- +The following demonstration is designed to perform a pre-screening test on several +kernel methods—both classical and quantum—on a simple classification task. + 1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn``. 2. **Five kernels to compare**: @@ -125,7 +128,7 @@ # - **RBF – Classical radial basis function kernel.** # A classical baseline defined as: # -# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2) +# .. math:: k_{\text{RBF}}(x, x') = \exp(-\gamma \|x - x'\|^2). # # This maps data into an infinite-dimensional space where closer inputs remain close, and distant # ones become nearly orthogonal. @@ -212,25 +215,21 @@ def embedding_E2(features): # ~~~~~~~~~~~~~~~~~~~~~~~ # # Using the kernels defined above, we now build the **Gram (kernel) matrices** required to compute the -# practitioner’s metric :math:`g`. -# -# For a dataset of :math:`N` samples and a kernel function :math:`k(\cdot, \cdot)`, the Gram matrix +# practitioner’s metric :math:`g`. For a dataset of :math:`N` samples and a kernel function :math:`k(\cdot, \cdot)`, the Gram matrix # :math:`K \in \mathbb{R}^{N \times N}` is defined entrywise as: # -# .. math:: K_{ij} = k(x_i, x_j) +# .. math:: K_{ij} = k(x_i, x_j). # # Each entry :math:`K_{ij}` measures how similar two data points are, and the full matrix :math:`K` -# provides a **global view** of the data in the kernel’s feature space. -# +# provides a **global view** of the data in the kernel’s feature space. # We compute five such matrices, one for each kernel defined above. # # The Gram matrices will be used in downstream evaluations to compare kernel geometries and analyze # expressivity and generalization metrics like :math:`g`. # - # The following code builds all five Gram (kernel) matrices: Classical, QK-E1, QK-E2, PQK-E1, PQK-E2 -from sklearn.metrics.pairwise import rbf_kernel +from sklearn.metrics.pairwise import rbf_kernel # ---------------------------------------------------------------------------# # Classical RBF Gram matrix # @@ -317,8 +316,8 @@ def pqk_kernel_matrix(X, embedding_func): ###################################################################### # +# Let's now visualize the Gram Matrices -# Visualizing the Gram Matrices import matplotlib.pyplot as plt # Visualize first 20x20 subset of each Gram matrix for clarity @@ -398,9 +397,7 @@ def compute_g(K_classical, K_quantum, eps=1e-7): # What does a high g really tell us? # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# We can see that in terms of :math:`g`: -# -# PQK-E1 > PQK-E2 > QK-E1 > QK-E2. +# We can see that in terms of :math:`g`: PQK-E1 > PQK-E2 > QK-E1 > QK-E2. # # A common misconception is that a higher geometric difference :math:`g` automatically means better # classification performance, which might lead us to believe, for example, that in terms of final @@ -410,7 +407,6 @@ def compute_g(K_classical, K_quantum, eps=1e-7): # perceives the data very differently from a classical one. # But as we’ll see, **a higher** :math:`g` **doesn’t always translate into better accuracy, it just # means there’s higher potential for an improvement over the classical model**. -# # In fact, a higher :math:`g` can sometimes correspond to worse performance on the original task. # # Let’s see this in action. 
From d02d404bed00380f04f2da734e2f55002328f2b9 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 1 Dec 2025 14:23:30 -0500 Subject: [PATCH 26/31] Update demo.py --- demonstrations_v2/huang_geometric_kernel_difference/demo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 6533e7a084..9458109500 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -523,10 +523,10 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name): # # - :math:`g` **is a diagnostic, not a performance predictor.** # A large :math:`g` indicates that the quantum kernel induces a very different geometry from the -# classical one — a *necessary*, but not *sufficient*, condition for quantum advantage. +# classical one—a *necessary*, but not *sufficient*, condition for quantum advantage. # # - **Higher** :math:`g` **does not imply higher accuracy.** -# In our results, **PQK‑E2** had a high :math:`g` and achieved perfect accuracy — but **PQK‑E1**, +# In our results, **PQK‑E2** had a high :math:`g` and achieved perfect accuracy—but **PQK‑E1**, # with a higher :math:`g`, obtained a lower accuracy on the original task. This confirms that # :math:`g` measures *potential*, not realized performance. # From cac8329b13291691d4f9c69fb577235b1a14cac4 Mon Sep 17 00:00:00 2001 From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com> Date: Mon, 1 Dec 2025 16:27:13 -0500 Subject: [PATCH 27/31] Update demo.py --- .../huang_geometric_kernel_difference/demo.py | 61 +++++++++---------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py index 9458109500..ff050dbd05 100644 --- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py +++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py @@ -15,29 +15,11 @@ where :math:`K_q` and :math:`K_c` are quantum and classical Gram matrices, respectively. +The following demonstration is designed to perform a pre-screening test on several +kernel methods—both classical and quantum—on a simple classification task. -.. admonition:: Kernel refresher - :class: note - - A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without - explicitly computing their feature representations in high-dimensional spaces, thus lowering the - computational cost. - - An example of a classical kernel is the Radial Basis Function (RBF) kernel given by - :math:`k(x, x') = \exp(-\gamma \|x - x'\|^2)`. It implicitly computes the inner product - :math:`k(x, x') = \langle\phi(x), \phi(x')\rangle`. The feature map :math:`\phi(x)` projects - to infinite dimensions, but it is never calculated directly. - - Quantum kernels are similar but leverage the Hilbert space of a quantum computer. A quantum kernel is defined by - :math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2`, where :math:`|\psi(x)\rangle` is the quantum - state encoding the classical data :math:`x`. For :math:`n` qubits, the quantum state lives in a - :math:`2^n`-dimensional Hilbert space that is implicitly manipulated. 
-
-**Key concepts**: The **kernel matrix** :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store
-all pairwise similarities between data points.
-
-What g tells us
----------------
+What g tells us and why it is important
+---------------------------------------
 
 When :math:`g \approx 1`, the quantum kernel’s geometry is essentially the same as a good classical
 kernel’s. The quantum kernel offers no geometric advantage, making it unlikely to outperform the
@@ -45,10 +27,7 @@
 et al. proved this concept in a rigorous mathematical way in their paper.
 Conversely, if :math:`g \gg 1`, the quantum geometry is genuinely different. A kernel method using the
 quantum kernel *might* offer an advantage.
-Why this matters
-----------------
-
-This approach focuses on ruling out underperforming quantum kernels before investing in training.
+The approach presented in this demo focuses on ruling out underperforming quantum kernels before investing in training.
 From a complexity theory point of view, computing :math:`g` scales as :math:`O(n^3)` due to the
 matrix inversion, and the most expensive training algorithms such as Gaussian Processes also scale
 as :math:`O(n^3)`, so we might think we are not saving any computational time. However, from a
@@ -60,12 +39,31 @@
 not your algorithm choice, and not a hyperparameter issue. The kernel is fundamentally limited
 compared to classical kernels on this specific dataset.
 
+
+Background context: kernels
+---------------------------
+
+A **kernel** is a function :math:`k(x, x')` that measures similarity between data points without
+explicitly computing their feature representations in high-dimensional spaces, thus lowering the
+computational cost.
+
+An example of a classical kernel is the Radial Basis Function (RBF) kernel given by
+:math:`k(x, x') = \exp(-\gamma \|x - x'\|^2)`. It implicitly computes the inner product
+:math:`k(x, x') = \langle\phi(x), \phi(x')\rangle`. The feature map :math:`\phi(x)` projects
+to infinite dimensions, but it is never calculated directly.
+
+Quantum kernels are similar but leverage the Hilbert space of a quantum computer. A quantum kernel is defined by
+:math:`k(x, x') = |\langle\psi(x)|\psi(x')\rangle|^2`, where :math:`|\psi(x)\rangle` is the quantum
+state encoding the classical data :math:`x`. For :math:`n` qubits, the quantum state lives in a
+:math:`2^n`-dimensional Hilbert space that is implicitly manipulated.
+
+**Key concept**: The **kernel matrix** (Gram matrix) :math:`K` has entries :math:`K_{ij} = k(x_i, x_j)` that store
+all pairwise similarities between data points.
+
+
 Demonstration setup
 -------------------
 
-The following demonstration is designed to perform a pre-screening test on several
-kernel methods—both classical and quantum—on a simple classification task.
-
 1. **Dataset**: Synthetic two-moons data generated with ``scikit-learn``.
 2. **Five kernels to compare**:
@@ -516,10 +514,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name):
 #
 # To address this, we used the **geometric difference** :math:`g`, a pre-training metric introduced by
 # Huang et al. that quantifies how *differently* a quantum kernel organizes the data compared to a
-# classical kernel.
-#
-# 🔑 Key takeaways
-# ----------------
+# classical kernel. The main takeaways from this demonstration are:
 #
 # - :math:`g` **is a diagnostic, not a performance predictor.**
 # A large :math:`g` indicates that the quantum kernel induces a very different geometry from the

From fe4e79272d5d2ad70650d5365552e26098de4296 Mon Sep 17 00:00:00 2001
From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com>
Date: Mon, 1 Dec 2025 16:47:31 -0500
Subject: [PATCH 28/31] Update metadata.json

---
 .../metadata.json | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
index d4c29d53f9..0616e2dadc 100644
--- a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
+++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
@@ -36,5 +36,22 @@
         "2011.01938"
     ],
     "referencedByPapers": [],
-    "relatedContent": []
+    "relatedContent": [
+        {
+            "type": "demonstration",
+            "id": "tutorial_kernel_based_training",
+            "weight": 1.0
+        },
+        {
+            "type": "demonstration",
+            "id": "tutorial_kernels_module",
+            "weight": 1.0
+        },
+        {
+            "type": "demonstration",
+            "id": "tutorial_classical_kernels",
+            "weight": 1.0
+        }
+
+    ]
 }

From 63b1b072715b0402c573e0bb14a8ae800aa08c26 Mon Sep 17 00:00:00 2001
From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com>
Date: Mon, 1 Dec 2025 16:48:05 -0500
Subject: [PATCH 29/31] Update demo.py

---
 demonstrations_v2/huang_geometric_kernel_difference/demo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/demonstrations_v2/huang_geometric_kernel_difference/demo.py b/demonstrations_v2/huang_geometric_kernel_difference/demo.py
index ff050dbd05..ecc4b10c40 100644
--- a/demonstrations_v2/huang_geometric_kernel_difference/demo.py
+++ b/demonstrations_v2/huang_geometric_kernel_difference/demo.py
@@ -529,7 +529,7 @@ def train_evaluate_svm(K_train, K_test, y_train, y_test, name):
 # Kernels with very small :math:`g` are unlikely to offer any meaningful advantage over classical
 # methods—meaning the quantum kernel introduces no genuinely new distinctions beyond what a
 # classical RBF can produce. By contrast, a high :math:`g` only tells us that *some advantage may
-# be possible* — not that it will be realized.
+# be possible*—not that it will be realized.
 #
 # Appendix: What if we take the labels into account?
 # --------------------------------------------------

From 49ebde77533fe8c523a606c831b76aa63826ff9b Mon Sep 17 00:00:00 2001
From: Daniela Angulo <42325731+daniela-angulo@users.noreply.github.com>
Date: Mon, 1 Dec 2025 16:59:17 -0500
Subject: [PATCH 30/31] adding thumbnails

---
 ...metric-kernel-difference-large-thumbnail.png | Bin 0 -> 23046 bytes
 ...mo-geometric-kernel-difference-thumbnail.png | Bin 0 -> 5516 bytes
 .../huang_geometric_kernel_difference/demo.py   |   4 ++--
 .../metadata.json                               |  12 ++++++++----
 4 files changed, 10 insertions(+), 6 deletions(-)
 create mode 100644 _static/demo_thumbnails/large_demo_thumbnails/pennylane-demo-geometric-kernel-difference-large-thumbnail.png
 create mode 100644 _static/demo_thumbnails/regular_demo_thumbnails/pennylane-demo-geometric-kernel-difference-thumbnail.png

diff --git a/_static/demo_thumbnails/large_demo_thumbnails/pennylane-demo-geometric-kernel-difference-large-thumbnail.png b/_static/demo_thumbnails/large_demo_thumbnails/pennylane-demo-geometric-kernel-difference-large-thumbnail.png
new file mode 100644
index 0000000000000000000000000000000000000000..61b7c4a20506d5ea906b47a22407ae19df67e5a6
GIT binary patch
literal 23046
[base85-encoded PNG data omitted: large thumbnail, 23046 bytes]

literal 0
HcmV?d00001

diff --git a/_static/demo_thumbnails/regular_demo_thumbnails/pennylane-demo-geometric-kernel-difference-thumbnail.png b/_static/demo_thumbnails/regular_demo_thumbnails/pennylane-demo-geometric-kernel-difference-thumbnail.png
new file mode 100644
index 0000000000000000000000000000000000000000..0570ede05b5b7d013e4cab8dbc2efe30f957f000
GIT binary patch
literal 5516
[base85-encoded PNG data omitted: regular thumbnail, 5516 bytes]

literal 0
HcmV?d00001

[remaining demo.py and metadata.json hunks of this patch are not recoverable from the garbled binary region]

Date: Mon, 1 Dec 2025 17:00:05 -0500
Subject: [PATCH 31/31] Update demonstrations_v2/huang_geometric_kernel_difference/metadata.json

---
 .../huang_geometric_kernel_difference/metadata.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
index 0c4369b1b1..b0f7b08217 100644
--- a/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
+++ b/demonstrations_v2/huang_geometric_kernel_difference/metadata.json
@@ -7,8 +7,8 @@
     ],
     "executable_stable": true,
     "executable_latest": false,
-    "dateOfPublication": "2025-10-06T10:00:00+00:00",
-    "dateOfLastModification": "2025-10-06T10:00:00+00:01",
+    "dateOfPublication": "2025-12-02T10:00:00+00:00",
+    "dateOfLastModification": "2025-12-02T10:00:00+00:01",
     "categories": [
         "Quantum Machine Learning"
     ],