diff --git a/.github/build-config.json b/.github/build-config.json index 7126728..162b12a 100644 --- a/.github/build-config.json +++ b/.github/build-config.json @@ -1,7 +1,8 @@ { "gpu_targets": [ {"name": "gfx110x", "pytorch_whl": "gfx110X-all"}, - {"name": "gfx1151", "pytorch_whl": "gfx1151"} + {"name": "gfx1151", "pytorch_whl": "gfx1151"}, + {"name": "gfx120x", "pytorch_whl": "gfx120X-all"} ], "default_gpu_target": "gfx1151", "courses": ["CV", "DL", "LLM", "PhySim"] diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 62cdb76..e873c80 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -319,6 +319,7 @@ jobs: type=raw,value=${{ github.event.inputs.version }},enable=${{ github.event.inputs.version != '' }} type=sha,prefix=sha- type=ref,event=branch + type=ref,event=tag type=ref,event=pr - name: Docker metadata (unsuffixed tags — default target only) @@ -335,6 +336,7 @@ jobs: type=raw,value=${{ github.event.inputs.version }},enable=${{ github.event.inputs.version != '' }} type=sha,prefix=sha- type=ref,event=branch + type=ref,event=tag type=ref,event=pr - name: Merge tags @@ -455,6 +457,7 @@ jobs: type=raw,value=${{ github.event.inputs.version }},enable=${{ github.event.inputs.version != '' }} type=sha,prefix=sha- type=ref,event=branch + type=ref,event=tag type=ref,event=pr - name: Docker metadata (unsuffixed tags — default target only) @@ -471,6 +474,7 @@ jobs: type=raw,value=${{ github.event.inputs.version }},enable=${{ github.event.inputs.version != '' }} type=sha,prefix=sha- type=ref,event=branch + type=ref,event=tag type=ref,event=pr - name: Merge tags diff --git a/.github/workflows/pack-bundle.yml b/.github/workflows/pack-bundle.yml new file mode 100644 index 0000000..86a57f5 --- /dev/null +++ b/.github/workflows/pack-bundle.yml @@ -0,0 +1,228 @@ +# Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. 
+# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +name: Pack Offline Bundle + +on: + # Automatic: fires after all images are built on a release tag push. + # The job condition below filters to v* tags on the main repo only. + workflow_run: + workflows: ["Build Docker Images"] + types: [completed] + + # Manual: for testing or on-demand bundle creation. 
+ workflow_dispatch: + inputs: + gpu_type: + description: 'GPU type (determines target architecture and HSA config)' + required: true + default: 'strix-halo' + type: choice + options: + - strix-halo # gfx1151 — Ryzen AI Max+ 395 / Max 390 + - phx # gfx110x — Ryzen AI 300 (Phoenix) + - strix # gfx110x + HSA override — Ryzen AI 300 (Strix Point) + - rdna4 # gfx120x — Radeon RX 9000 series + image_tag: + description: 'Image tag prefix (default: current branch/tag name)' + required: false + default: '' + type: string + image_registry: + description: 'Registry prefix for custom images (override for forks or private registries)' + required: false + default: 'ghcr.io/amdresearch' + type: string +permissions: + contents: write + packages: read + +jobs: + # ── Automatic release: one job per GPU target, triggered by workflow_run ── + pack-release: + name: "Pack Bundle (${{ matrix.gpu_type }}) — Release" + if: | + github.event_name == 'workflow_run' && + github.event.workflow_run.conclusion == 'success' && + startsWith(github.event.workflow_run.head_branch, 'v') + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + gpu_type: [strix-halo, phx, strix, rdna4] + + steps: + - name: Free disk space + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: false + + - name: Check available disk space + run: df -h / + + - name: Checkout code at the release tag + uses: actions/checkout@v4 + with: + ref: ${{ github.event.workflow_run.head_sha }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GH_PACKAGES_TOKEN || secrets.GITHUB_TOKEN }} + + - name: Resolve image tag and registry + id: tag + run: | + RAW="${{ github.event.workflow_run.head_branch }}" + SANITIZED="${RAW//\//-}" + echo "value=${SANITIZED}" >> "$GITHUB_OUTPUT" + echo "Resolved 
IMAGE_TAG: ${SANITIZED}"
+          OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
+          echo "registry=ghcr.io/${OWNER}" >> "$GITHUB_OUTPUT"
+
+      - name: Check if bundle already exists in release
+        id: check
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          TAG="${{ steps.tag.outputs.value }}"
+          GPU="${{ matrix.gpu_type }}"
+          if gh release view "${TAG}" &>/dev/null; then
+            # Check if a bundle for this GPU type is already attached ([^-] keeps "strix" from also matching "strix-halo" assets)
+            if gh release view "${TAG}" --json assets --jq '.assets[].name' 2>/dev/null \
+              | grep -q "auplc-bundle.*${GPU}[^-]"; then
+              echo "skip=true" >> "$GITHUB_OUTPUT"
+              echo "Bundle for ${GPU} already exists in release ${TAG}, skipping."
+            else
+              echo "skip=false" >> "$GITHUB_OUTPUT"
+            fi
+          else
+            echo "skip=false" >> "$GITHUB_OUTPUT"
+          fi
+
+      - name: Run pack command
+        if: steps.check.outputs.skip != 'true'
+        env:
+          GPU_TYPE: ${{ matrix.gpu_type }}
+          IMAGE_REGISTRY: ${{ steps.tag.outputs.registry }}
+          IMAGE_TAG: ${{ steps.tag.outputs.value }}
+        run: ./auplc-installer pack
+
+      - name: Verify bundle
+        if: steps.check.outputs.skip != 'true'
+        run: |
+          BUNDLE=$(ls auplc-bundle-*.tar.gz)
+          echo "Bundle: ${BUNDLE}"
+          echo "Size: $(du -sh "${BUNDLE}" | cut -f1)"
+
+      - name: Upload bundle as artifact
+        if: steps.check.outputs.skip != 'true'
+        uses: actions/upload-artifact@v4
+        with:
+          name: auplc-bundle-${{ matrix.gpu_type }}
+          path: auplc-bundle-*.tar.gz
+          retention-days: 30
+          compression-level: 0 # already compressed
+
+      - name: Attach bundle to GitHub Release
+        if: steps.check.outputs.skip != 'true'
+        continue-on-error: true
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          BUNDLE=$(ls auplc-bundle-*.tar.gz)
+          TAG="${{ github.event.workflow_run.head_branch }}"
+
+          # Upload to the existing release. Releases are created manually with
+          # proper release notes before tagging; CI only attaches the bundle.
+ if gh release view "${TAG}" &>/dev/null; then + gh release upload "${TAG}" "${BUNDLE}" --clobber + echo "Bundle uploaded to release ${TAG}" + else + echo "No release found for ${TAG}, skipping upload." + fi + + # ── Manual: single GPU target via workflow_dispatch ── + pack-manual: + name: "Pack Bundle (${{ inputs.gpu_type }})" + if: github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + + steps: + - name: Free disk space + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: false + + - name: Check available disk space + run: df -h / + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GH_PACKAGES_TOKEN || secrets.GITHUB_TOKEN }} + + - name: Resolve image tag + id: tag + run: | + # Use explicit input if provided; otherwise derive from branch/tag name. + # Sanitize: Docker tags cannot contain '/' — replace with '-'. 
+ RAW="${{ inputs.image_tag || github.ref_name }}" + echo "value=${RAW//\//-}" >> "$GITHUB_OUTPUT" + echo "Resolved IMAGE_TAG: ${RAW//\//-}" + + - name: Run pack command + env: + GPU_TYPE: ${{ inputs.gpu_type }} + IMAGE_REGISTRY: ${{ inputs.image_registry }} + IMAGE_TAG: ${{ steps.tag.outputs.value }} + run: ./auplc-installer pack + + - name: Verify bundle + run: | + BUNDLE=$(ls auplc-bundle-*.tar.gz) + echo "Bundle: ${BUNDLE}" + echo "Size: $(du -sh "${BUNDLE}" | cut -f1)" + - name: Upload bundle as artifact + uses: actions/upload-artifact@v4 + with: + name: auplc-bundle-${{ inputs.gpu_type }} + path: auplc-bundle-*.tar.gz + retention-days: 7 + compression-level: 0 # already compressed + diff --git a/.gitignore b/.gitignore index 477e4d8..121d4ff 100644 --- a/.gitignore +++ b/.gitignore @@ -372,3 +372,7 @@ dockerfiles/Courses/DL/data/FashionMNIST/raw/ # Local config overrides (any file containing 'local') *local* *.local.* + +# Offline bundle artifacts +auplc-bundle-*/ +auplc-bundle-*.tar.gz diff --git a/README.md b/README.md index 3eff5bf..026d7ab 100644 --- a/README.md +++ b/README.md @@ -60,16 +60,21 @@ cd aup-learning-cloud sudo ./auplc-installer install ``` After installation completes, open http://localhost:30890 in your browser. No login credentials are required - you will be automatically logged in. 
-The installer uses **Docker as the default container runtime** (`K3S_USE_DOCKER=1`), see more at [link](https://amdresearch.github.io/aup-learning-cloud/installation/single-node.html#runtime-and-mirror-configuration) +Common options: +```bash +sudo ./auplc-installer install --gpu=strix-halo # specify GPU type +sudo ./auplc-installer install --docker=0 # use containerd instead of Docker +sudo ./auplc-installer install --mirror=mirror.example.com # use registry mirror +``` + +See more at [link](https://amdresearch.github.io/aup-learning-cloud/installation/single-node.html#runtime-and-mirror-configuration) ### Uninstall ```bash sudo ./auplc-installer uninstall ``` -> **💡 Tip**: For mirror configuration (registries, PyPI, npm), see [Mirror Configuration](deploy/README.md#mirror-configuration). - ## Cluster Installation For multi-node cluster installation or need more control over the deployment process: diff --git a/auplc-installer b/auplc-installer index 8f79bef..1d4cd01 100755 --- a/auplc-installer +++ b/auplc-installer @@ -22,64 +22,107 @@ set -euo pipefail -# k3s image dir (used only when not using Docker runtime) +# ============================================================ +# Constants & Configuration +# ============================================================ + +# Pinned tool versions (used by pack and offline install) +K3S_VERSION="v1.32.3+k3s1" +HELM_VERSION="v3.17.2" +K9S_VERSION="v0.32.7" + K3S_IMAGES_DIR="/var/lib/rancher/k3s/agent/images" K3S_REGISTRIES_FILE="/etc/rancher/k3s/registries.yaml" -# Default: use host Docker as K3s runtime so "docker build" updates are visible without -# exporting to agent/images. Set K3S_USE_DOCKER=0 for containerd + export (offline/portable). 
+# K3s container runtime: 1=Docker (dev), 0=containerd (offline/portable) K3S_USE_DOCKER="${K3S_USE_DOCKER:-1}" -# Registry mirror prefix (set via environment variable) -# Example: MIRROR_PREFIX="m.daocloud.io" will transform: -# quay.io/jupyterhub/k8s-hub:4.1.0 -> m.daocloud.io/quay.io/jupyterhub/k8s-hub:4.1.0 +# Registry/package mirror configuration MIRROR_PREFIX="${MIRROR_PREFIX:-}" - -# Package manager mirrors (set via environment variables) MIRROR_PIP="${MIRROR_PIP:-}" MIRROR_NPM="${MIRROR_NPM:-}" -# Custom images (built locally) -CUSTOM_IMAGES=( - "ghcr.io/amdresearch/auplc-hub:latest" - "ghcr.io/amdresearch/auplc-default:latest" - "ghcr.io/amdresearch/auplc-cv:latest" - "ghcr.io/amdresearch/auplc-dl:latest" - "ghcr.io/amdresearch/auplc-llm:latest" -) +# Registry prefix for custom images (override for forks or private registries) +IMAGE_REGISTRY="${IMAGE_REGISTRY:-ghcr.io/amdresearch}" + +# Image tag prefix (e.g. latest, develop, v1.0). GPU suffix is appended automatically. +IMAGE_TAG="${IMAGE_TAG:-latest}" -# External images required by JupyterHub (for offline deployment) +# GPU-specific custom images (tagged as :-) +GPU_CUSTOM_NAMES=("auplc-base" "auplc-cv" "auplc-dl" "auplc-llm" "auplc-physim") + +# Non-GPU custom images (tagged as :) +PLAIN_CUSTOM_NAMES=("auplc-hub" "auplc-default") + +# External images required by JupyterHub at runtime EXTERNAL_IMAGES=( - # JupyterHub core components "quay.io/jupyterhub/k8s-hub:4.1.0" "quay.io/jupyterhub/configurable-http-proxy:4.6.3" "quay.io/jupyterhub/k8s-secret-sync:4.1.0" "quay.io/jupyterhub/k8s-network-tools:4.1.0" "quay.io/jupyterhub/k8s-image-awaiter:4.1.0" "quay.io/jupyterhub/k8s-singleuser-sample:4.1.0" - # Kubernetes components "registry.k8s.io/kube-scheduler:v1.30.8" "registry.k8s.io/pause:3.10" - # Traefik proxy - "traefik:v3.3.1" - # Utility images + # traefik is already included in the K3s airgap images bundle "curlimages/curl:8.5.0" - # Base images for Docker build + "alpine/git:2.47.2" +) + +# Base 
images only needed for local Docker build, not for runtime or bundle +BUILD_ONLY_IMAGES=( "node:20-alpine" "ubuntu:24.04" "quay.io/jupyter/base-notebook" ) -# Combined list for backward compatibility -IMAGES=("${CUSTOM_IMAGES[@]}") - # GPU configuration globals (set by detect_and_configure_gpu) ACCEL_KEY="" GPU_TARGET="" ACCEL_ENV="" +# ============================================================ +# Offline Bundle Detection +# ============================================================ + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +OFFLINE_MODE=0 +BUNDLE_DIR="" + +function detect_offline_bundle() { + if [[ ! -f "${SCRIPT_DIR}/manifest.json" ]]; then + return + fi + + BUNDLE_DIR="${SCRIPT_DIR}" + OFFLINE_MODE=1 + K3S_USE_DOCKER=0 + echo "Offline bundle detected at: ${BUNDLE_DIR}" + + # Parse config from manifest without python + local gpu_target accel_key accel_env image_registry image_tag + gpu_target=$(sed -n 's/.*"gpu_target"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "${BUNDLE_DIR}/manifest.json") + accel_key=$(sed -n 's/.*"accel_key"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "${BUNDLE_DIR}/manifest.json") + accel_env=$(sed -n 's/.*"accel_env"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "${BUNDLE_DIR}/manifest.json") + image_registry=$(sed -n 's/.*"image_registry"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "${BUNDLE_DIR}/manifest.json") + image_tag=$(sed -n 's/.*"image_tag"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "${BUNDLE_DIR}/manifest.json") + + [[ -n "${image_registry}" ]] && IMAGE_REGISTRY="${image_registry}" + [[ -n "${image_tag}" ]] && IMAGE_TAG="${image_tag}" + + if [[ -n "${gpu_target}" ]]; then + GPU_TARGET="${gpu_target}" + ACCEL_KEY="${accel_key}" + ACCEL_ENV="${accel_env}" + echo " GPU config: accelerator=${ACCEL_KEY}, GPU_TARGET=${GPU_TARGET}${ACCEL_ENV:+, HSA_OVERRIDE=${ACCEL_ENV}}" + fi +} + +# ============================================================ +# GPU Detection & Configuration +# 
============================================================ + function detect_gpu() { - # Try rocminfo first (most readable output) if command -v rocminfo &>/dev/null; then local gfx gfx=$(rocminfo 2>/dev/null | grep -o 'gfx[0-9]*' | head -1) @@ -137,9 +180,14 @@ function resolve_gpu_config() { GPU_TARGET="gfx1151" ACCEL_ENV="" ;; + gfx1201|gfx1200|rdna4|dgpu) + ACCEL_KEY="dgpu" + GPU_TARGET="gfx120x" + ACCEL_ENV="" + ;; *) echo "Error: Unsupported GPU type: $input" >&2 - echo "Supported: phx (gfx1100-1103), strix (gfx1150), strix-halo (gfx1151)" >&2 + echo "Supported: phx (gfx1100-1103), strix (gfx1150), strix-halo (gfx1151), rdna4 (gfx1201)" >&2 exit 1 ;; esac @@ -166,11 +214,18 @@ function detect_and_configure_gpu() { echo " accelerator=${ACCEL_KEY}, GPU_TARGET=${GPU_TARGET}${ACCEL_ENV:+, HSA_OVERRIDE=${ACCEL_ENV}}" } +# ============================================================ +# Values Overlay +# ============================================================ + function generate_values_overlay() { local overlay_path="runtime/values.local.yaml" + if [[ "${OFFLINE_MODE}" == "1" ]]; then + overlay_path="${BUNDLE_DIR}/config/values.local.yaml" + fi echo "Generating values overlay: ${overlay_path}" - local tag="latest-${GPU_TARGET}" + local tag="${IMAGE_TAG}-${GPU_TARGET}" { echo "# Auto-generated by auplc-installer (GPU: ${ACCEL_KEY}, target: ${GPU_TARGET})" @@ -186,33 +241,50 @@ function generate_values_overlay() { echo " resources:" echo " images:" - echo " gpu: \"ghcr.io/amdresearch/auplc-base:${tag}\"" - echo " Course-CV: \"ghcr.io/amdresearch/auplc-cv:${tag}\"" - echo " Course-DL: \"ghcr.io/amdresearch/auplc-dl:${tag}\"" - echo " Course-LLM: \"ghcr.io/amdresearch/auplc-llm:${tag}\"" - echo " Course-PhySim: \"ghcr.io/amdresearch/auplc-physim:${tag}\"" + echo " gpu: \"${IMAGE_REGISTRY}/auplc-base:${tag}\"" + echo " Course-CV: \"${IMAGE_REGISTRY}/auplc-cv:${tag}\"" + echo " Course-DL: \"${IMAGE_REGISTRY}/auplc-dl:${tag}\"" + echo " Course-LLM: 
\"${IMAGE_REGISTRY}/auplc-llm:${tag}\"" + echo " Course-PhySim: \"${IMAGE_REGISTRY}/auplc-physim:${tag}\"" echo " metadata:" for resource in gpu Course-CV Course-DL Course-LLM Course-PhySim; do echo " ${resource}:" echo " acceleratorKeys:" echo " - ${ACCEL_KEY}" done + if [[ "${OFFLINE_MODE}" == "1" ]]; then + echo "hub:" + echo " image:" + echo " name: \"${IMAGE_REGISTRY}/auplc-hub\"" + echo " tag: \"${IMAGE_TAG}\"" + echo " pullPolicy: IfNotPresent" + fi } > "${overlay_path}" } -function check_root() { - if [[ $EUID -ne 0 ]]; then - echo "Error: This script must be run as root." >&2 - exit 1 - fi -} +# ============================================================ +# Tool Installation (Helm, K9s) +# ============================================================ function install_tools() { echo "Checking/Installing tools (may require sudo)..." + if [[ "${OFFLINE_MODE}" == "1" ]]; then + if ! command -v helm &> /dev/null; then + echo "Installing Helm from bundle..." + sudo cp "${BUNDLE_DIR}/bin/helm" /usr/local/bin/helm + sudo chmod +x /usr/local/bin/helm + fi + if ! command -v k9s &> /dev/null; then + echo "Installing K9s from bundle..." + sudo dpkg -i "${BUNDLE_DIR}/bin/k9s_linux_amd64.deb" + fi + return + fi + if ! command -v helm &> /dev/null; then echo "Installing Helm..." - wget https://get.helm.sh/helm-v3.17.2-linux-amd64.tar.gz -O /tmp/helm-linux-amd64.tar.gz + wget https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz -O /tmp/helm-linux-amd64.tar.gz tar -zxvf /tmp/helm-linux-amd64.tar.gz -C /tmp sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm rm /tmp/helm-linux-amd64.tar.gz @@ -221,16 +293,17 @@ function install_tools() { if ! command -v k9s &> /dev/null; then echo "Installing K9s..." 
- wget https://github.com/derailed/k9s/releases/latest/download/k9s_linux_amd64.deb -O /tmp/k9s_linux_amd64.deb + wget "https://github.com/derailed/k9s/releases/download/${K9S_VERSION}/k9s_linux_amd64.deb" -O /tmp/k9s_linux_amd64.deb sudo apt install /tmp/k9s_linux_amd64.deb -y rm /tmp/k9s_linux_amd64.deb fi } -function configure_registry_mirrors() { - # Configure K3s registry mirrors using MIRROR_PREFIX - # This must be done BEFORE k3s starts +# ============================================================ +# K3s Management +# ============================================================ +function configure_registry_mirrors() { if [[ -z "${MIRROR_PREFIX}" ]]; then echo "No registry mirror configured. Using default registries." return 0 @@ -239,7 +312,6 @@ function configure_registry_mirrors() { echo "Configuring registry mirrors with prefix: ${MIRROR_PREFIX}" sudo mkdir -p "$(dirname "${K3S_REGISTRIES_FILE}")" - # Configure mirrors for all registries using the prefix pattern local config="mirrors: docker.io: endpoint: @@ -258,15 +330,9 @@ function configure_registry_mirrors() { echo "Registry mirrors configured at ${K3S_REGISTRIES_FILE}" } -# Dummy interface IP for K3s node binding -# Using a private IP range that won't conflict with typical networks K3S_NODE_IP="10.255.255.1" function setup_dummy_interface() { - # Create a dummy network interface for offline/portable operation - # This provides a stable node IP that doesn't change when WiFi/network changes - # Reference: https://docs.k3s.io/installation/airgap - if ip link show dummy0 &>/dev/null; then echo "Dummy interface already exists, skipping setup" return 0 @@ -276,10 +342,7 @@ function setup_dummy_interface() { sudo ip link add dummy0 type dummy sudo ip link set dummy0 up sudo ip addr add "${K3S_NODE_IP}/32" dev dummy0 - # Add a low-priority default route so K3s can detect a valid route - sudo ip route add default via "${K3S_NODE_IP}" dev dummy0 metric 1000 2>/dev/null || true - # Make persistent across 
reboots cat << EOF | sudo tee /etc/systemd/system/dummy-interface.service > /dev/null [Unit] Description=Setup dummy network interface for K3s portable operation @@ -289,7 +352,7 @@ After=network.target [Service] Type=oneshot RemainAfterExit=yes -ExecStart=/bin/bash -c 'ip link show dummy0 || (ip link add dummy0 type dummy && ip link set dummy0 up && ip addr add ${K3S_NODE_IP}/32 dev dummy0 && ip route add default via ${K3S_NODE_IP} dev dummy0 metric 1000 2>/dev/null || true)' +ExecStart=/bin/bash -c 'ip link show dummy0 || (ip link add dummy0 type dummy && ip link set dummy0 up && ip addr add ${K3S_NODE_IP}/32 dev dummy0)' ExecStop=/bin/bash -c 'ip link del dummy0 2>/dev/null || true' [Install] @@ -303,30 +366,41 @@ EOF function install_k3s_single_node() { echo "Starting K3s installation..." - if [[ "${K3S_USE_DOCKER}" == "1" ]]; then - echo "Using Docker as container runtime (K3S_USE_DOCKER=1). Images stay in Docker; no export to agent/images." - if ! command -v docker &> /dev/null; then - echo "Error: K3S_USE_DOCKER is set but Docker is not installed. Install Docker first." >&2 - exit 1 - fi - fi - - # Setup dummy interface for offline operation setup_dummy_interface + local k3s_exec="--node-ip=${K3S_NODE_IP} --flannel-iface=dummy0" - # Configure registry mirrors before starting k3s - configure_registry_mirrors + if [[ "${OFFLINE_MODE}" == "1" ]]; then + echo "Offline mode: installing K3s from bundle (containerd)..." - # Build K3s server exec flags (--docker = use host Docker so image updates are visible in dev) - local k3s_exec="--node-ip=${K3S_NODE_IP} --flannel-iface=dummy0" - if [[ "${K3S_USE_DOCKER}" == "1" ]]; then - k3s_exec="${k3s_exec} --docker" - fi + sudo cp "${BUNDLE_DIR}/bin/k3s" /usr/local/bin/k3s + sudo chmod +x /usr/local/bin/k3s - # Bind K3s to dummy interface IP for portable operation - # With --docker, K3s uses host Docker; image updates (e.g. make hub) are visible without re-export. 
- curl -sfL https://get.k3s.io | sudo K3S_KUBECONFIG_MODE="644" \ - INSTALL_K3S_EXEC="${k3s_exec}" sh - + sudo mkdir -p "${K3S_IMAGES_DIR}" + for img_file in "${BUNDLE_DIR}"/k3s-images/*; do + [[ -f "${img_file}" ]] || continue + echo " Copying: $(basename "${img_file}")" + sudo cp "${img_file}" "${K3S_IMAGES_DIR}/" + done + + sudo INSTALL_K3S_SKIP_DOWNLOAD=true \ + K3S_KUBECONFIG_MODE="644" \ + INSTALL_K3S_EXEC="${k3s_exec}" \ + bash "${BUNDLE_DIR}/bin/k3s-install.sh" + else + if [[ "${K3S_USE_DOCKER}" == "1" ]]; then + echo "Using Docker as container runtime (K3S_USE_DOCKER=1)." + if ! command -v docker &> /dev/null; then + echo "Error: K3S_USE_DOCKER is set but Docker is not installed." >&2 + exit 1 + fi + k3s_exec="${k3s_exec} --docker" + fi + + configure_registry_mirrors + + curl -sfL https://get.k3s.io | sudo K3S_KUBECONFIG_MODE="644" \ + INSTALL_K3S_EXEC="${k3s_exec}" sh - + fi echo "Configuring kubeconfig for user: $(whoami)" mkdir -p "$HOME/.kube" @@ -334,6 +408,60 @@ function install_k3s_single_node() { sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config" } +function remove_k3s_docker_containers() { + # k3s-uninstall.sh only cleans up its embedded containerd; when k3s is + # configured to use Docker as the container runtime (--docker flag), Pod + # containers appear in `docker ps` with a "k8s_" prefix and are NOT + # removed by the k3s uninstall script. This is a known upstream issue: + # https://github.com/k3s-io/k3s/issues/1469 + if ! command -v docker &>/dev/null; then + return 0 + fi + + # Filter by the Kubernetes-specific label that kubelet stamps on every + # container it creates via dockershim/cri-dockerd. This is more precise + # than matching the "k8s_" name prefix, which could accidentally catch + # user containers with similar names. 
+    local k8s_containers
+    k8s_containers=$(docker ps -a -q --filter "label=io.kubernetes.pod.name" 2>/dev/null || true)
+    if [[ -z "$k8s_containers" ]]; then
+        return 0
+    fi
+
+    echo ""
+    echo "The following Docker containers managed by Kubernetes were found."
+    echo "These are Pod containers left behind by k3s (Docker runtime mode)."
+    echo ""
+    docker ps -a --filter "label=io.kubernetes.pod.name" --format " {{.ID}} {{.Names}}"
+    echo ""
+
+    local confirm
+    # Default to "0" so this is safe under `set -u` even when AUPLC_YES is unset
+    if [[ "${AUPLC_YES:-0}" == "1" ]]; then
+        echo "Non-interactive mode (--yes): removing containers automatically."
+        confirm="y"
+    elif [[ ! -t 0 ]]; then
+        echo "Non-interactive environment detected. Skipping Docker container cleanup."
+        echo "To remove them manually, run:"
+        echo " docker rm -f \$(docker ps -a -q --filter 'label=io.kubernetes.pod.name')"
+        return 0
+    else
+        read -r -p "Remove all of the above containers? [y/N] " confirm
+    fi
+
+    if [[ "${confirm,,}" != "y" ]]; then
+        echo "Skipping Docker container cleanup. You can remove them manually with:"
+        echo " docker rm -f \$(docker ps -a -q --filter 'label=io.kubernetes.pod.name')"
+        return 0
+    fi
+
+    echo "Stopping and removing containers..."
+    # shellcheck disable=SC2086
+    docker stop $k8s_containers 2>/dev/null || true
+    # shellcheck disable=SC2086
+    docker rm $k8s_containers 2>/dev/null || true
+    echo "Docker containers removed."
+}
+
 function remove_k3s() {
     local uninstall_script="/usr/local/bin/k3s-uninstall.sh"
@@ -345,6 +473,8 @@ function remove_k3s() {
         echo "K3s uninstall script not found at $uninstall_script. Is K3s installed?"
     fi
 
+    remove_k3s_docker_containers
+
     if [[ -d "$HOME/.kube" ]]; then
         echo "Removing kubeconfig files from $HOME/.kube..."
         rm -rf "$HOME/.kube"
@@ -362,7 +492,6 @@ function remove_k3s() {
     echo "Removing K3S local data"
     sudo rm -rf /var/lib/rancher/k3s
 
-    # Remove dummy interface
     if [[ -f /etc/systemd/system/dummy-interface.service ]]; then
         echo "Removing dummy interface service..."
sudo systemctl disable dummy-interface.service 2>/dev/null || true @@ -370,13 +499,16 @@ function remove_k3s() { sudo systemctl daemon-reload fi - # Remove dummy interface if ip link show dummy0 &>/dev/null; then echo "Removing dummy interface..." sudo ip link del dummy0 fi } +# ============================================================ +# GPU Device Plugin +# ============================================================ + function deploy_rocm_gpu_device_plugin() { echo "Deploying ROCm GPU device plugin..." @@ -385,7 +517,14 @@ function deploy_rocm_gpu_device_plugin() { return 0 fi - kubectl create -f https://raw.githubusercontent.com/ROCm/k8s-device-plugin/master/k8s-ds-amdgpu-dp.yaml + if [[ "${OFFLINE_MODE}" == "1" ]]; then + kubectl create -f "${BUNDLE_DIR}/manifests/k8s-ds-amdgpu-dp.yaml" + # Patch imagePullPolicy to avoid pulling from registry in air-gapped environments + kubectl patch ds amdgpu-device-plugin-daemonset -n kube-system --type=json \ + -p '[{"op":"replace","path":"/spec/template/spec/containers/0/imagePullPolicy","value":"IfNotPresent"}]' + else + kubectl create -f https://raw.githubusercontent.com/ROCm/k8s-device-plugin/master/k8s-ds-amdgpu-dp.yaml + fi if ! kubectl wait --for=jsonpath='{.status.numberReady}'=1 --namespace=kube-system ds/amdgpu-device-plugin-daemonset --timeout=300s | grep "condition met"; then exit 1 @@ -394,34 +533,51 @@ function deploy_rocm_gpu_device_plugin() { fi } -function deply_aup_learning_cloud_runtime() { - detect_and_configure_gpu - generate_values_overlay - - echo "Deploying AUP Learning Cloud Runtime..." 
+# ============================================================ +# Image Helpers +# ============================================================ - helm install jupyterhub runtime/chart --namespace jupyterhub \ - --create-namespace -f runtime/values.yaml -f runtime/values.local.yaml +# Apply MIRROR_PREFIX to an image reference for pulling +function resolve_pull_ref() { + local image="$1" + local full_image="${image}" + local first_segment="${image%%/*}" - echo "Waiting for JupyterHub deployments to be ready..." - kubectl wait --namespace jupyterhub \ - --for=condition=available --timeout=600s \ - deployment/hub deployment/proxy deployment/user-scheduler + if [[ "${image}" == *"/"* ]]; then + [[ "${first_segment}" != *"."* ]] && full_image="docker.io/${image}" + else + full_image="docker.io/library/${image}" + fi - kubectl label "$(kubectl get nodes -o name)" node-type="${ACCEL_KEY}" --overwrite + if [[ -n "${MIRROR_PREFIX}" ]]; then + echo "${MIRROR_PREFIX}/${full_image}" + else + echo "${full_image}" + fi } -function upgrade_aup_learning_cloud_runtime() { - detect_and_configure_gpu - generate_values_overlay +# Pull a single image, apply mirror prefix, tag back to original name. +# Returns 0 on success, 1 on failure. +function pull_and_tag() { + local image="$1" + local pull_ref + pull_ref=$(resolve_pull_ref "${image}") + + echo " Pulling: ${pull_ref}" + if ! docker pull "${pull_ref}"; then + echo " FAILED: ${image}" + return 1 + fi - helm upgrade jupyterhub runtime/chart --namespace jupyterhub \ - --create-namespace -f runtime/values.yaml -f runtime/values.local.yaml + if [[ "${pull_ref}" != "${image}" ]]; then + docker tag "${pull_ref}" "${image}" + fi + return 0 } -function remove_aup_learning_cloud_runtime() { - helm uninstall jupyterhub --namespace jupyterhub -} +# ============================================================ +# Image: Local Build +# ============================================================ # Build local images. 
Optional: list of Makefile targets (e.g. hub, cv, base-cpu). Default: all. function local_image_build() { @@ -433,7 +589,6 @@ function local_image_build() { local targets=("${@:-all}") echo "Building local images: ${targets[*]}" - # When using Docker runtime, images stay in Docker; no need to export to K3S_IMAGES_DIR if [[ "${K3S_USE_DOCKER}" != "1" ]]; then if [ ! -d "${K3S_IMAGES_DIR}" ]; then sudo mkdir -p "${K3S_IMAGES_DIR}" @@ -443,7 +598,6 @@ function local_image_build() { echo "Build images in Docker (K3S_USE_DOCKER=1; K3s will use them directly)" fi - # Makefile: SAVE_IMAGES=1 and K3S_IMAGES_DIR only when not using Docker backend (containerd + export) local save_images_for_make="" local images_dir_for_make="" if [[ "${K3S_USE_DOCKER}" != "1" ]]; then @@ -457,7 +611,6 @@ function local_image_build() { GPU_TARGET="${GPU_TARGET}" \ SAVE_IMAGES="${save_images_for_make}" \ K3S_IMAGES_DIR="${images_dir_for_make}" \ - IMAGES="${IMAGES[*]}" \ MIRROR_PREFIX="${MIRROR_PREFIX}" \ MIRROR_PIP="${MIRROR_PIP}" \ MIRROR_NPM="${MIRROR_NPM}" \ @@ -466,21 +619,85 @@ function local_image_build() { echo "-------------------------------------------" } -function pull_external_images() { - # Pull external images. When K3S_USE_DOCKER=1, keep in Docker only; else also save to K3S_IMAGES_DIR for offline. +# ============================================================ +# Image: Pull from GHCR (custom images) +# ============================================================ +function pull_custom_images() { if ! command -v docker &> /dev/null; then echo "Please install docker" exit 1 fi + detect_and_configure_gpu + local tag="${IMAGE_TAG}-${GPU_TARGET}" + echo "===========================================" - echo "Pulling external images..." - if [[ "${K3S_USE_DOCKER}" == "1" ]]; then - echo "K3S_USE_DOCKER=1: images stay in Docker (no export to K3s image dir)" + echo "Pulling pre-built custom images from GHCR..." 
+ echo " GPU_TARGET=${GPU_TARGET}, tag=${tag}" + echo "===========================================" + + if [[ "${K3S_USE_DOCKER}" != "1" && ! -d "${K3S_IMAGES_DIR}" ]]; then + sudo mkdir -p "${K3S_IMAGES_DIR}" + fi + + local failed_images=() + + # GPU-specific images: pull :${IMAGE_TAG}-${GPU_TARGET}, also tag as :latest + for name in "${GPU_CUSTOM_NAMES[@]}"; do + local image="${IMAGE_REGISTRY}/${name}:${tag}" + if pull_and_tag "${image}"; then + docker tag "${image}" "${IMAGE_REGISTRY}/${name}:latest" + + if [[ "${K3S_USE_DOCKER}" != "1" ]]; then + sudo docker save \ + "${IMAGE_REGISTRY}/${name}:latest" \ + "${IMAGE_REGISTRY}/${name}:${tag}" \ + -o "${K3S_IMAGES_DIR}/${name}.tar" + fi + else + failed_images+=("${image}") + fi + done + + # Non-GPU images: pull :${IMAGE_TAG} + for name in "${PLAIN_CUSTOM_NAMES[@]}"; do + local image="${IMAGE_REGISTRY}/${name}:${IMAGE_TAG}" + if pull_and_tag "${image}"; then + if [[ "${K3S_USE_DOCKER}" != "1" ]]; then + sudo docker save "${image}" -o "${K3S_IMAGES_DIR}/${name}.tar" + fi + else + failed_images+=("${image}") + fi + done + + echo "===========================================" + if [[ ${#failed_images[@]} -eq 0 ]]; then + echo "All custom images pulled successfully!" else - echo "Saving to K3s image pool for offline deployment" + echo "Failed images:" + for img in "${failed_images[@]}"; do echo " - ${img}"; done + echo "Warning: Some custom images failed." fi + echo "===========================================" +} + +# ============================================================ +# Image: Pull External Images +# ============================================================ + +function pull_external_images() { + if ! command -v docker &> /dev/null; then + echo "Please install docker" + exit 1 + fi + + # When called during 'install --pull', skip build-only images + local skip_build_only="${1:-0}" + + echo "===========================================" + echo "Pulling external images..." 
if [[ -n "${MIRROR_PREFIX}" ]]; then echo "Using mirror prefix: ${MIRROR_PREFIX}" fi @@ -490,46 +707,37 @@ function pull_external_images() { sudo mkdir -p "${K3S_IMAGES_DIR}" fi + # Build image list, combining EXTERNAL_IMAGES + optionally BUILD_ONLY_IMAGES + local images_to_pull=("${EXTERNAL_IMAGES[@]}") + if [[ "${skip_build_only}" != "1" ]]; then + images_to_pull+=("${BUILD_ONLY_IMAGES[@]}") + fi + local failed_images=() - for image in "${EXTERNAL_IMAGES[@]}"; do - # Determine the full image path for pulling with mirror - # Images without registry prefix are from docker.io + for image in "${images_to_pull[@]}"; do local full_image="${image}" local first_segment="${image%%/*}" if [[ "${image}" == *"/"* ]]; then - # Has slash - check if first segment looks like a registry (contains a dot) - if [[ "${first_segment}" != *"."* ]]; then - # No dot in first segment, it's docker.io (e.g., curlimages/curl) - full_image="docker.io/${image}" - fi + [[ "${first_segment}" != *"."* ]] && full_image="docker.io/${image}" else - # No slash - it's an official docker image (e.g., traefik:v3.3.1) full_image="docker.io/library/${image}" fi - # Apply mirror prefix if set local pull_image="${full_image}" - if [[ -n "${MIRROR_PREFIX}" ]]; then - pull_image="${MIRROR_PREFIX}/${full_image}" - fi + [[ -n "${MIRROR_PREFIX}" ]] && pull_image="${MIRROR_PREFIX}/${full_image}" echo "-------------------------------------------" echo "Pulling: ${pull_image}" if docker pull "${pull_image}"; then - # Tag to original name so K3s can use it - if [[ "${pull_image}" != "${image}" ]]; then - docker tag "${pull_image}" "${image}" - fi + [[ "${pull_image}" != "${image}" ]] && docker tag "${pull_image}" "${image}" - # Also tag to mirror-prefixed name so Docker build with MIRROR_PREFIX can use local cache if [[ -n "${MIRROR_PREFIX}" && "${pull_image}" != "${MIRROR_PREFIX}/${full_image}" ]]; then docker tag "${pull_image}" "${MIRROR_PREFIX}/${full_image}" fi - # Save to K3S_IMAGES_DIR only when not using 
Docker backend (so K3s can load at boot) if [[ "${K3S_USE_DOCKER}" != "1" && -n "${K3S_IMAGES_DIR}" ]]; then local filename filename=$(echo "${image}" | sed 's/[\/:]/-/g').tar @@ -555,23 +763,121 @@ function pull_external_images() { echo "All external images pulled and saved successfully!" else echo "Failed images:" - for img in "${failed_images[@]}"; do - echo " - ${img}" - done + for img in "${failed_images[@]}"; do echo " - ${img}"; done echo "Warning: Some images failed. Deployment may require internet access." fi echo "===========================================" } +# ============================================================ +# Image: Load from Offline Bundle +# ============================================================ + +function load_offline_images() { + echo "===========================================" + echo "Loading images from offline bundle..." + echo "===========================================" + + local loaded=0 failed=0 + + for tar_file in "${BUNDLE_DIR}/images/custom"/*.tar "${BUNDLE_DIR}/images/external"/*.tar; do + [[ -f "${tar_file}" ]] || continue + echo " Importing: $(basename "${tar_file}")" + if sudo k3s ctr images import "${tar_file}" 2>/dev/null; then + loaded=$((loaded + 1)) + else + echo " Failed!" + failed=$((failed + 1)) + fi + done + + echo "===========================================" + echo "Loaded ${loaded} images, ${failed} failed" + if [[ "${failed}" -gt 0 ]]; then + echo "Error: ${failed} image(s) failed to import. Bundle may be corrupted." 
>&2 + exit 1 + fi + echo "===========================================" +} + +# ============================================================ +# Runtime Management +# ============================================================ + +# ============================================================ +# Runtime Management +# ============================================================ + +# Resolve chart/values paths (bundle or local repo) +function get_runtime_paths() { + if [[ "${OFFLINE_MODE}" == "1" ]]; then + CHART_PATH="${BUNDLE_DIR}/chart" + VALUES_PATH="${BUNDLE_DIR}/config/values.yaml" + OVERLAY_PATH="${BUNDLE_DIR}/config/values.local.yaml" + else + CHART_PATH="runtime/chart" + VALUES_PATH="runtime/values.yaml" + OVERLAY_PATH="runtime/values.local.yaml" + fi +} + +function deploy_aup_learning_cloud_runtime() { + echo "Deploying AUP Learning Cloud Runtime..." + + helm install jupyterhub "${CHART_PATH}" --namespace jupyterhub \ + --create-namespace -f "${VALUES_PATH}" -f "${OVERLAY_PATH}" + + echo "Waiting for JupyterHub deployments to be ready..." + kubectl wait --namespace jupyterhub \ + --for=condition=available --timeout=600s \ + deployment/hub deployment/proxy deployment/user-scheduler + + kubectl label "$(kubectl get nodes -o name)" node-type="${ACCEL_KEY}" --overwrite +} + +function upgrade_aup_learning_cloud_runtime() { + detect_and_configure_gpu + get_runtime_paths + generate_values_overlay + + helm upgrade jupyterhub "${CHART_PATH}" --namespace jupyterhub \ + --create-namespace -f "${VALUES_PATH}" -f "${OVERLAY_PATH}" +} + +function remove_aup_learning_cloud_runtime() { + helm uninstall jupyterhub --namespace jupyterhub +} + +# ============================================================ +# Deployment Orchestration +# ============================================================ + function deploy_all_components() { + if [[ $EUID -ne 0 ]]; then + echo "Error: This script must be run as root." 
>&2 + exit 1 + fi + + local flag="${1:-}" + detect_and_configure_gpu + get_runtime_paths generate_values_overlay install_tools install_k3s_single_node + + if [[ "${OFFLINE_MODE}" == "1" ]]; then + load_offline_images + elif [[ "${flag}" == "--pull" ]]; then + pull_custom_images + pull_external_images 1 # skip build-only images + else + pull_external_images + local_image_build + fi + deploy_rocm_gpu_device_plugin - pull_external_images - local_image_build - deply_aup_learning_cloud_runtime + deploy_aup_learning_cloud_runtime } function remove_all_components() { @@ -579,77 +885,405 @@ function remove_all_components() { remove_k3s } +# ============================================================ +# Pack: Create Offline Bundle +# ============================================================ + +function pack_download_binaries() { + local staging="$1" + local k3s_url_ver + k3s_url_ver=$(echo "${K3S_VERSION}" | sed 's/+/%2B/g') + + echo "--- Downloading binaries ---" + mkdir -p "${staging}/bin" + + echo " K3s ${K3S_VERSION}..." + wget -q "https://github.com/k3s-io/k3s/releases/download/${k3s_url_ver}/k3s" \ + -O "${staging}/bin/k3s" + chmod +x "${staging}/bin/k3s" + + echo " K3s install script..." + wget -q "https://get.k3s.io" -O "${staging}/bin/k3s-install.sh" + chmod +x "${staging}/bin/k3s-install.sh" + + echo " Helm ${HELM_VERSION}..." + wget -q "https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz" -O /tmp/helm-pack.tar.gz + tar -zxf /tmp/helm-pack.tar.gz -C /tmp linux-amd64/helm + mv /tmp/linux-amd64/helm "${staging}/bin/helm" + chmod +x "${staging}/bin/helm" + rm -rf /tmp/helm-pack.tar.gz /tmp/linux-amd64 + + echo " K9s ${K9S_VERSION}..." 
+ wget -q "https://github.com/derailed/k9s/releases/download/${K9S_VERSION}/k9s_linux_amd64.deb" \ + -O "${staging}/bin/k9s_linux_amd64.deb" +} + +function pack_download_k3s_images() { + local staging="$1" + local k3s_url_ver + k3s_url_ver=$(echo "${K3S_VERSION}" | sed 's/+/%2B/g') + + echo "--- Downloading K3s airgap images ---" + mkdir -p "${staging}/k3s-images" + + wget -q "https://github.com/k3s-io/k3s/releases/download/${k3s_url_ver}/k3s-airgap-images-amd64.tar.zst" \ + -O "${staging}/k3s-images/k3s-airgap-images-amd64.tar.zst" +} + +function pack_save_manifests() { + local staging="$1" + echo "--- Saving manifests ---" + mkdir -p "${staging}/manifests" + + wget -q "https://raw.githubusercontent.com/ROCm/k8s-device-plugin/master/k8s-ds-amdgpu-dp.yaml" \ + -O "${staging}/manifests/k8s-ds-amdgpu-dp.yaml" + echo " Saved ROCm device plugin DaemonSet." +} + +function pack_copy_chart() { + local staging="$1" + echo "--- Copying chart and config ---" + + cp -r runtime/chart "${staging}/chart" + mkdir -p "${staging}/config" + cp runtime/values.yaml "${staging}/config/values.yaml" +} + +# Save custom images: pull from GHCR, then docker save +# All images are saved into a single tar to deduplicate shared layers. 
+function pack_save_custom_images_pull() { + local staging="$1" + local tag="${IMAGE_TAG}-${GPU_TARGET}" + + echo "--- Pulling and saving custom images (${IMAGE_REGISTRY}) ---" + mkdir -p "${staging}/images/custom" + + local failed=0 + local all_refs=() + + for name in "${GPU_CUSTOM_NAMES[@]}"; do + local image="${IMAGE_REGISTRY}/${name}:${tag}" + if pull_and_tag "${image}"; then + docker tag "${image}" "${IMAGE_REGISTRY}/${name}:latest" + all_refs+=("${IMAGE_REGISTRY}/${name}:latest" "${IMAGE_REGISTRY}/${name}:${tag}") + echo " Pulled: ${name} (:latest + :${tag})" + else + failed=$((failed + 1)) + fi + done + + for name in "${PLAIN_CUSTOM_NAMES[@]}"; do + local image="${IMAGE_REGISTRY}/${name}:${IMAGE_TAG}" + if pull_and_tag "${image}"; then + docker tag "${image}" "${IMAGE_REGISTRY}/${name}:latest" + all_refs+=("${IMAGE_REGISTRY}/${name}:latest" "${IMAGE_REGISTRY}/${name}:${IMAGE_TAG}") + echo " Pulled: ${name} (:latest + :${IMAGE_TAG})" + else + failed=$((failed + 1)) + fi + done + + if [[ "${failed}" -gt 0 ]]; then + echo "Error: ${failed} custom image(s) failed to pull. Bundle would be incomplete." >&2 + echo " Check that IMAGE_REGISTRY (${IMAGE_REGISTRY}) is correct and you have pull access." >&2 + rm -rf "${staging}" + exit 1 + fi + + echo " Saving all custom images (shared layers deduplicated)..." 
+ docker save "${all_refs[@]}" -o "${staging}/images/custom/auplc-custom.tar" + echo " Saved: ${staging}/images/custom/auplc-custom.tar" +} + +# Save custom images: build locally via Makefile, then docker save +function pack_save_custom_images_local() { + local staging="$1" + local tag="latest-${GPU_TARGET}" + + echo "--- Building and saving custom images locally ---" + mkdir -p "${staging}/images/custom" + + # Build all images to Docker daemon (no K3s export) + (cd dockerfiles/ && make \ + GPU_TARGET="${GPU_TARGET}" \ + MIRROR_PREFIX="${MIRROR_PREFIX}" \ + MIRROR_PIP="${MIRROR_PIP}" \ + MIRROR_NPM="${MIRROR_NPM}" \ + all) + + echo "--- Saving built images to bundle (shared layers deduplicated) ---" + + local all_refs=() + + for name in "${GPU_CUSTOM_NAMES[@]}"; do + all_refs+=("${IMAGE_REGISTRY}/${name}:latest" "${IMAGE_REGISTRY}/${name}:${tag}") + echo " Queued: ${name} (:latest + :${tag})" + done + + for name in "${PLAIN_CUSTOM_NAMES[@]}"; do + docker tag "${IMAGE_REGISTRY}/${name}:latest" "${IMAGE_REGISTRY}/${name}:${IMAGE_TAG}" + all_refs+=("${IMAGE_REGISTRY}/${name}:latest" "${IMAGE_REGISTRY}/${name}:${IMAGE_TAG}") + echo " Queued: ${name} (:latest + :${IMAGE_TAG})" + done + + docker save "${all_refs[@]}" -o "${staging}/images/custom/auplc-custom.tar" + echo " Saved: ${staging}/images/custom/auplc-custom.tar" +} + +# Save external images (always pulled from registries) +function pack_save_external_images() { + local staging="$1" + echo "--- Pulling and saving external images ---" + mkdir -p "${staging}/images/external" + + # Build list: runtime external images (skip build-only) + local pack_images=("${EXTERNAL_IMAGES[@]}") + + # Extract ROCm device plugin image from saved manifest + if [[ -f "${staging}/manifests/k8s-ds-amdgpu-dp.yaml" ]]; then + local dp_image + dp_image=$(sed -n 's/.*image:[[:space:]]*\([^ ]*\).*/\1/p' "${staging}/manifests/k8s-ds-amdgpu-dp.yaml" | head -1) + if [[ -n "${dp_image}" ]]; then + echo " Found device plugin image: ${dp_image}" + 
pack_images+=("${dp_image}") + fi + fi + + local failed_images=() + for image in "${pack_images[@]}"; do + if pull_and_tag "${image}"; then + local filename + filename=$(echo "${image}" | sed 's/[\/:]/-/g').tar + docker save "${image}" -o "${staging}/images/external/${filename}" + echo " Saved: ${image}" + else + failed_images+=("${image}") + fi + done + + if [[ ${#failed_images[@]} -gt 0 ]]; then + echo "Error: ${#failed_images[@]} external image(s) failed to pull:" >&2 + for img in "${failed_images[@]}"; do echo " - ${img}" >&2; done + rm -rf "${staging}" + exit 1 + fi +} + +function pack_write_manifest() { + local staging="$1" + cat > "${staging}/manifest.json" << EOF +{ + "format_version": "1", + "build_date": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")", + "gpu_target": "${GPU_TARGET}", + "accel_key": "${ACCEL_KEY}", + "accel_env": "${ACCEL_ENV}", + "image_registry": "${IMAGE_REGISTRY}", + "image_tag": "${IMAGE_TAG}", + "k3s_version": "${K3S_VERSION}", + "helm_version": "${HELM_VERSION}", + "k9s_version": "${K9S_VERSION}" +} +EOF +} + +function pack_bundle() { + local flag="${1:-}" + + # Sanitize IMAGE_TAG: Docker tags cannot contain '/' (e.g. branch names) + IMAGE_TAG="${IMAGE_TAG//\//-}" + + echo "===========================================" + echo "AUP Learning Cloud - Pack Offline Bundle" + if [[ "${flag}" == "--local" ]]; then + echo " Image source: local build" + else + echo " Image source: pull from GHCR" + fi + echo "===========================================" + + if ! command -v docker &> /dev/null; then + echo "Error: Docker is required." 
>&2 + exit 1 + fi + + detect_and_configure_gpu + + local date_stamp + date_stamp=$(date +%Y%m%d) + local bundle_name="auplc-bundle-${GPU_TARGET}-${date_stamp}" + + [[ -d "${bundle_name}" ]] && rm -rf "${bundle_name}" + mkdir -p "${bundle_name}" + + # Copy installer itself + cp "${BASH_SOURCE[0]}" "${bundle_name}/auplc-installer" + chmod +x "${bundle_name}/auplc-installer" + + pack_download_binaries "${bundle_name}" + pack_download_k3s_images "${bundle_name}" + pack_save_manifests "${bundle_name}" + + if [[ "${flag}" == "--local" ]]; then + pack_save_custom_images_local "${bundle_name}" + else + pack_save_custom_images_pull "${bundle_name}" + fi + + pack_save_external_images "${bundle_name}" + pack_copy_chart "${bundle_name}" + pack_write_manifest "${bundle_name}" + + echo "===========================================" + echo "Creating archive: ${bundle_name}.tar.gz ..." + echo "===========================================" + + tar czf "${bundle_name}.tar.gz" "${bundle_name}/" + rm -rf "${bundle_name}" + + local size + size=$(du -sh "${bundle_name}.tar.gz" | cut -f1) + + echo "===========================================" + echo "Bundle created: ${bundle_name}.tar.gz (${size})" + echo "" + echo "Deploy on air-gapped machine:" + echo " tar xzf ${bundle_name}.tar.gz" + echo " cd ${bundle_name}" + echo " sudo ./auplc-installer install" + echo "===========================================" +} + +# ============================================================ +# Help +# ============================================================ + function show_help() { cat << 'EOF' -Usage: ./auplc-installer [subcommand] +Usage: ./auplc-installer [options] Commands: - install Full installation (k3s + images + runtime) - uninstall Remove everything + install [--pull] Full installation (k3s + images + runtime) + Default: build images locally via Makefile + --pull: use pre-built images from GHCR (no local build needed) + + pack [--local] Create offline deployment bundle (requires Docker + 
internet) + Default: pull pre-built images from GHCR + --local: build images locally then pack (needs build deps) + + uninstall Remove everything (K3s + runtime) install-tools Install helm and k9s rt install Deploy JupyterHub runtime only - rt reinstall Reinstall JupyterHub runtime (For container images changes) - rt upgrade Upgrade JupyterHub runtime (For vaules.yaml changes) + rt reinstall Reinstall JupyterHub runtime (for container image changes) + rt upgrade Upgrade JupyterHub runtime (for values.yaml changes) rt remove Remove JupyterHub runtime - img build Build all custom images - img build [target...] Build custom images (default: all). e.g. img build hub, img build hub cv + img build [target...] Build custom images (default: all) + Targets: all, hub, base-cpu, base-rocm, cv, dl, llm, physim img pull Pull external images for offline use detect-gpu Show detected GPU configuration -GPU Configuration: - GPU_TYPE Override auto-detected GPU type (phx, strix, strix-halo) - Auto-detection uses rocminfo or KFD topology. - - Examples: - GPU_TYPE=strix ./auplc-installer install - GPU_TYPE=phx ./auplc-installer img build base-rocm +Options (can also be set via environment variables): + --gpu=TYPE Override auto-detected GPU type (phx, strix, strix-halo, rdna4) + Auto-detection uses rocminfo or KFD topology. + Env: GPU_TYPE -Runtime Configuration: - K3S_USE_DOCKER Use host Docker as K3s container runtime (default: 1). - 1 = Docker mode: images built with "make hub" are visible to K3s - immediately after "rt upgrade", no export needed. - Requires Docker to be installed on the host. - 0 = containerd mode: images are exported to K3s image dir - (K3S_IMAGES_DIR) for offline/portable deployments. + --docker=0|1 Use host Docker as K3s container runtime (default: 1). + 1 = Docker mode: images visible to K3s immediately. + 0 = containerd mode: images exported for offline use. 
+ Env: K3S_USE_DOCKER - Examples: - ./auplc-installer install # Docker mode (default) - K3S_USE_DOCKER=0 ./auplc-installer install # containerd + export mode + -y, --yes Assume yes to all prompts (for scripted/CI use). + Env: AUPLC_YES=1 -Mirror Configuration: - MIRROR_PREFIX Registry mirror (e.g. mirror.example.com) - MIRROR_PIP PyPI mirror URL - MIRROR_NPM npm registry URL + --mirror=PREFIX Registry mirror (e.g. mirror.example.com) + Env: MIRROR_PREFIX + --mirror-pip=URL PyPI mirror URL. Env: MIRROR_PIP + --mirror-npm=URL npm registry URL. Env: MIRROR_NPM - Example: - MIRROR_PREFIX="mirror.example.com" ./auplc-installer install + Examples: + ./auplc-installer install --gpu=strix-halo + ./auplc-installer install --gpu=phx --docker=0 + ./auplc-installer img build base-rocm --gpu=strix + ./auplc-installer install --mirror=mirror.example.com + +Image Registry: + IMAGE_REGISTRY Registry prefix for custom images (default: ghcr.io/amdresearch) + Override when pulling from a fork or private registry. + IMAGE_TAG Image tag prefix (default: latest). GPU suffix appended automatically. + Use "develop" for images built from the develop branch. + +Offline Deployment: + 1. On a machine with internet access, create bundle: + ./auplc-installer pack --gpu=strix-halo # pull from GHCR + ./auplc-installer pack --gpu=strix-halo --local # or build locally + + 2. 
Transfer bundle to air-gapped machine, then: + tar xzf auplc-bundle-gfx1151-*.tar.gz + cd auplc-bundle-gfx1151-* + sudo ./auplc-installer install EOF } +# ============================================================ +# Main +# ============================================================ + +# Parse global options (--key=value flags override environment variables) +AUPLC_YES="${AUPLC_YES:-0}" + +args=() +for arg in "$@"; do + case "$arg" in + --gpu=*) GPU_TYPE="${arg#--gpu=}" ;; + --docker=*) K3S_USE_DOCKER="${arg#--docker=}" ;; + --mirror=*) MIRROR_PREFIX="${arg#--mirror=}" ;; + --mirror-pip=*) MIRROR_PIP="${arg#--mirror-pip=}" ;; + --mirror-npm=*) MIRROR_NPM="${arg#--mirror-npm=}" ;; + -y|--yes) AUPLC_YES=1 ;; + *) args+=("$arg") ;; + esac +done +set -- "${args[@]}" + +# Detect offline bundle at startup +detect_offline_bundle + if [[ $# -eq 0 ]]; then show_help exit 1 fi case "$1" in - install) deploy_all_components ;; + install) + deploy_all_components "${2:-}" + ;; + pack) + pack_bundle "${2:-}" + ;; uninstall) remove_all_components ;; install-tools) install_tools ;; detect-gpu) detect_and_configure_gpu ;; - # New short form: rt / img rt) case "${2:-}" in - install) deply_aup_learning_cloud_runtime ;; + install) + detect_and_configure_gpu + get_runtime_paths + generate_values_overlay + deploy_aup_learning_cloud_runtime + ;; upgrade) upgrade_aup_learning_cloud_runtime ;; remove) remove_aup_learning_cloud_runtime ;; reinstall) remove_aup_learning_cloud_runtime || true sleep 0.5 - deply_aup_learning_cloud_runtime + detect_and_configure_gpu + get_runtime_paths + generate_values_overlay + deploy_aup_learning_cloud_runtime ;; *) echo "Usage: $0 rt {install|upgrade|remove|reinstall}"; exit 1 ;; esac @@ -665,7 +1299,12 @@ case "$1" in esac ;; # Legacy long form (still supported) - install-runtime) deply_aup_learning_cloud_runtime ;; + install-runtime) + detect_and_configure_gpu + get_runtime_paths + generate_values_overlay + deploy_aup_learning_cloud_runtime + ;; 
remove-runtime) remove_aup_learning_cloud_runtime ;; upgrade-runtime) upgrade_aup_learning_cloud_runtime ;; build-images) local_image_build ;; diff --git a/runtime/chart/values.schema.json b/runtime/chart/values.schema.json index da4321b..f42d5da 100644 --- a/runtime/chart/values.schema.json +++ b/runtime/chart/values.schema.json @@ -1,3387 +1 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "additionalProperties": false, - "required": [ - "imagePullSecrets", - "hub", - "proxy", - "singleuser", - "ingress", - "prePuller", - "custom", - "cull", - "debug", - "rbac", - "global" - ], - "properties": { - "enabled": { - "type": [ - "boolean", - "null" - ], - "description": "`enabled` is ignored by the jupyterhub chart itself, but a chart depending\non the jupyterhub chart conditionally can make use this config option as\nthe condition.\n" - }, - "fullnameOverride": { - "type": [ - "string", - "null" - ], - "description": "fullnameOverride and nameOverride allow you to adjust how the resources\npart of the Helm chart are named.\n\nName format | Resource types | fullnameOverride | nameOverride | Note\n------------------------- | -------------- | ---------------- | ------------ | -\ncomponent | namespaced | `\"\"` | * | Default\nrelease-component | cluster wide | `\"\"` | * | Default\nfullname-component | * | str | * | -\nrelease-component | * | null | `\"\"` | -\nrelease-(name-)component | * | null | str | omitted if contained in release\nrelease-(chart-)component | * | null | null | omitted if contained in release\n\n```{admonition} Warning!\n:class: warning\nChanging fullnameOverride or nameOverride after the initial installation\nof the chart isn't supported. Changing their values likely leads to a\nreset of non-external JupyterHub databases, abandonment of users' storage,\nand severed couplings to currently running user pods.\n```\n\nIf you are a developer of a chart depending on this chart, you should\navoid hardcoding names. 
If you want to reference the name of a resource in\nthis chart from a parent helm chart's template, you can make use of the\nglobal named templates instead.\n\n```yaml\n# some pod definition of a parent chart helm template\nschedulerName: {{ include \"jupyterhub.user-scheduler.fullname\" . }}\n```\n\nTo access them from a container, you can also rely on the hub ConfigMap\nthat contains entries of all the resource names.\n\n```yaml\n# some container definition in a parent chart helm template\nenv:\n - name: SCHEDULER_NAME\n valueFrom:\n configMapKeyRef:\n name: {{ include \"jupyterhub.user-scheduler.fullname\" . }}\n key: user-scheduler\n```\n" - }, - "nameOverride": { - "type": [ - "string", - "null" - ], - "description": "See the documentation under [`fullnameOverride`](schema_fullnameOverride).\n" - }, - "imagePullSecret": { - "type": "object", - "required": [ - "create" - ], - "if": { - "properties": { - "create": { - "const": true - } - } - }, - "then": { - "additionalProperties": false, - "required": [ - "registry", - "username", - "password" - ], - "description": "This is configuration to create a k8s Secret resource of `type:\nkubernetes.io/dockerconfigjson`, with credentials to pull images from a\nprivate image registry. 
If you opt to do so, it will be available for use\nby all pods in their respective `spec.imagePullSecrets` alongside other\nk8s Secrets defined in `imagePullSecrets` or the pod respective\n`...image.pullSecrets` configuration.\n\nIn other words, using this configuration option can automate both the\notherwise manual creation of a k8s Secret and the otherwise manual\nconfiguration to reference this k8s Secret in all the pods of the Helm\nchart.\n\n```sh\n# you won't need to create a k8s Secret manually...\nkubectl create secret docker-registry image-pull-secret \\\n --docker-server= \\\n --docker-username= \\\n --docker-email= \\\n --docker-password=\n```\n\nIf you just want to let all Pods reference an existing secret, use the\n[`imagePullSecrets`](schema_imagePullSecrets) configuration instead.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Toggle the creation of the k8s Secret with provided credentials to\naccess a private image registry.\n" - }, - "automaticReferenceInjection": { - "type": "boolean", - "description": "Toggle the automatic reference injection of the created Secret to all\npods' `spec.imagePullSecrets` configuration.\n" - }, - "registry": { - "type": "string", - "description": "Name of the private registry you want to create a credential set for.\nIt will default to Docker Hub's image registry.\n\nExamples:\n - https://index.docker.io/v1/\n - quay.io\n - eu.gcr.io\n - alexmorreale.privatereg.net\n" - }, - "username": { - "type": "string", - "description": "Name of the user you want to use to connect to your private registry.\n\nFor external gcr.io, you will use the `_json_key`.\n\nExamples:\n - alexmorreale\n - alex@pfc.com\n - _json_key\n" - }, - "password": { - "type": "string", - "description": "Password for the private image registry's user.\n\nExamples:\n - plaintextpassword\n - abc123SECRETzyx098\n\nFor gcr.io registries the password will be a big JSON blob for a\nGoogle cloud service account, it should look 
something like below.\n\n```yaml\npassword: |-\n {\n \"type\": \"service_account\",\n \"project_id\": \"jupyter-se\",\n \"private_key_id\": \"f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85\",\n ...\n }\n```\n" - }, - "email": { - "type": [ - "string", - "null" - ], - "description": "Specification of an email is most often not required, but it is\nsupported.\n" - } - } - } - }, - "imagePullSecrets": { - "type": "array", - "description": "Chart wide configuration to _append_ k8s Secret references to all its\npod's `spec.imagePullSecrets` configuration.\n\nThis will not override or get overridden by pod specific configuration,\nbut instead augment the pod specific configuration.\n\nYou can use both the k8s native syntax, where each list element is like\n`{\"name\": \"my-secret-name\"}`, or you can let list elements be strings\nnaming the secrets directly.\n" - }, - "hub": { - "type": "object", - "additionalProperties": false, - "required": [ - "baseUrl" - ], - "properties": { - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "config": { - "type": "object", - "additionalProperties": false, - "description": "JupyterHub and its components (authenticators, spawners, etc), are\nPython classes that expose its configuration through\n[_traitlets_](https://traitlets.readthedocs.io/en/stable/). With this\nHelm chart configuration (`hub.config`), you can directly configure\nthe Python classes through _static_ YAML values. 
To _dynamically_ set\nvalues, you need to use [`hub.extraConfig`](schema_hub.extraConfig)\ninstead.\n\n```{admonition} Some configuration must be set in multiple places\n:class: warning\nThis config _currently_ (0.11.0) only influence the software in the\n`hub` Pod, but some Helm chart config options such as\n[`hub.baseUrl`](schema_hub.baseUrl) is used to set\n`JupyterHub.base_url` in the `hub` Pod _and_ influence how other Helm\ntemplates are rendered.\n\nAs we have not yet mapped out all the potential configuration\nconflicts except for the authentication related configuration options,\nplease accept that using it for something else at this point can lead\nto issues.\n```\n\n__Example__\n\nIf you inspect documentation or some `jupyterhub_config.py` to contain\nthe following section:\n\n```python\nc.JupyterHub.admin_access = true\nc.JupyterHub.admin_users = [\"jovyan1\", \"jovyan2\"]\nc.KubeSpawner.k8s_api_request_timeout = 10\nc.GitHubOAuthenticator.allowed_organizations = [\"jupyterhub\"]\n```\n\nThen, you would be able to represent it with this configuration like:\n\n```yaml\nhub:\n config:\n JupyterHub:\n admin_access: true\n admin_users:\n - jovyan1\n - jovyan2\n KubeSpawner:\n k8s_api_request_timeout: 10\n GitHubOAuthenticator:\n allowed_organizations:\n - jupyterhub\n```\n\n```{admonition} YAML limitations\n:class: tip\nYou can't represent Python `Bytes` or `Set` objects in YAML directly.\n```\n\n```{admonition} Helm value merging\n:class: tip\n`helm` merges a Helm chart's default values with values passed with\nthe `--values` or `-f` flag. 
During merging, lists are replaced while\ndictionaries are updated.\n```\n", - "patternProperties": { - "^[A-Z].*$": { - "type": "object", - "additionalProperties": true, - "description": "Pass-through traitlets configuration of Configurable classes.\nKeys must always be class names that start with capitals,\nand values must be objects.\n" - } - }, - "properties": { - "JupyterHub": { - "type": "object", - "additionalProperties": true, - "description": "JupyterHub Traitlets configuration.\n\nSee {py:mod}`jupyterhub:jupyterhub.app` for the full list,\nbut take note of the [above warnings](schema_hub.config).\n", - "properties": { - "subdomain_host": { - "type": "string", - "description": "The subdomain to use for hosting singleuser servers.\n\nThis helps protect against some cross-origin attacks by giving each user\ntheir own subdomain `.jupyter.example.org`.\n\nSee {ref}`jupyterhub_subdomains`.\n" - } - } - } - } - }, - "extraFiles": { - "type": "object", - "additionalProperties": false, - "description": "A dictionary with extra files to be injected into the pod's container\non startup. This can for example be used to inject: configuration\nfiles, custom user interface templates, images, and more.\n\n```yaml\n# NOTE: \"hub\" is used in this example, but the configuration is the\n# same for \"singleuser\".\nhub:\n extraFiles:\n # The file key is just a reference that doesn't influence the\n # actual file name.\n :\n # mountPath is required and must be the absolute file path.\n mountPath: \n\n # Choose one out of the three ways to represent the actual file\n # content: data, stringData, or binaryData.\n #\n # data should be set to a mapping (dictionary). 
It will in the\n # end be rendered to either YAML, JSON, or TOML based on the\n # filename extension that are required to be either .yaml, .yml,\n # .json, or .toml.\n #\n # If your content is YAML, JSON, or TOML, it can make sense to\n # use data to represent it over stringData as data can be merged\n # instead of replaced if set partially from separate Helm\n # configuration files.\n #\n # Both stringData and binaryData should be set to a string\n # representing the content, where binaryData should be the\n # base64 encoding of the actual file content.\n #\n data:\n myConfig:\n myMap:\n number: 123\n string: \"hi\"\n myList:\n - 1\n - 2\n stringData: |\n hello world!\n binaryData: aGVsbG8gd29ybGQhCg==\n\n # mode is by default 0644 and you can optionally override it\n # either by octal notation (example: 0400) or decimal notation\n # (example: 256).\n mode: \n```\n\n**Using --set-file**\n\nTo avoid embedding entire files in the Helm chart configuration, you\ncan use the `--set-file` flag during `helm upgrade` to set the\nstringData or binaryData field.\n\n```yaml\nhub:\n extraFiles:\n my_image:\n mountPath: /usr/local/share/jupyterhub/static/my_image.png\n\n # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are\n # automatically loaded in alphabetical order of the final file\n # name when JupyterHub starts.\n my_config:\n mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py\n```\n\n```bash\n# --set-file expects a text based file, so you need to base64 encode\n# it manually first.\nbase64 my_image.png > my_image.png.b64\n\nhelm upgrade <...> \\\n --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \\\n --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py\n```\n\n**Common uses**\n\n1. **JupyterHub template customization**\n\n You can replace the default JupyterHub user interface templates in\n the hub pod by injecting new ones to\n `/usr/local/share/jupyterhub/templates`. 
These can in turn\n reference custom images injected to\n `/usr/local/share/jupyterhub/static`.\n\n1. **JupyterHub standalone file config**\n\n Instead of embedding JupyterHub python configuration as a string\n within a YAML file through\n [`hub.extraConfig`](schema_hub.extraConfig), you can inject a\n standalone .py file into\n `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is\n automatically loaded.\n\n1. **Flexible configuration**\n\n By injecting files, you don't have to embed them in a docker image\n that you have to rebuild.\n\n If your configuration file is a YAML/JSON/TOML file, you can also\n use `data` instead of `stringData` which allow you to set various\n configuration in separate Helm config files. This can be useful to\n help dependent charts override only some configuration part of the\n file, or to allow for the configuration be set through multiple\n Helm configuration files.\n\n**Limitations**\n\n1. File size\n\n The files in `hub.extraFiles` and `singleuser.extraFiles` are\n respectively stored in their own k8s Secret resource. As k8s\n Secret's are limited, typically to 1MB, you will be limited to a\n total file size of less than 1MB as there is also base64 encoding\n that takes place reducing available capacity to 75%.\n\n2. 
File updates\n\n The files that are mounted are only set during container startup.\n This is [because we use\n `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)\n as is required to avoid replacing the content of the entire\n directory we mount in.\n", - "patternProperties": { - ".*": { - "type": "object", - "additionalProperties": false, - "required": [ - "mountPath" - ], - "oneOf": [ - { - "required": [ - "data" - ] - }, - { - "required": [ - "stringData" - ] - }, - { - "required": [ - "binaryData" - ] - } - ], - "properties": { - "mountPath": { - "type": "string" - }, - "data": { - "type": "object", - "additionalProperties": true - }, - "stringData": { - "type": "string" - }, - "binaryData": { - "type": "string" - }, - "mode": { - "type": "number" - } - } - } - } - }, - "baseUrl": { - "type": "string", - "description": "This is the equivalent of c.JupyterHub.base_url, but it is also needed\nby the Helm chart in general. So, instead of setting\nc.JupyterHub.base_url, use this configuration.\n" - }, - "command": { - "type": "array", - "description": "A list of strings to be used to replace the JupyterHub image's\n`ENTRYPOINT` entry. Note that in k8s lingo, the Dockerfile's\n`ENTRYPOINT` is called `command`. The list of strings will be expanded\nwith Helm's template function `tpl` which can render Helm template\nlogic inside curly braces (`{{... }}`).\n\nThis could be useful to wrap the invocation of JupyterHub itself in\nsome custom way.\n\nFor more details, see the [Kubernetes\ndocumentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).\n" - }, - "args": { - "type": "array", - "description": "A list of strings to be used to replace the JupyterHub image's `CMD`\nentry as well as the Helm chart's default way to start JupyterHub.\nNote that in k8s lingo, the Dockerfile's `CMD` is called `args`. 
The\nlist of strings will be expanded with Helm's template function `tpl`\nwhich can render Helm template logic inside curly braces (`{{... }}`).\n\n```{warning}\nBy replacing the entire configuration file, which is mounted to\n`/usr/local/etc/jupyterhub/jupyterhub_config.py` by the Helm chart,\ninstead of appending to it with `hub.extraConfig`, you expose your\ndeployment for issues stemming from getting out of sync with the Helm\nchart's config file.\n\nThese kind of issues will be significantly harder to debug and\ndiagnose, and can due to this could cause a lot of time expenditure\nfor both the community maintaining the Helm chart as well as yourself,\neven if this wasn't the reason for the issue.\n\nDue to this, we ask that you do your _absolute best to avoid replacing\nthe default provided `jupyterhub_config.py` file. It can often be\npossible. For example, if your goal is to have a dedicated .py file\nfor more extensive additions that you can syntax highlight and such\nand feel limited by passing code in `hub.extraConfig` which is part of\na YAML file, you can use [this\ntrick](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1580#issuecomment-707776237)\ninstead.\n```\n\n```yaml\nhub:\n args:\n - \"jupyterhub\"\n - \"--config\"\n - \"/usr/local/etc/jupyterhub/jupyterhub_config.py\"\n - \"--debug\"\n - \"--upgrade-db\"\n```\n\nFor more details, see the [Kubernetes\ndocumentation](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/).\n" - }, - "cookieSecret": { - "type": [ - "string", - "null" - ], - "description": "```{note}\nAs of version 1.0.0 this will automatically be generated and there is\nno need to set it manually.\n\nIf you wish to reset a generated key, you can use `kubectl edit` on\nthe k8s Secret typically named `hub` and remove the\n`hub.config.JupyterHub.cookie_secret` entry in the k8s Secret, then\nperform a new `helm upgrade`.\n```\n\nA 32-byte cryptographically secure randomly generated 
string used to sign values of\nsecure cookies set by the hub. If unset, jupyterhub will generate one on startup and\nsave it in the file `jupyterhub_cookie_secret` in the `/srv/jupyterhub` directory of\nthe hub container. A value set here will make JupyterHub overwrite any previous file.\n\nYou do not need to set this at all if you are using the default configuration for\nstoring databases - sqlite on a persistent volume (with `hub.db.type` set to the\ndefault `sqlite-pvc`). If you are using an external database, then you must set this\nvalue explicitly - or your users will keep getting logged out each time the hub pod\nrestarts.\n\nChanging this value will all user logins to be invalidated. If this secret leaks,\n*immediately* change it to something else, or user data can be compromised\n\n```sh\n# to generate a value, run\nopenssl rand -hex 32\n```\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. 
This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "networkPolicy": { - "type": "object", - "additionalProperties": false, - "description": "This configuration regards the creation and configuration of a k8s\n_NetworkPolicy resource_.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Toggle the creation of the NetworkPolicy resource targeting this\npod, and by doing so, restricting its communication to only what\nis explicitly allowed in the NetworkPolicy.\n" - }, - "ingress": { - "type": "array", - "description": "Additional ingress rules to add besides those that are required\nfor core functionality.\n" - }, - "egress": { - "type": "array", - "description": "Additional egress rules to add besides those that are required for\ncore functionality and those added via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).\n\n```{versionchanged} 2.0.0\nThe default value changed from providing one very permissive rule\nallowing all egress to providing no rule. 
The permissive rule is\nstill provided via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)\nset to true though.\n```\n\nAs an example, below is a configuration that disables the more\nbroadly permissive `.privateIPs` egress allow rule for the hub\npod, and instead provides tightly scoped permissions to access a\nspecific k8s local service as identified by pod labels.\n\n```yaml\nhub:\n networkPolicy:\n egressAllowRules:\n privateIPs: false\n egress:\n - to:\n - podSelector:\n matchLabels:\n app.kubernetes.io/name: my-k8s-local-service\n ports:\n - protocol: TCP\n port: 5978\n```\n" - }, - "egressAllowRules": { - "type": "object", - "additionalProperties": false, - "description": "This is a set of predefined rules that when enabled will be added\nto the NetworkPolicy list of egress rules.\n\nThe resulting egress rules will be a composition of:\n- rules specific for the respective pod(s) function within the\n Helm chart\n- rules based on enabled `egressAllowRules` flags\n- rules explicitly specified by the user\n\n```{note}\nEach flag under this configuration will not render into a\ndedicated rule in the NetworkPolicy resource, but instead combine\nwith the other flags to a reduced set of rules to avoid a\nperformance penalty.\n```\n\n```{versionadded} 2.0.0\n```\n", - "properties": { - "cloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server.\n\nNote that the `nonPrivateIPs` rule is allowing all non Private\nIP ranges but makes an exception for the cloud metadata\nserver, leaving this as the definitive configuration to allow\naccess to the cloud metadata server.\n\n```{versionchanged} 3.0.0\nThis configuration is not allowed to be configured true at the\nsame time 
as\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n```\n" - }, - "dnsPortsCloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server\nvia port 53.\n\nRelying on this rule for the singleuser config should go hand\nin hand with disabling\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n\nKnown situations when this rule can be relevant:\n\n- In GKE clusters with Cloud DNS that is reached at the\n cloud metadata server's non-private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsKubeSystemNamespace": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to pods in the kube-system\nnamespace via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE, EKS, AKS, and other clusters relying directly on\n `kube-dns` or `coredns` pods in the `kube-system` namespace.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. 
Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to private IPs via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE clusters relying on a DNS server indirectly via a a node\n local DNS cache at an unknown private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - }, - "nonPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the non-private IP ranges\nwith the exception of the cloud metadata server. 
This means\nrespective pod(s) can establish connections to the internet\nbut not (say) an unsecured prometheus server running in the\nsame cluster.\n" - }, - "privateIPs": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nPrivate IPs refer to the IP ranges `10.0.0.0/8`,\n`172.16.0.0/12`, `192.168.0.0/16`.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the internal k8s cluster.\nThis means users can access the internet but not (say) an\nunsecured prometheus server running in the same cluster.\n\nSince not all workloads in the k8s cluster may have\nNetworkPolicies setup to restrict their incoming connections,\nhaving this set to false can be a good defense against\nmalicious intent from someone in control of software in these\npods.\n\nIf possible, try to avoid setting this to true as it gives\nbroad permissions that could be specified more directly via\nthe [`.egress`](schema_singleuser.networkPolicy.egress).\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - } - } - }, - "interNamespaceAccessLabels": { - "enum": [ - "accept", - "ignore" - ], - "description": "This configuration option determines if both namespaces and pods\nin other namespaces, that have specific access labels, should be\naccepted to allow ingress (set to `accept`), or, if the labels are\nto be ignored when applied outside the local namespace (set to\n`ignore`).\n\nThe available access labels for respective NetworkPolicy resources\nare:\n\n- `hub.jupyter.org/network-access-hub: \"true\"` (hub)\n- `hub.jupyter.org/network-access-proxy-http: \"true\"` (proxy.chp, proxy.traefik)\n- `hub.jupyter.org/network-access-proxy-api: \"true\"` (proxy.chp)\n- 
`hub.jupyter.org/network-access-singleuser: \"true\"` (singleuser)\n" - }, - "allowedIngressPorts": { - "type": "array", - "description": "A rule to allow ingress on these ports will be added no matter\nwhat the origin of the request is. The default setting for\n`proxy.chp` and `proxy.traefik`'s networkPolicy configuration is\n`[http, https]`, while it is `[]` for other networkPolicies.\n\nNote that these port names or numbers target a Pod's port name or\nnumber, not a k8s Service's port name or number.\n" - } - } - }, - "db": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "enum": [ - "sqlite-pvc", - "sqlite-memory", - "mysql", - "postgres", - "other" - ], - "description": "Type of database backend to use for the hub database.\n\nThe Hub requires a persistent database to function, and this lets you specify\nwhere it should be stored.\n\nThe various options are:\n\n1. **sqlite-pvc**\n\n Use an `sqlite` database kept on a persistent volume attached to the hub.\n\n By default, this disk is created by the cloud provider using\n *dynamic provisioning* configured by a [storage\n class](https://kubernetes.io/docs/concepts/storage/storage-classes/).\n You can customize how this disk is created / attached by\n setting various properties under `hub.db.pvc`.\n\n This is the default setting, and should work well for most cloud provider\n deployments.\n\n2. **sqlite-memory**\n\n Use an in-memory `sqlite` database. This should only be used for testing,\n since the database is erased whenever the hub pod restarts - causing the hub\n to lose all memory of users who had logged in before.\n\n When using this for testing, make sure you delete all other objects that the\n hub has created (such as user pods, user PVCs, etc) every time the hub restarts.\n Otherwise you might run into errors about duplicate resources.\n\n3. 
**mysql**\n\n Use an externally hosted mysql database.\n\n You have to specify an sqlalchemy connection string for the mysql database you\n want to connect to in `hub.db.url` if using this option.\n\n The general format of the connection string is:\n ```\n mysql+pymysql://:@:/\n ```\n\n The user specified in the connection string must have the rights to create\n tables in the database specified.\n\n4. **postgres**\n\n Use an externally hosted postgres database.\n\n You have to specify an sqlalchemy connection string for the postgres database you\n want to connect to in `hub.db.url` if using this option.\n\n The general format of the connection string is:\n ```\n postgresql+psycopg2://:@:/\n ```\n\n The user specified in the connection string must have the rights to create\n tables in the database specified.\n\n5. **other**\n\n Use an externally hosted database of some kind other than mysql\n or postgres.\n\n When using _other_, the database password must be passed as\n part of [hub.db.url](schema_hub.db.url) as\n [hub.db.password](schema_hub.db.password) will be ignored.\n" - }, - "pvc": { - "type": "object", - "additionalProperties": false, - "required": [ - "storage" - ], - "description": "Customize the Persistent Volume Claim used when `hub.db.type` is `sqlite-pvc`.\n", - "properties": { - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the PVC containing the sqlite database.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\nfor more details about annotations.\n" - }, - "selector": { - "type": "object", - "additionalProperties": true, - "description": "Label selectors to set for the PVC containing the sqlite database.\n\nUseful when you are using a specific PV, and want to bind to\nthat and only that.\n\nSee [the 
Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)\nfor more details about using a label selector for what PV to\nbind to.\n" - }, - "storage": { - "type": "string", - "description": "Size of disk to request for the database disk.\n" - }, - "accessModes": { - "type": "array", - "items": { - "type": [ - "string", - "null" - ] - }, - "description": "AccessModes contains the desired access modes the volume\nshould have. See [the k8s\ndocumentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1)\nfor more information.\n" - }, - "storageClassName": { - "type": [ - "string", - "null" - ], - "description": "Name of the StorageClass required by the claim.\n\nIf this is a blank string it will be set to a blank string,\nwhile if it is null, it will not be set at all.\n" - }, - "subPath": { - "type": [ - "string", - "null" - ], - "description": "Path within the volume from which the container's volume\nshould be mounted. 
Defaults to \"\" (volume's root).\n" - } - } - }, - "upgrade": { - "type": [ - "boolean", - "null" - ], - "description": "Users with external databases need to opt-in for upgrades of the\nJupyterHub specific database schema if needed as part of a\nJupyterHub version upgrade.\n" - }, - "url": { - "type": [ - "string", - "null" - ], - "description": "Connection string when `hub.db.type` is mysql or postgres.\n\nSee documentation for `hub.db.type` for more details on the format of this property.\n" - }, - "password": { - "type": [ - "string", - "null" - ], - "description": "Password for the database when `hub.db.type` is mysql or postgres.\n" - } - } - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the hub pod.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "initContainers": { - "type": "array", - "description": "list of initContainers to be run with hub pod. 
See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)\n\n```yaml\nhub:\n initContainers:\n - name: init-myservice\n image: busybox:1.28\n command: ['sh', '-c', 'command1']\n - name: init-mydb\n image: busybox:1.28\n command: ['sh', '-c', 'command2']\n```\n" - }, - "extraEnv": { - "type": [ - "object", - "array" - ], - "additionalProperties": true, - "description": "Extra environment variables that should be set for the hub pod.\n\nEnvironment variables are usually used to:\n - Pass parameters to some custom code in `hub.extraConfig`.\n - Configure code running in the hub pod, such as an authenticator or\n spawner.\n\nString literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which\nis a part of Kubernetes.\n\n```yaml\nhub:\n extraEnv:\n # basic notation (for literal values only)\n MY_ENV_VARS_NAME1: \"my env var value 1\"\n\n # explicit notation (the \"name\" field takes precedence)\n HUB_NAMESPACE:\n name: HUB_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n\n # implicit notation (the \"name\" field is implied)\n PREFIXED_HUB_NAMESPACE:\n value: \"my-prefix-$(HUB_NAMESPACE)\"\n SECRET_VALUE:\n valueFrom:\n secretKeyRef:\n name: my-k8s-secret\n key: password\n```\n\nFor more information, see the [Kubernetes EnvVar\nspecification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).\n" - }, - "extraConfig": { - "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra python based configuration that should be in `jupyterhub_config.py`.\n\nThis is the *escape hatch* - if you want to configure JupyterHub to do something specific\nthat is not present here as an option, you can write the raw Python to do it here.\n\nextraConfig is a *dict*, so there can be multiple configuration\nsnippets under different names. 
The configuration sections are run in\nalphabetical order based on the keys.\n\nNon-exhaustive examples of things you can do here:\n - Subclass authenticator / spawner to do a custom thing\n - Dynamically launch different images for different sets of images\n - Inject an auth token from GitHub authenticator into user pod\n - Anything else you can think of!\n\nSince this is usually a multi-line string, you want to format it using YAML's\n[| operator](https://yaml.org/spec/1.2.2/#23-scalars).\n\nFor example:\n\n```yaml\nhub:\n extraConfig:\n myConfig.py: |\n c.JupyterHub.something = 'something'\n c.Spawner.something_else = 'something else'\n```\n\n```{note}\nNo code validation is performed until JupyterHub loads it! If you make\na typo here, it will probably manifest itself as the hub pod failing\nto start up and instead entering an `Error` state or the subsequent\n`CrashLoopBackoff` state.\n\nTo make use of your own programs linters etc, it would be useful to\nnot embed Python code inside a YAML file. To do that, consider using\n[`hub.extraFiles`](schema_hub.extraFiles) and mounting a file to\n`/usr/local/etc/jupyterhub/jupyterhub_config.d` in order to load your\nextra configuration logic.\n```\n" - }, - "fsGid": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "```{note}\nRemoved in version 2.0.0. 
Use\n[`hub.podSecurityContext`](schema_hub.podSecurityContext) and specify\n`fsGroup` instead.\n```\n" - }, - "service": { - "type": "object", - "additionalProperties": false, - "description": "Object to configure the service the JupyterHub will be exposed on by the Kubernetes server.\n", - "properties": { - "type": { - "enum": [ - "ClusterIP", - "NodePort", - "LoadBalancer", - "ExternalName" - ], - "description": "The Kubernetes ServiceType to be used.\n\nThe default type is `ClusterIP`.\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)\nto learn more about service types.\n" - }, - "ports": { - "type": "object", - "additionalProperties": false, - "description": "Object to configure the ports the hub service will be deployed on.\n", - "properties": { - "appProtocol": { - "type": [ - "string", - "null" - ], - "description": "The application protocol for the Service port. Required when an external app\nuses a protocol the hub does not support.\n" - }, - "nodePort": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "The nodePort to deploy the hub service on.\n" - } - } - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the hub service.\n" - }, - "extraPorts": { - "type": "array", - "description": "Extra ports to add to the Hub Service object besides `hub` / `8081`.\nThis should be an array that includes `name`, `port`, and `targetPort`.\nSee [Multi-port Services](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services) for more details.\n" - }, - "loadBalancerIP": { - "type": [ - "string", - "null" - ], - "description": "A public IP address the hub Kubernetes service should be exposed\non. To expose the hub directly is not recommended. 
Instead route\ntraffic through the proxy-public service towards the hub.\n" - }, - "ipFamilyPolicy": { - "type": [ - "string" - ], - "description": "See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services)\nfor more info.\n" - }, - "ipFamilies": { - "type": "array", - "description": "See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services)\nfor more info.\n" - } - } - }, - "pdb": { - "type": "object", - "additionalProperties": false, - "description": "Configure a PodDisruptionBudget for this Deployment.\n\nThese are disabled by default for our deployments that don't support\nbeing run in parallel with multiple replicas. Only the user-scheduler\ncurrently supports being run in parallel with multiple replicas. If\nthey are enabled for a Deployment with only one replica, they will\nblock `kubectl drain` of a node for example.\n\nNote that if you aim to block scaling down a node with the\nhub/proxy/autohttps pod that would cause disruptions of the\ndeployment, then you should instead annotate the pods of the\nDeployment [as described\nhere](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).\n\n \"cluster-autoscaler.kubernetes.io/safe-to-evict\": \"false\"\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)\nfor more details about disruptions.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Decides if a PodDisruptionBudget is created targeting the\nDeployment's pods.\n" - }, - "maxUnavailable": { - "type": [ - "integer", - "null" - ], - "description": "The maximum number of pods that can be unavailable during\nvoluntary disruptions.\n" - }, - "minAvailable": { - "type": [ - "integer", - "null" - ], - "description": "The minimum number of pods required to be available during\nvoluntary disruptions.\n" - } - } - }, - 
"existingSecret": { - "type": [ - "string", - "null" - ], - "description": "This option allow you to provide the name of an existing k8s Secret to\nuse alongside of the chart managed k8s Secret. The content of this k8s\nSecret will be merged with the chart managed k8s Secret, giving\npriority to the self-managed k8s Secret.\n\n```{warning}\n1. The self managed k8s Secret must mirror the structure in the chart\n managed secret.\n2. [`proxy.secretToken`](schema_proxy.secretToken) (aka.\n `hub.config.ConfigurableHTTPProxy.auth_token`) is only read from\n the chart managed k8s Secret.\n```\n" - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. K8s Nodes are\nrequired to have match all these labels for this Pod to scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. 
These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "activeServerLimit": { - "type": [ - "integer", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "allowNamedServers": { - "type": [ - "boolean", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "K8s annotations for the hub pod.\n" - }, - "authenticatePrometheus": { - "type": [ - "boolean", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "concurrentSpawnLimit": { - "type": [ - "integer", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "consecutiveFailureLimit": { - "type": [ - "integer", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - 
}, - "podSecurityContext": { - "additionalProperties": true, - "description": "A k8s native specification of the pod's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core)\nfor details.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "deploymentStrategy": { - "type": "object", - "additionalProperties": false, - "properties": { - "rollingUpdate": { - "type": [ - "string", - "null" - ] - }, - "type": { - "type": [ - "string", - "null" - ], - "description": "JupyterHub does not support running in parallel, due to this we\ndefault to using a deployment strategy of Recreate.\n" - } - } - }, - "extraContainers": { - "type": "array", - "description": "Additional containers for the Pod. Use a k8s native syntax.\n" - }, - "extraVolumeMounts": { - "type": "array", - "description": "Additional volume mounts for the Container. Use a k8s native syntax.\n" - }, - "extraVolumes": { - "type": "array", - "description": "Additional volumes for the Pod. 
Use a k8s native syntax.\n" - }, - "livenessProbe": { - "type": "object", - "additionalProperties": true, - "required": [ - "enabled" - ], - "if": { - "properties": { - "enabled": { - "const": true - } - } - }, - "then": { - "description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n" - } - }, - "readinessProbe": { - "type": "object", - "additionalProperties": true, - "required": [ - "enabled" - ], - "if": { - "properties": { - "enabled": { - "const": true - } - } - }, - "then": { - "description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n" - } - }, - "namedServerLimitPerUser": { - "type": [ - "integer", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "redirectToServer": { - "type": [ - "boolean", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - }, - "lifecycle": { - "type": "object", - "additionalProperties": false, - "description": "A k8s native specification of lifecycle hooks on the container, see 
[the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#lifecycle-v1-core).\n", - "properties": { - "postStart": { - "type": "object", - "additionalProperties": true - }, - "preStop": { - "type": "object", - "additionalProperties": true - } - } - }, - "services": { - "type": "object", - "additionalProperties": true, - "description": "This is where you register JupyterHub services. For details on how to\nconfigure these services in this Helm chart just keep reading but for\ndetails on services themselves instead read [JupyterHub's\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/service.html).\n\n```{note}\nOnly a selection of JupyterHub's configuration options that can be\nconfigured for a service are documented below. All configuration set\nhere will be applied even if this Helm chart doesn't recognize it.\n```\n\nJupyterHub's native configuration accepts a list of service objects,\nthis Helm chart only accept a dictionary where each key represents the\nname of a service and the value is the actual service objects.\n\nWhen configuring JupyterHub services via this Helm chart, the `name`\nfield can be omitted as it can be implied by the dictionary key.\nFurther, the `api_token` field can be omitted as it will be\nautomatically generated as of version 1.1.0 of this Helm chart.\n\nIf you have an external service that needs to access the automatically\ngenerated api_token for the service, you can access it from the `hub`\nk8s Secret part of this Helm chart under the key\n`hub.services.my-service-config-key.apiToken`.\n\nHere is an example configuration of two services where the first\nexplicitly sets a name and api_token, while the second omits those and\nlets the name be implied from the key name and the api_token be\nautomatically generated.\n\n```yaml\nhub:\n services:\n my-service-1:\n admin: true\n name: my-explicitly-set-service-name\n api_token: my-explicitly-set-api_token\n\n # the name of the following 
service will be my-service-2\n # the api_token of the following service will be generated\n my-service-2: {}\n```\n\nIf you develop a Helm chart depending on the JupyterHub Helm chart and\nwant to let some Pod's environment variable be populated with the\napi_token of a service registered like above, then do something along\nthese lines.\n\n```yaml\n# ... container specification of a pod ...\nenv:\n - name: MY_SERVICE_1_API_TOKEN\n valueFrom:\n secretKeyRef:\n # Don't hardcode the name, use the globally accessible\n # named templates part of the JupyterHub Helm chart.\n name: {{ include \"jupyterhub.hub.fullname\" . }}\n # Note below the use of the configuration key my-service-1\n # rather than the explicitly set service name.\n key: hub.services.my-service-1.apiToken\n```\n", - "properties": { - "name": { - "type": "string", - "description": "The name can be implied via the key name under which this\nservice is configured, and is due to that allowed to be\nomitted in this Helm chart configuration of JupyterHub.\n" - }, - "admin": { - "type": "boolean" - }, - "command": { - "type": [ - "string", - "array" - ] - }, - "url": { - "type": "string" - }, - "api_token": { - "type": [ - "string", - "null" - ], - "description": "The api_token will be automatically generated if not\nexplicitly set. 
It will also be exposed in via a k8s Secret\npart of this Helm chart under a specific key.\n\nSee the documentation under\n[`hub.services`](schema_hub.services) for details about this.\n" - }, - "apiToken": { - "type": [ - "string", - "null" - ], - "description": "An alias for api_token provided for backward compatibility by\nthe JupyterHub Helm chart that will be transformed to\napi_token.\n" - } - } - }, - "loadRoles": { - "type": "object", - "additionalProperties": true, - "description": "This is where you should define JupyterHub roles and apply them to\nJupyterHub users, groups, and services to grant them additional\npermissions as defined in JupyterHub's RBAC system.\n\nComplement this documentation with [JupyterHub's\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles)\nabout `load_roles`.\n\nNote that while JupyterHub's native configuration `load_roles` accepts\na list of role objects, this Helm chart only accepts a dictionary where\neach key represents the name of a role and the value is the actual\nrole object.\n\n```yaml\nhub:\n loadRoles:\n teacher:\n description: Access to users' information and group membership\n\n # this role provides permissions to...\n scopes: [users, groups]\n\n # this role will be assigned to...\n users: [erik]\n services: [grading-service]\n groups: [teachers]\n```\n\nWhen configuring JupyterHub roles via this Helm chart, the `name`\nfield can be omitted as it can be implied by the dictionary key.\n" - }, - "shutdownOnLogout": { - "type": [ - "boolean", - "null" - ], - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "templatePaths": { - "type": "array", - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "templateVars": { 
- "type": "object", - "additionalProperties": true, - "description": "JupyterHub native configuration, see the [JupyterHub\ndocumentation](https://jupyterhub.readthedocs.io/en/stable/reference/api/app.html)\nfor more information.\n" - }, - "serviceAccount": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "extraPodSpec": { - "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra k8s pod specification as a YAML object. The default\nvalue of this setting is an empty object, i.e. no extra configuration.\nThe value of this property is augmented to the pod specification as-is.\n\nThis is a powerful tool for expert k8s administrators with advanced\nconfiguration requirements. This setting should only be used for\nconfiguration that cannot be accomplished through the other settings.\nMisusing this setting can break your deployment and/or compromise\nyour system security.\n\nThis is one of four related settings for inserting arbitrary pod\nspecification:\n\n1. 
hub.extraPodSpec\n2. proxy.chp.extraPodSpec\n3. proxy.traefik.extraPodSpec\n4. scheduling.userScheduler.extraPodSpec\n5. scheduling.userPlaceholder.extraPodSpec\n\nOne real-world use of these settings is to enable host networking. For\nexample, to configure host networking for the hub pod, add the\nfollowing to your helm configuration values:\n\n```yaml\nhub:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nLikewise, to configure host networking for the proxy pod, add the\nfollowing:\n\n```yaml\nproxy:\n chp:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nN.B. Host networking has special security implications and can easily\nbreak your deployment. This is an example\u2014not an endorsement.\n\nSee [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)\nfor the latest pod resource specification.\n" - } - } - }, - "proxy": { - "type": "object", - "additionalProperties": false, - "properties": { - "chp": { - "type": "object", - "additionalProperties": false, - "description": "Configure the configurable-http-proxy (chp) pod managed by jupyterhub to route traffic\nboth to itself and to user pods.\n", - "properties": { - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. 
This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "networkPolicy": { - "type": "object", - "additionalProperties": false, - "description": "This configuration regards the creation and configuration of a k8s\n_NetworkPolicy resource_.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Toggle the creation of the NetworkPolicy resource targeting this\npod, and by doing so, restricting its communication to only what\nis explicitly allowed in the NetworkPolicy.\n" - }, - "ingress": { - "type": "array", - "description": "Additional ingress rules to add besides those that are required\nfor core functionality.\n" - }, - "egress": { - "type": "array", - "description": "Additional egress rules to add besides those that are required for\ncore functionality and those added via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).\n\n```{versionchanged} 2.0.0\nThe default value changed from providing one very permissive rule\nallowing all egress to providing no rule. 
The permissive rule is\nstill provided via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)\nset to true though.\n```\n\nAs an example, below is a configuration that disables the more\nbroadly permissive `.privateIPs` egress allow rule for the hub\npod, and instead provides tightly scoped permissions to access a\nspecific k8s local service as identified by pod labels.\n\n```yaml\nhub:\n networkPolicy:\n egressAllowRules:\n privateIPs: false\n egress:\n - to:\n - podSelector:\n matchLabels:\n app.kubernetes.io/name: my-k8s-local-service\n ports:\n - protocol: TCP\n port: 5978\n```\n" - }, - "egressAllowRules": { - "type": "object", - "additionalProperties": false, - "description": "This is a set of predefined rules that when enabled will be added\nto the NetworkPolicy list of egress rules.\n\nThe resulting egress rules will be a composition of:\n- rules specific for the respective pod(s) function within the\n Helm chart\n- rules based on enabled `egressAllowRules` flags\n- rules explicitly specified by the user\n\n```{note}\nEach flag under this configuration will not render into a\ndedicated rule in the NetworkPolicy resource, but instead combine\nwith the other flags to a reduced set of rules to avoid a\nperformance penalty.\n```\n\n```{versionadded} 2.0.0\n```\n", - "properties": { - "cloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server.\n\nNote that the `nonPrivateIPs` rule is allowing all non Private\nIP ranges but makes an exception for the cloud metadata\nserver, leaving this as the definitive configuration to allow\naccess to the cloud metadata server.\n\n```{versionchanged} 3.0.0\nThis configuration is not allowed to be configured true at the\nsame time 
as\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n```\n" - }, - "dnsPortsCloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server\nvia port 53.\n\nRelying on this rule for the singleuser config should go hand\nin hand with disabling\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n\nKnown situations when this rule can be relevant:\n\n- In GKE clusters with Cloud DNS that is reached at the\n cloud metadata server's non-private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsKubeSystemNamespace": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to pods in the kube-system\nnamespace via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE, EKS, AKS, and other clusters relying directly on\n `kube-dns` or `coredns` pods in the `kube-system` namespace.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. 
Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to private IPs via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE clusters relying on a DNS server indirectly via a a node\n local DNS cache at an unknown private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - }, - "nonPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the non-private IP ranges\nwith the exception of the cloud metadata server. 
This means\nrespective pod(s) can establish connections to the internet\nbut not (say) an unsecured prometheus server running in the\nsame cluster.\n" - }, - "privateIPs": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nPrivate IPs refer to the IP ranges `10.0.0.0/8`,\n`172.16.0.0/12`, `192.168.0.0/16`.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the internal k8s cluster.\nThis means users can access the internet but not (say) an\nunsecured prometheus server running in the same cluster.\n\nSince not all workloads in the k8s cluster may have\nNetworkPolicies setup to restrict their incoming connections,\nhaving this set to false can be a good defense against\nmalicious intent from someone in control of software in these\npods.\n\nIf possible, try to avoid setting this to true as it gives\nbroad permissions that could be specified more directly via\nthe [`.egress`](schema_singleuser.networkPolicy.egress).\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - } - } - }, - "interNamespaceAccessLabels": { - "enum": [ - "accept", - "ignore" - ], - "description": "This configuration option determines if both namespaces and pods\nin other namespaces, that have specific access labels, should be\naccepted to allow ingress (set to `accept`), or, if the labels are\nto be ignored when applied outside the local namespace (set to\n`ignore`).\n\nThe available access labels for respective NetworkPolicy resources\nare:\n\n- `hub.jupyter.org/network-access-hub: \"true\"` (hub)\n- `hub.jupyter.org/network-access-proxy-http: \"true\"` (proxy.chp, proxy.traefik)\n- `hub.jupyter.org/network-access-proxy-api: \"true\"` (proxy.chp)\n- 
`hub.jupyter.org/network-access-singleuser: \"true\"` (singleuser)\n" - }, - "allowedIngressPorts": { - "type": "array", - "description": "A rule to allow ingress on these ports will be added no matter\nwhat the origin of the request is. The default setting for\n`proxy.chp` and `proxy.traefik`'s networkPolicy configuration is\n`[http, https]`, while it is `[]` for other networkPolicies.\n\nNote that these port names or numbers target a Pod's port name or\nnumber, not a k8s Service's port name or number.\n" - } - } - }, - "extraCommandLineFlags": { - "type": "array", - "description": "A list of strings to be added as command line options when\nstarting\n[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy#command-line-options)\nthat will be expanded with Helm's template function `tpl` which\ncan render Helm template logic inside curly braces (`{{ ... }}`).\n\n```yaml\nproxy:\n chp:\n extraCommandLineFlags:\n - \"--auto-rewrite\"\n - \"--custom-header={{ .Values.custom.myStuff }}\"\n```\n\nNote that these will be appended last, and if you provide the same\nflag twice, the last flag will be used, which mean you can\noverride the default flag values as well.\n" - }, - "extraEnv": { - "type": [ - "object", - "array" - ], - "additionalProperties": true, - "description": "Extra environment variables that should be set for the chp pod.\n\nEnvironment variables are usually used here to:\n - override HUB_SERVICE_PORT or HUB_SERVICE_HOST default values\n - set CONFIGPROXY_SSL_KEY_PASSPHRASE for setting passphrase of SSL keys\n\nString literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which\nis a part of Kubernetes.\n\n```yaml\nproxy:\n chp:\n extraEnv:\n # basic notation (for literal values only)\n MY_ENV_VARS_NAME1: \"my env var value 1\"\n\n # explicit notation (the \"name\" field takes precedence)\n CHP_NAMESPACE:\n name: CHP_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n\n # implicit notation (the \"name\" field 
is implied)\n PREFIXED_CHP_NAMESPACE:\n value: \"my-prefix-$(CHP_NAMESPACE)\"\n SECRET_VALUE:\n valueFrom:\n secretKeyRef:\n name: my-k8s-secret\n key: password\n```\n\nFor more information, see the [Kubernetes EnvVar\nspecification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).\n" - }, - "pdb": { - "type": "object", - "additionalProperties": false, - "description": "Configure a PodDisruptionBudget for this Deployment.\n\nThese are disabled by default for our deployments that don't support\nbeing run in parallel with multiple replicas. Only the user-scheduler\ncurrently supports being run in parallel with multiple replicas. If\nthey are enabled for a Deployment with only one replica, they will\nblock `kubectl drain` of a node for example.\n\nNote that if you aim to block scaling down a node with the\nhub/proxy/autohttps pod that would cause disruptions of the\ndeployment, then you should instead annotate the pods of the\nDeployment [as described\nhere](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).\n\n \"cluster-autoscaler.kubernetes.io/safe-to-evict\": \"false\"\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)\nfor more details about disruptions.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Decides if a PodDisruptionBudget is created targeting the\nDeployment's pods.\n" - }, - "maxUnavailable": { - "type": [ - "integer", - "null" - ], - "description": "The maximum number of pods that can be unavailable during\nvoluntary disruptions.\n" - }, - "minAvailable": { - "type": [ - "integer", - "null" - ], - "description": "The minimum number of pods required to be available during\nvoluntary disruptions.\n" - } - } - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. 
K8s Nodes are\nrequired to have match all these labels for this Pod to scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. 
This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "livenessProbe": { - "type": "object", - "additionalProperties": true, - "required": [ - "enabled" - ], - "if": { - "properties": { - "enabled": { - "const": true - } - } - }, - "then": { - "description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n" - } - }, - "readinessProbe": { - "type": "object", - "additionalProperties": true, - "required": [ - "enabled" - ], - "if": { - "properties": { - "enabled": { - "const": true - } - } - }, - "then": { - "description": "This config option is like the k8s native 
specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n" - } - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - }, - "defaultTarget": { - "type": [ - "string", - "null" - ], - "description": "Override the URL for the default routing target for the proxy.\nDefaults to JupyterHub itself.\nThis will generally only have an effect while JupyterHub is not running,\nas JupyterHub adds itself as the default target after it starts.\n" - }, - "errorTarget": { - "type": [ - "string", - "null" - ], - "description": "Override the URL for the error target for the proxy.\nDefaults to JupyterHub itself.\nUseful to reduce load on the Hub\nor produce more informative error messages than the Hub's default,\ne.g. in highly customized deployments such as BinderHub.\nSee Configurable HTTP Proxy for details on implementing an error target.\n" - }, - "extraPodSpec": { - "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra k8s pod specification as a YAML object. The default\nvalue of this setting is an empty object, i.e. no extra configuration.\nThe value of this property is augmented to the pod specification as-is.\n\nThis is a powerful tool for expert k8s administrators with advanced\nconfiguration requirements. This setting should only be used for\nconfiguration that cannot be accomplished through the other settings.\nMisusing this setting can break your deployment and/or compromise\nyour system security.\n\nThis is one of four related settings for inserting arbitrary pod\nspecification:\n\n1. hub.extraPodSpec\n2. proxy.chp.extraPodSpec\n3. proxy.traefik.extraPodSpec\n4. 
scheduling.userScheduler.extraPodSpec\n5. scheduling.userPlaceholder.extraPodSpec\n\nOne real-world use of these settings is to enable host networking. For\nexample, to configure host networking for the hub pod, add the\nfollowing to your helm configuration values:\n\n```yaml\nhub:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nLikewise, to configure host networking for the proxy pod, add the\nfollowing:\n\n```yaml\nproxy:\n chp:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nN.B. Host networking has special security implications and can easily\nbreak your deployment. This is an example\u2014not an endorsement.\n\nSee [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)\nfor the latest pod resource specification.\n" - } - } - }, - "secretToken": { - "type": [ - "string", - "null" - ], - "description": "```{note}\nAs of version 1.0.0 this will automatically be generated and there is\nno need to set it manually.\n\nIf you wish to reset a generated key, you can use `kubectl edit` on\nthe k8s Secret typically named `hub` and remove the\n`hub.config.ConfigurableHTTPProxy.auth_token` entry in the k8s Secret,\nthen perform a new `helm upgrade`.\n```\n\nA 32-byte cryptographically secure randomly generated string used to\nsecure communications between the hub pod and the proxy pod running a\n[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)\ninstance.\n\n```sh\n# to generate a value, run\nopenssl rand -hex 32\n```\n\nChanging this value will cause the proxy and hub pods to restart. It is good security\npractice to rotate these values over time. 
If this secret leaks, *immediately* change\nit to something else, or user data can be compromised.\n" - }, - "service": { - "type": "object", - "additionalProperties": false, - "description": "Configuration of the k8s Service `proxy-public` which either will\npoint to the `autohttps` pod running Traefik for TLS termination, or\nthe `proxy` pod running ConfigurableHTTPProxy. Incoming traffic from\nusers on the internet should always go through this k8s Service.\n\nWhen this service targets the `autohttps` pod which then routes to the\n`proxy` pod, a k8s Service named `proxy-http` will be added targeting\nthe `proxy` pod and only accepting HTTP traffic on port 80.\n", - "properties": { - "type": { - "enum": [ - "ClusterIP", - "NodePort", - "LoadBalancer", - "ExternalName" - ], - "description": "Default `LoadBalancer`.\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)\nto learn more about service types.\n" - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the proxy service.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the service that is exposing the proxy.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\nfor more details about annotations.\n" - }, - "nodePorts": { - "type": "object", - "additionalProperties": false, - "description": "Object to set NodePorts to expose the service on for http and https.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)\nfor more 
details about NodePorts.\n", - "properties": { - "http": { - "type": [ - "integer", - "null" - ], - "description": "The HTTP port the proxy-public service should be exposed on.\n" - }, - "https": { - "type": [ - "integer", - "null" - ], - "description": "The HTTPS port the proxy-public service should be exposed on.\n" - } - } - }, - "loadBalancerPort": { - "type": "object", - "additionalProperties": false, - "description": "Object to set LoadBalancer ports for http and https.\nThis is an AUP Learning Cloud extension.\n", - "properties": { - "http": { - "type": [ - "integer", - "null" - ], - "description": "The HTTP port the LoadBalancer service should expose (default 80).\n" - }, - "https": { - "type": [ - "integer", - "null" - ], - "description": "The HTTPS port the LoadBalancer service should expose (default 443).\n" - } - } - }, - "disableHttpPort": { - "type": "boolean", - "description": "Default `false`.\n\nIf `true`, port 80 for incoming HTTP traffic will no longer be exposed.\nDo not use with `proxy.https.type=letsencrypt` or `proxy.https.enabled=false`.\n" - }, - "extraPorts": { - "type": "array", - "description": "Extra ports the k8s Service should accept incoming traffic on,\nwhich will be redirected to either the `autohttps` pod (traefik)\nor the `proxy` pod (chp).\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#serviceport-v1-core)\nfor the structure of the items in this list.\n" - }, - "externalIPs": { - "type": "array", - "description": "A list of external IP addresses (unmanaged by Kubernetes) for which\nnodes in the cluster will accept traffic for this service.\nSee [externalIPs](https://kubernetes.io/docs/concepts/services-networking/service/#external-ips) for more details.\n" - }, - "loadBalancerIP": { - "type": [ - "string", - "null" - ], - "description": "The public IP address the proxy-public Kubernetes service should\nbe exposed on. 
This entry will end up at the configurable proxy\nserver that JupyterHub manages, which will direct traffic to user\npods at the `/user` path and the hub pod at the `/hub` path.\n\nSet this if you want to use a fixed external IP address instead of\na dynamically acquired one. This is relevant if you have a domain\nname that you want to point to a specific IP and want to ensure it\ndoesn't change.\n" - }, - "loadBalancerSourceRanges": { - "type": "array", - "description": "A list of IP CIDR ranges that are allowed to access the load balancer service.\nDefaults to allowing everyone to access it.\n" - }, - "ipFamilyPolicy": { - "type": [ - "string" - ], - "description": "See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services)\nfor more info.\n" - }, - "ipFamilies": { - "type": "array", - "description": "See the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services)\nfor more info.\n" - } - } - }, - "https": { - "type": "object", - "additionalProperties": false, - "description": "Object for customizing the settings for HTTPS used by the JupyterHub's proxy.\nFor more information on configuring HTTPS for your JupyterHub, see the [HTTPS section in our security guide](https)\n", - "properties": { - "enabled": { - "type": [ - "boolean", - "null" - ], - "description": "Indicator to set whether HTTPS should be enabled or not on the proxy. Defaults to `true` if the https object is provided.\n" - }, - "type": { - "enum": [ - null, - "", - "letsencrypt", - "manual", - "offload", - "secret" - ], - "description": "The type of HTTPS encryption. Decides on ports and network policies.\nSetting to `secret` uses manual HTTPS with `https.secret`. 
Defaults to `letsencrypt`.\n" - }, - "letsencrypt": { - "type": "object", - "additionalProperties": false, - "properties": { - "contactEmail": { - "type": [ - "string", - "null" - ], - "description": "The contact email to be used for automatically provisioned HTTPS certificates by Let's Encrypt. For more information see [Set up automatic HTTPS](setup-automatic-https).\nRequired for automatic HTTPS.\n" - }, - "acmeServer": { - "type": [ - "string", - "null" - ], - "description": "Let's Encrypt is one of various ACME servers that can provide\na certificate, and by default their production server is used.\n\nLet's Encrypt staging: https://acme-staging-v02.api.letsencrypt.org/directory\nLet's Encrypt production: acmeServer: https://acme-v02.api.letsencrypt.org/directory\n" - } - } - }, - "manual": { - "type": "object", - "additionalProperties": false, - "description": "Object for providing own certificates for manual HTTPS configuration. To be provided when setting `https.type` to `manual`.\nSee [Set up manual HTTPS](setup-manual-https)\n", - "properties": { - "key": { - "type": [ - "string", - "null" - ], - "description": "The RSA private key to be used for HTTPS.\nTo be provided in the form of\n\n```\nkey: |\n -----BEGIN RSA PRIVATE KEY-----\n ...\n -----END RSA PRIVATE KEY-----\n```\n" - }, - "cert": { - "type": [ - "string", - "null" - ], - "description": "The certificate to be used for HTTPS.\nTo be provided in the form of\n\n```\ncert: |\n -----BEGIN CERTIFICATE-----\n ...\n -----END CERTIFICATE-----\n```\n" - } - } - }, - "secret": { - "type": "object", - "additionalProperties": false, - "description": "Secret to be provided when setting `https.type` to `secret`.\n", - "properties": { - "name": { - "type": [ - "string", - "null" - ], - "description": "Name of the secret\n" - }, - "key": { - "type": [ - "string", - "null" - ], - "description": "Path to the private key to be used for HTTPS.\nExample: `'tls.key'`\n" - }, - "crt": { - "type": [ - "string", - "null" 
- ], - "description": "Path to the certificate to be used for HTTPS.\nExample: `'tls.crt'`\n" - } - } - }, - "hosts": { - "type": "array", - "description": "Your domain in list form.\nRequired for automatic HTTPS. See [Set up automatic HTTPS](setup-automatic-https).\nTo be provided like:\n```\nhosts:\n - \n```\n" - } - } - }, - "traefik": { - "type": "object", - "additionalProperties": false, - "description": "Configure the traefik proxy used to terminate TLS when 'autohttps' is enabled\n", - "properties": { - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the traefik pod.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "networkPolicy": { - "type": "object", - "additionalProperties": false, - "description": "This configuration regards the creation and configuration of a k8s\n_NetworkPolicy resource_.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Toggle the creation of the NetworkPolicy resource targeting this\npod, and by doing so, restricting its communication to only what\nis explicitly allowed in the NetworkPolicy.\n" - }, - "ingress": { - "type": "array", - "description": "Additional ingress rules to add besides those that are required\nfor core functionality.\n" - }, - "egress": { - "type": "array", - "description": "Additional egress rules to add besides those that are required for\ncore functionality and those added 
via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).\n\n```{versionchanged} 2.0.0\nThe default value changed from providing one very permissive rule\nallowing all egress to providing no rule. The permissive rule is\nstill provided via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)\nset to true though.\n```\n\nAs an example, below is a configuration that disables the more\nbroadly permissive `.privateIPs` egress allow rule for the hub\npod, and instead provides tightly scoped permissions to access a\nspecific k8s local service as identified by pod labels.\n\n```yaml\nhub:\n networkPolicy:\n egressAllowRules:\n privateIPs: false\n egress:\n - to:\n - podSelector:\n matchLabels:\n app.kubernetes.io/name: my-k8s-local-service\n ports:\n - protocol: TCP\n port: 5978\n```\n" - }, - "egressAllowRules": { - "type": "object", - "additionalProperties": false, - "description": "This is a set of predefined rules that when enabled will be added\nto the NetworkPolicy list of egress rules.\n\nThe resulting egress rules will be a composition of:\n- rules specific for the respective pod(s) function within the\n Helm chart\n- rules based on enabled `egressAllowRules` flags\n- rules explicitly specified by the user\n\n```{note}\nEach flag under this configuration will not render into a\ndedicated rule in the NetworkPolicy resource, but instead combine\nwith the other flags to a reduced set of rules to avoid a\nperformance penalty.\n```\n\n```{versionadded} 2.0.0\n```\n", - "properties": { - "cloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server.\n\nNote that the `nonPrivateIPs` rule is allowing all non Private\nIP ranges but makes an exception for the cloud metadata\nserver, leaving this as the definitive configuration to 
allow\naccess to the cloud metadata server.\n\n```{versionchanged} 3.0.0\nThis configuration is not allowed to be configured true at the\nsame time as\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n```\n" - }, - "dnsPortsCloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server\nvia port 53.\n\nRelying on this rule for the singleuser config should go hand\nin hand with disabling\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n\nKnown situations when this rule can be relevant:\n\n- In GKE clusters with Cloud DNS that is reached at the\n cloud metadata server's non-private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsKubeSystemNamespace": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to pods in the kube-system\nnamespace via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE, EKS, AKS, and other clusters relying directly on\n `kube-dns` or `coredns` pods in the `kube-system` namespace.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. 
Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to private IPs via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE clusters relying on a DNS server indirectly via a node\n local DNS cache at an unknown private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - }, - "nonPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the non-private IP ranges\nwith the exception of the cloud metadata server. 
This means\nrespective pod(s) can establish connections to the internet\nbut not (say) an unsecured prometheus server running in the\nsame cluster.\n" - }, - "privateIPs": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nPrivate IPs refer to the IP ranges `10.0.0.0/8`,\n`172.16.0.0/12`, `192.168.0.0/16`.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the internal k8s cluster.\nThis means users can access the internet but not (say) an\nunsecured prometheus server running in the same cluster.\n\nSince not all workloads in the k8s cluster may have\nNetworkPolicies setup to restrict their incoming connections,\nhaving this set to false can be a good defense against\nmalicious intent from someone in control of software in these\npods.\n\nIf possible, try to avoid setting this to true as it gives\nbroad permissions that could be specified more directly via\nthe [`.egress`](schema_singleuser.networkPolicy.egress).\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - } - } - }, - "interNamespaceAccessLabels": { - "enum": [ - "accept", - "ignore" - ], - "description": "This configuration option determines if both namespaces and pods\nin other namespaces, that have specific access labels, should be\naccepted to allow ingress (set to `accept`), or, if the labels are\nto be ignored when applied outside the local namespace (set to\n`ignore`).\n\nThe available access labels for respective NetworkPolicy resources\nare:\n\n- `hub.jupyter.org/network-access-hub: \"true\"` (hub)\n- `hub.jupyter.org/network-access-proxy-http: \"true\"` (proxy.chp, proxy.traefik)\n- `hub.jupyter.org/network-access-proxy-api: \"true\"` (proxy.chp)\n- 
`hub.jupyter.org/network-access-singleuser: \"true\"` (singleuser)\n" - }, - "allowedIngressPorts": { - "type": "array", - "description": "A rule to allow ingress on these ports will be added no matter\nwhat the origin of the request is. The default setting for\n`proxy.chp` and `proxy.traefik`'s networkPolicy configuration is\n`[http, https]`, while it is `[]` for other networkPolicies.\n\nNote that these port names or numbers target a Pod's port name or\nnumber, not a k8s Service's port name or number.\n" - } - } - }, - "extraInitContainers": { - "type": "array", - "description": "list of extraInitContainers to be run with traefik pod, after the containers set in the chart. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)\n\n```yaml\nproxy:\n traefik:\n extraInitContainers:\n - name: init-myservice\n image: busybox:1.28\n command: ['sh', '-c', 'command1']\n - name: init-mydb\n image: busybox:1.28\n command: ['sh', '-c', 'command2']\n```\n" - }, - "extraEnv": { - "type": [ - "object", - "array" - ], - "additionalProperties": true, - "description": "Extra environment variables that should be set for the traefik pod.\n\nEnvironment Variables here may be used to configure traefik.\n\nString literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which\nis a part of Kubernetes.\n\n```yaml\nproxy:\n traefik:\n extraEnv:\n # basic notation (for literal values only)\n MY_ENV_VARS_NAME1: \"my env var value 1\"\n\n # explicit notation (the \"name\" field takes precedence)\n TRAEFIK_NAMESPACE:\n name: TRAEFIK_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n\n # implicit notation (the \"name\" field is implied)\n PREFIXED_TRAEFIK_NAMESPACE:\n value: \"my-prefix-$(TRAEFIK_NAMESPACE)\"\n SECRET_VALUE:\n valueFrom:\n secretKeyRef:\n name: my-k8s-secret\n key: password\n```\n\nFor more information, see the [Kubernetes 
EnvVar\nspecification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).\n" - }, - "pdb": { - "type": "object", - "additionalProperties": false, - "description": "Configure a PodDisruptionBudget for this Deployment.\n\nThese are disabled by default for our deployments that don't support\nbeing run in parallel with multiple replicas. Only the user-scheduler\ncurrently supports being run in parallel with multiple replicas. If\nthey are enabled for a Deployment with only one replica, they will\nblock `kubectl drain` of a node for example.\n\nNote that if you aim to block scaling down a node with the\nhub/proxy/autohttps pod that would cause disruptions of the\ndeployment, then you should instead annotate the pods of the\nDeployment [as described\nhere](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).\n\n \"cluster-autoscaler.kubernetes.io/safe-to-evict\": \"false\"\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)\nfor more details about disruptions.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Decides if a PodDisruptionBudget is created targeting the\nDeployment's pods.\n" - }, - "maxUnavailable": { - "type": [ - "integer", - "null" - ], - "description": "The maximum number of pods that can be unavailable during\nvoluntary disruptions.\n" - }, - "minAvailable": { - "type": [ - "integer", - "null" - ], - "description": "The minimum number of pods required to be available during\nvoluntary disruptions.\n" - } - } - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. 
K8s Nodes are\nrequired to match all these labels for this Pod to be scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. These\ntolerations are additional tolerations to the tolerations common to\nall pods of their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "extraDynamicConfig": { - "type": "object", - "additionalProperties": true, - "description": "This refers to traefik's post-startup configuration.\n\nThis Helm chart already provides such configuration, so this is a\nplace where you can merge in additional configuration. 
If you are\nabout to use this configuration, you may want to inspect the\ndefault configuration declared\n[here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml).\n" - }, - "extraPorts": { - "type": "array", - "description": "Extra ports for the traefik container within the autohttps pod\nthat you would like to expose, formatted in a k8s native way.\n" - }, - "extraStaticConfig": { - "type": "object", - "additionalProperties": true, - "description": "This refers to traefik's startup configuration.\n\nThis Helm chart already provide such configuration, so this is a\nplace where you can merge in additional configuration. If you are\nabout to use this configuration, you may want to inspect the\ndefault configuration declared\n[here](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml).\n" - }, - "extraVolumes": { - "type": "array", - "description": "Additional volumes for the Pod. Use a k8s native syntax.\n" - }, - "extraVolumeMounts": { - "type": "array", - "description": "Additional volume mounts for the Container. Use a k8s native syntax.\n" - }, - "hsts": { - "type": "object", - "additionalProperties": false, - "required": [ - "includeSubdomains", - "maxAge", - "preload" - ], - "description": "This section regards a HTTP Strict-Transport-Security (HSTS)\nresponse header. 
It can act as a request for visiting web\nbrowsers to enforce HTTPS on their end for a given time into\nthe future, and optionally also for future requests to subdomains.\n\nThese settings relate to traefik configuration which we use as a\nTLS termination proxy.\n\nSee [Mozilla's\ndocumentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Strict-Transport-Security)\nfor more information.\n", - "properties": { - "includeSubdomains": { - "type": "boolean" - }, - "maxAge": { - "type": "integer" - }, - "preload": { - "type": "boolean" - } - } - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. 
A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - }, - "serviceAccount": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "extraPodSpec": { - "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra k8s pod specification as a YAML object. The default\nvalue of this setting is an empty object, i.e. 
no extra configuration.\nThe value of this property is augmented to the pod specification as-is.\n\nThis is a powerful tool for expert k8s administrators with advanced\nconfiguration requirements. This setting should only be used for\nconfiguration that cannot be accomplished through the other settings.\nMisusing this setting can break your deployment and/or compromise\nyour system security.\n\nThis is one of four related settings for inserting arbitrary pod\nspecification:\n\n1. hub.extraPodSpec\n2. proxy.chp.extraPodSpec\n3. proxy.traefik.extraPodSpec\n4. scheduling.userScheduler.extraPodSpec\n5. scheduling.userPlaceholder.extraPodSpec\n\nOne real-world use of these settings is to enable host networking. For\nexample, to configure host networking for the hub pod, add the\nfollowing to your helm configuration values:\n\n```yaml\nhub:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nLikewise, to configure host networking for the proxy pod, add the\nfollowing:\n\n```yaml\nproxy:\n chp:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nN.B. Host networking has special security implications and can easily\nbreak your deployment. 
This is an example\u2014not an endorsement.\n\nSee [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)\nfor the latest pod resource specification.\n" - } - } - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "K8s labels for the proxy pod.\n\n```{note}\nFor consistency, this should really be located under\nproxy.chp.labels but isn't for historical reasons.\n```\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "K8s annotations for the proxy pod.\n\n```{note}\nFor consistency, this should really be located under\nproxy.chp.annotations but isn't for historical reasons.\n```\n" - }, - "deploymentStrategy": { - "type": "object", - "additionalProperties": false, - "properties": { - "rollingUpdate": { - "type": [ - "string", - "null" - ] - }, - "type": { - "type": [ - "string", - "null" - ], - "description": "While the proxy pod running\n[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)\ncould run in parallel, two instances running in parallel wouldn't\nboth receive updates from JupyterHub regarding how it should route\ntraffic. Due to this we default to using a deployment strategy of\nRecreate instead of RollingUpdate.\n" - } - } - }, - "secretSync": { - "type": "object", - "additionalProperties": false, - "description": "This configuration section refers to configuration of the sidecar\ncontainer in the autohttps pod running next to its traefik container\nresponsible for TLS termination.\n\nThe purpose of this container is to store away and load TLS\ncertificates from a k8s Secret. 
The TLS certificates are acquired by\nthe ACME client (LEGO) that is running within the traefik container,\nwhere traefik is using them for TLS termination.\n", - "properties": { - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. 
A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - } - } - } - } - }, - "singleuser": { - "type": "object", - "additionalProperties": false, - "description": "Options for customizing the environment that is provided to the users after they log in.\n", - "properties": { - "networkPolicy": { - "type": "object", - "additionalProperties": false, - "description": "This configuration regards the creation and configuration of a k8s\n_NetworkPolicy resource_.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Toggle the creation of the NetworkPolicy resource targeting this\npod, and by doing so, restricting its communication to only what\nis explicitly allowed in the NetworkPolicy.\n" - }, - "ingress": { - "type": "array", - "description": "Additional ingress rules to add besides those that are required\nfor core functionality.\n" - }, - "egress": { - "type": "array", - "description": "Additional egress rules to add besides those that are required for\ncore functionality and those added via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules).\n\n```{versionchanged} 2.0.0\nThe default value changed from providing one very permissive rule\nallowing all egress to providing no rule. 
The permissive rule is\nstill provided via\n[`.egressAllowRules`](schema_hub.networkPolicy.egressAllowRules)\nset to true though.\n```\n\nAs an example, below is a configuration that disables the more\nbroadly permissive `.privateIPs` egress allow rule for the hub\npod, and instead provides tightly scoped permissions to access a\nspecific k8s local service as identified by pod labels.\n\n```yaml\nhub:\n networkPolicy:\n egressAllowRules:\n privateIPs: false\n egress:\n - to:\n - podSelector:\n matchLabels:\n app.kubernetes.io/name: my-k8s-local-service\n ports:\n - protocol: TCP\n port: 5978\n```\n" - }, - "egressAllowRules": { - "type": "object", - "additionalProperties": false, - "description": "This is a set of predefined rules that when enabled will be added\nto the NetworkPolicy list of egress rules.\n\nThe resulting egress rules will be a composition of:\n- rules specific for the respective pod(s) function within the\n Helm chart\n- rules based on enabled `egressAllowRules` flags\n- rules explicitly specified by the user\n\n```{note}\nEach flag under this configuration will not render into a\ndedicated rule in the NetworkPolicy resource, but instead combine\nwith the other flags to a reduced set of rules to avoid a\nperformance penalty.\n```\n\n```{versionadded} 2.0.0\n```\n", - "properties": { - "cloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server.\n\nNote that the `nonPrivateIPs` rule is allowing all non Private\nIP ranges but makes an exception for the cloud metadata\nserver, leaving this as the definitive configuration to allow\naccess to the cloud metadata server.\n\n```{versionchanged} 3.0.0\nThis configuration is not allowed to be configured true at the\nsame time 
as\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n```\n" - }, - "dnsPortsCloudMetadataServer": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the cloud metadata server\nvia port 53.\n\nRelying on this rule for the singleuser config should go hand\nin hand with disabling\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nto avoid an ambiguous configuration.\n\nKnown situations when this rule can be relevant:\n\n- In GKE clusters with Cloud DNS that is reached at the\n cloud metadata server's non-private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsKubeSystemNamespace": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to pods in the kube-system\nnamespace via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE, EKS, AKS, and other clusters relying directly on\n `kube-dns` or `coredns` pods in the `kube-system` namespace.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. 
Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{versionadded} 3.0.0\n```\n" - }, - "dnsPortsPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to private IPs via port 53.\n\nKnown situations when this rule can be relevant:\n\n- GKE clusters relying on a DNS server indirectly via a node\n local DNS cache at an unknown private IP.\n\n```{note}\nThis chart doesn't know how to identify the DNS server that\npods will rely on due to variations between how k8s clusters\nhave been setup. Due to that, multiple rules are enabled by\ndefault to ensure DNS connectivity.\n```\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - }, - "nonPrivateIPs": { - "type": "boolean", - "description": "Defaults to `true` for all network policies.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the non-private IP ranges\nwith the exception of the cloud metadata server. 
This means\nrespective pod(s) can establish connections to the internet\nbut not (say) an unsecured prometheus server running in the\nsame cluster.\n" - }, - "privateIPs": { - "type": "boolean", - "description": "Defaults to `false` for singleuser servers, but to `true` for\nall other network policies.\n\nPrivate IPs refer to the IP ranges `10.0.0.0/8`,\n`172.16.0.0/12`, `192.168.0.0/16`.\n\nWhen enabled this rule allows the respective pod(s) to\nestablish outbound connections to the internal k8s cluster.\nThis means users can access the internet but not (say) an\nunsecured prometheus server running in the same cluster.\n\nSince not all workloads in the k8s cluster may have\nNetworkPolicies setup to restrict their incoming connections,\nhaving this set to false can be a good defense against\nmalicious intent from someone in control of software in these\npods.\n\nIf possible, try to avoid setting this to true as it gives\nbroad permissions that could be specified more directly via\nthe [`.egress`](schema_singleuser.networkPolicy.egress).\n\n```{warning}\nThis rule is not expected to work in clusters relying on\nCilium to enforce the NetworkPolicy rules (includes GKE\nclusters with Dataplane v2), this is due to a [known\nlimitation](https://github.com/cilium/cilium/issues/9209).\n```\n" - } - } - }, - "interNamespaceAccessLabels": { - "enum": [ - "accept", - "ignore" - ], - "description": "This configuration option determines if both namespaces and pods\nin other namespaces, that have specific access labels, should be\naccepted to allow ingress (set to `accept`), or, if the labels are\nto be ignored when applied outside the local namespace (set to\n`ignore`).\n\nThe available access labels for respective NetworkPolicy resources\nare:\n\n- `hub.jupyter.org/network-access-hub: \"true\"` (hub)\n- `hub.jupyter.org/network-access-proxy-http: \"true\"` (proxy.chp, proxy.traefik)\n- `hub.jupyter.org/network-access-proxy-api: \"true\"` (proxy.chp)\n- 
`hub.jupyter.org/network-access-singleuser: \"true\"` (singleuser)\n" - }, - "allowedIngressPorts": { - "type": "array", - "description": "A rule to allow ingress on these ports will be added no matter\nwhat the origin of the request is. The default setting for\n`proxy.chp` and `proxy.traefik`'s networkPolicy configuration is\n`[http, https]`, while it is `[]` for other networkPolicies.\n\nNote that these port names or numbers target a Pod's port name or\nnumber, not a k8s Service's port name or number.\n" - } - } - }, - "podNameTemplate": { - "type": [ - "string", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.pod_name_template](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.pod_name_template).\n" - }, - "cpu": { - "type": "object", - "additionalProperties": false, - "description": "Set CPU limits & guarantees that are enforced for each user.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)\nfor more info.\n", - "properties": { - "limit": { - "type": [ - "number", - "null" - ] - }, - "guarantee": { - "type": [ - "number", - "null" - ] - } - } - }, - "memory": { - "type": "object", - "additionalProperties": false, - "description": "Set Memory limits & guarantees that are enforced for each user.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)\nfor more info.\n", - "properties": { - "limit": { - "type": [ - "number", - "string", - "null" - ] - }, - "guarantee": { - "type": [ - "number", - "string", - "null" - ], - "description": "Note that this field is referred to as *requests* by the Kubernetes API.\n" - } - } - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": 
"The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "initContainers": { - "type": "array", - "description": "list of initContainers to be run every singleuser pod. 
See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)\n\n```yaml\nsingleuser:\n initContainers:\n - name: init-myservice\n image: busybox:1.28\n command: ['sh', '-c', 'command1']\n - name: init-mydb\n image: busybox:1.28\n command: ['sh', '-c', 'command2']\n```\n" - }, - "profileList": { - "type": "array", - "description": "For more information about the profile list, see [KubeSpawner's\ndocumentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner)\nas this is simply a passthrough to that configuration.\n\n```{note}\nThe image-pullers are aware of the overrides of images in\n`singleuser.profileList` but they won't be if you configure it in\nJupyterHub's configuration of '`c.KubeSpawner.profile_list`.\n```\n\n```yaml\nsingleuser:\n profileList:\n - display_name: \"Default: Shared, 8 CPU cores\"\n description: \"Your code will run on a shared machine with CPU only.\"\n default: True\n - display_name: \"Personal, 4 CPU cores & 26GB RAM, 1 NVIDIA Tesla K80 GPU\"\n description: \"Your code will run a personal machine with a GPU.\"\n kubespawner_override:\n extra_resource_limits:\n nvidia.com/gpu: \"1\"\n```\n" - }, - "extraFiles": { - "type": "object", - "additionalProperties": false, - "description": "A dictionary with extra files to be injected into the pod's container\non startup. This can for example be used to inject: configuration\nfiles, custom user interface templates, images, and more.\n\n```yaml\n# NOTE: \"hub\" is used in this example, but the configuration is the\n# same for \"singleuser\".\nhub:\n extraFiles:\n # The file key is just a reference that doesn't influence the\n # actual file name.\n :\n # mountPath is required and must be the absolute file path.\n mountPath: \n\n # Choose one out of the three ways to represent the actual file\n # content: data, stringData, or binaryData.\n #\n # data should be set to a mapping (dictionary). 
It will in the\n # end be rendered to either YAML, JSON, or TOML based on the\n # filename extension that are required to be either .yaml, .yml,\n # .json, or .toml.\n #\n # If your content is YAML, JSON, or TOML, it can make sense to\n # use data to represent it over stringData as data can be merged\n # instead of replaced if set partially from separate Helm\n # configuration files.\n #\n # Both stringData and binaryData should be set to a string\n # representing the content, where binaryData should be the\n # base64 encoding of the actual file content.\n #\n data:\n myConfig:\n myMap:\n number: 123\n string: \"hi\"\n myList:\n - 1\n - 2\n stringData: |\n hello world!\n binaryData: aGVsbG8gd29ybGQhCg==\n\n # mode is by default 0644 and you can optionally override it\n # either by octal notation (example: 0400) or decimal notation\n # (example: 256).\n mode: \n```\n\n**Using --set-file**\n\nTo avoid embedding entire files in the Helm chart configuration, you\ncan use the `--set-file` flag during `helm upgrade` to set the\nstringData or binaryData field.\n\n```yaml\nhub:\n extraFiles:\n my_image:\n mountPath: /usr/local/share/jupyterhub/static/my_image.png\n\n # Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are\n # automatically loaded in alphabetical order of the final file\n # name when JupyterHub starts.\n my_config:\n mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_jupyterhub_config.py\n```\n\n```bash\n# --set-file expects a text based file, so you need to base64 encode\n# it manually first.\nbase64 my_image.png > my_image.png.b64\n\nhelm upgrade <...> \\\n --set-file hub.extraFiles.my_image.binaryData=./my_image.png.b64 \\\n --set-file hub.extraFiles.my_config.stringData=./my_jupyterhub_config.py\n```\n\n**Common uses**\n\n1. **JupyterHub template customization**\n\n You can replace the default JupyterHub user interface templates in\n the hub pod by injecting new ones to\n `/usr/local/share/jupyterhub/templates`. 
These can in turn\n reference custom images injected to\n `/usr/local/share/jupyterhub/static`.\n\n1. **JupyterHub standalone file config**\n\n Instead of embedding JupyterHub python configuration as a string\n within a YAML file through\n [`hub.extraConfig`](schema_hub.extraConfig), you can inject a\n standalone .py file into\n `/usr/local/etc/jupyterhub/jupyterhub_config.d` that is\n automatically loaded.\n\n1. **Flexible configuration**\n\n By injecting files, you don't have to embed them in a docker image\n that you have to rebuild.\n\n If your configuration file is a YAML/JSON/TOML file, you can also\n use `data` instead of `stringData` which allow you to set various\n configuration in separate Helm config files. This can be useful to\n help dependent charts override only some configuration part of the\n file, or to allow for the configuration be set through multiple\n Helm configuration files.\n\n**Limitations**\n\n1. File size\n\n The files in `hub.extraFiles` and `singleuser.extraFiles` are\n respectively stored in their own k8s Secret resource. As k8s\n Secret's are limited, typically to 1MB, you will be limited to a\n total file size of less than 1MB as there is also base64 encoding\n that takes place reducing available capacity to 75%.\n\n2. 
File updates\n\n The files that are mounted are only set during container startup.\n This is [because we use\n `subPath`](https://kubernetes.io/docs/concepts/storage/volumes/#secret)\n as is required to avoid replacing the content of the entire\n directory we mount in.\n", - "patternProperties": { - ".*": { - "type": "object", - "additionalProperties": false, - "required": [ - "mountPath" - ], - "oneOf": [ - { - "required": [ - "data" - ] - }, - { - "required": [ - "stringData" - ] - }, - { - "required": [ - "binaryData" - ] - } - ], - "properties": { - "mountPath": { - "type": "string" - }, - "data": { - "type": "object", - "additionalProperties": true - }, - "stringData": { - "type": "string" - }, - "binaryData": { - "type": "string" - }, - "mode": { - "type": "number" - } - } - } - } - }, - "extraEnv": { - "type": [ - "object", - "array" - ], - "additionalProperties": true, - "description": "Extra environment variables that should be set for the user pods.\n\nString literals with `$(ENV_VAR_NAME)` will be expanded by Kubelet which\nis a part of Kubernetes. Note that the user pods will already have\naccess to a set of environment variables that you can use, like\n`JUPYTERHUB_USER` and `JUPYTERHUB_HOST`. 
For more information about these\ninspect [this source\ncode](https://github.com/jupyterhub/jupyterhub/blob/cc8e7806530466dce8968567d1bbd2b39a7afa26/jupyterhub/spawner.py#L763).\n\n```yaml\nsingleuser:\n  extraEnv:\n    # basic notation (for literal values only)\n    MY_ENV_VARS_NAME1: \"my env var value 1\"\n\n    # explicit notation (the \"name\" field takes precedence)\n    USER_NAMESPACE:\n      name: USER_NAMESPACE\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.namespace\n\n    # implicit notation (the \"name\" field is implied)\n    PREFIXED_USER_NAMESPACE:\n      value: \"my-prefix-$(USER_NAMESPACE)\"\n    SECRET_VALUE:\n      valueFrom:\n        secretKeyRef:\n          name: my-k8s-secret\n          key: password\n```\n\nFor more information, see the [Kubernetes EnvVar\nspecification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#envvar-v1-core).\n" - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. K8s Nodes are\nrequired to match all these labels for this Pod to be scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "extraTolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. 
These\ntolerations are additional tolerations to the tolerations common to\nall pods of their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "extraNodeAffinity": { - "type": "object", - "additionalProperties": false, - "description": "Affinities describe where pods prefer or require to be scheduled, they\nmay prefer or require a node where they are to be scheduled to have a\ncertain label (node affinity). They may also require to be scheduled\nin proximity or with a lack of proximity to another pod (pod affinity\nand anti pod affinity).\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)\nfor more info.\n", - "properties": { - "required": { - "type": "array", - "description": "Pass this field an array of\n[`NodeSelectorTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#nodeselectorterm-v1-core)\nobjects.\n" - }, - "preferred": { - "type": "array", - "description": "Pass this field an array of\n[`PreferredSchedulingTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#preferredschedulingterm-v1-core)\nobjects.\n" - } - } - }, - "extraPodAffinity": { - "type": "object", - "additionalProperties": false, - "description": "See the description of `singleuser.extraNodeAffinity`.\n", - "properties": { - "required": { - "type": "array", - "description": "Pass this field an array of\n[`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)\nobjects.\n" - }, - "preferred": { - "type": "array", - "description": "Pass this 
field an array of\n[`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)\nobjects.\n" - } - } - }, - "extraPodAntiAffinity": { - "type": "object", - "additionalProperties": false, - "description": "See the description of `singleuser.extraNodeAffinity`.\n", - "properties": { - "required": { - "type": "array", - "description": "Pass this field an array of\n[`PodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podaffinityterm-v1-core)\nobjects.\n" - }, - "preferred": { - "type": "array", - "description": "Pass this field an array of\n[`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#weightedpodaffinityterm-v1-core)\nobjects.\n" - } - } - }, - "cloudMetadata": { - "type": "object", - "additionalProperties": false, - "required": [ - "blockWithIptables", - "ip" - ], - "description": "Please refer to dedicated section in [the Helm chart\ndocumentation](block-metadata-iptables) for more information about\nthis.\n", - "properties": { - "blockWithIptables": { - "type": "boolean" - }, - "ip": { - "type": "string" - } - } - }, - "cmd": { - "type": [ - "array", - "string", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.cmd](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.cmd).\nThe default is \"jupyterhub-singleuser\".\nUse `cmd: null` to launch a custom CMD from the image,\nwhich must launch jupyterhub-singleuser or an equivalent process eventually.\nFor example: Jupyter's docker-stacks images.\n" - }, - "defaultUrl": { - "type": [ - "string", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.default_url](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.default_url).\n" - }, - "events": { - "type": [ - "boolean", - "null" - ], - "description": "Passthrough configuration 
for\n[KubeSpawner.events_enabled](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.events_enabled).\n" - }, - "extraAnnotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Passthrough configuration for\n[KubeSpawner.extra_annotations](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_annotations).\n" - }, - "extraContainers": { - "type": "array", - "description": "Passthrough configuration for\n[KubeSpawner.extra_containers](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_containers).\n" - }, - "extraLabels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Passthrough configuration for\n[KubeSpawner.extra_labels](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_labels).\n" - }, - "extraPodConfig": { - "type": "object", - "additionalProperties": true, - "description": "Passthrough configuration for\n[KubeSpawner.extra_pod_config](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_pod_config).\n" - }, - "extraResource": { - "type": "object", - "additionalProperties": false, - "properties": { - "guarantees": { - "type": "object", - "additionalProperties": true, - "description": "Passthrough configuration for\n[KubeSpawner.extra_resource_guarantees](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_guarantees).\n" - }, - "limits": { - "type": "object", - "additionalProperties": true, - "description": "Passthrough configuration for\n[KubeSpawner.extra_resource_limits](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.extra_resource_limits).\n" - } - } - 
}, - "fsGid": { - "type": [ - "integer", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.fs_gid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.fs_gid).\n" - }, - "lifecycleHooks": { - "type": "object", - "additionalProperties": false, - "description": "Passthrough configuration for\n[KubeSpawner.lifecycle_hooks](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.lifecycle_hooks).\n", - "properties": { - "postStart": { - "type": "object", - "additionalProperties": true - }, - "preStop": { - "type": "object", - "additionalProperties": true - } - } - }, - "networkTools": { - "type": "object", - "additionalProperties": false, - "description": "This configuration section refers to configuration of a conditionally\ncreated initContainer for the user pods with a purpose to block a\nspecific IP address.\n\nThis initContainer will be created if\n[`singleuser.cloudMetadata.blockWithIptables`](schema_singleuser.cloudMetadata.blockWithIptables)\nis set to true.\n", - "properties": { - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. 
This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - } - } - }, - "serviceAccountName": { - "type": [ - "string", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.service_account](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.service_account).\n" - }, - "startTimeout": { - "type": [ - "integer", - "null" - ], - "description": "Passthrough configuration 
for\n[KubeSpawner.start_timeout](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.start_timeout).\n" - }, - "storage": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "homeMountPath" - ], - "description": "This section configures KubeSpawner directly to some extent but also\nindirectly through Helm chart specific configuration options such as\n[`singleuser.storage.type`](schema_singleuser.storage.type).\n", - "properties": { - "capacity": { - "type": [ - "string", - "null" - ], - "description": "Configures `KubeSpawner.storage_capacity`.\n\nSee the [KubeSpawner\ndocumentation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html)\nfor more information.\n" - }, - "dynamic": { - "type": "object", - "additionalProperties": false, - "properties": { - "pvcNameTemplate": { - "type": [ - "string", - "null" - ], - "description": "Configures `KubeSpawner.pvc_name_template` which will be the\nresource name of the PVC created by KubeSpawner for each user\nif needed.\n" - }, - "storageAccessModes": { - "type": "array", - "items": { - "type": [ - "string", - "null" - ] - }, - "description": "Configures `KubeSpawner.storage_access_modes`.\n\nSee KubeSpawner's documentation and [the k8s\ndocumentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)\nfor more information.\n" - }, - "storageClass": { - "type": [ - "string", - "null" - ], - "description": "Configures `KubeSpawner.storage_class`, which can be an\nexplicit StorageClass to dynamically provision storage for the\nPVC that KubeSpawner will create.\n\nThere is often a default StorageClass available in k8s clusters\nfor use if this is unspecified.\n" - }, - "subPath": { - "type": [ - "string", - "null" - ], - "description": "Configures the `subPath` field of a\n`KubeSpawner.volume_mounts` entry added by the Helm chart.\n\nPath within the volume from which the container's volume\nshould be mounted. 
Defaults to \"\" (volume's root).\n" - }, - "volumeNameTemplate": { - "type": [ - "string", - "null" - ], - "description": "Configures `KubeSpawner.volume_name_template`, which is the\nname to reference from the containers volumeMounts section.\n" - } - } - }, - "extraLabels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Configures `KubeSpawner.storage_extra_labels`. Note that these\nlabels are set on the PVC during creation only and won't be\nupdated after creation.\n" - }, - "extraVolumeMounts": { - "type": [ - "object", - "array", - "null" - ], - "description": "Injects extra volume mounts into `KubeSpawner.volume_mounts` dictionary.\nCan be a dictionary or an array.\nIf it's an array, each item must be a volume mount configuration in k8s\nnative syntax. A combination of the volume name and its index is used as the key\nin `KubeSpawner.volume_mounts` dictionary and the value is the volume mount\nconfiguration.\nIf `extraVolumeMounts` is defined as a dictionary, the keys of the dictionary\ncan be any descriptive name for the volume mount and the value is the volume mount\nconfiguration in k8s native syntax.\n" - }, - "extraVolumes": { - "type": [ - "object", - "array", - "null" - ], - "description": "Injects extra volumes into `KubeSpawner.volumes` dictionary. Can be a dictionary\nor an array.\nIf it's an array, each item must be volume configuration in k8s native\nsyntax. 
The name of the volume is used as the key in `KubeSpawner.volumes`\ndictionary while the value is the volume configuration.\nIf `extraVolumes` is defined as a dictionary, the keys of the dictionary\ncan be any descriptive name for the volume and the value must be the volume\nconfiguration in k8s native syntax.\n" - }, - "homeMountPath": { - "type": "string", - "description": "The location within the container where the home folder storage\nshould be mounted.\n" - }, - "static": { - "type": "object", - "additionalProperties": false, - "properties": { - "pvcName": { - "type": [ - "string", - "null" - ], - "description": "Configures `KubeSpawner.pvc_claim_name` to reference\npre-existing storage.\n" - }, - "subPath": { - "type": [ - "string", - "null" - ], - "description": "Configures the `subPath` field of a\n`KubeSpawner.volume_mounts` entry added by the Helm chart.\n\nPath within the volume from which the container's volume\nshould be mounted. Defaults to \"\" (volume's root).\n" - } - } - }, - "type": { - "enum": [ - "dynamic", - "static", - "none" - ], - "description": "Decide if you want storage to be provisioned dynamically\n(dynamic), or if you want to attach existing storage (static), or\ndon't want any storage to be attached (none).\n" - } - } - }, - "allowPrivilegeEscalation": { - "type": [ - "boolean", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.allow_privilege_escalation](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.allow_privilege_escalation).\n" - }, - "uid": { - "type": [ - "integer", - "null" - ], - "description": "Passthrough configuration for\n[KubeSpawner.uid](https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html#kubespawner.KubeSpawner.uid).\n\nThis dictates as what user the main container will start up as.\n\nAs an example of when this is needed, consider if you want to enable\nsudo rights for some of your users. 
This can be done by starting up as\nroot, enabling it from the container in a startup script, and then\ntransitioning to the normal user.\n\nDefault is 1000, set to null to use the container's default.\n" - } - } - }, - "scheduling": { - "type": "object", - "additionalProperties": false, - "description": "Objects for customizing the scheduling of various pods on the nodes and\nrelated labels.\n", - "properties": { - "userScheduler": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled", - "plugins", - "pluginConfig", - "logLevel" - ], - "description": "The user scheduler is making sure that user pods are scheduled\ntight on nodes, this is useful for autoscaling of user node pools.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Enables the user scheduler.\n" - }, - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "replicas": { - "type": "integer", - "description": "You can have multiple schedulers to share the workload or improve\navailability on node failure.\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. 
This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "pdb": { - "type": "object", - "additionalProperties": false, - "description": "Configure a PodDisruptionBudget for this Deployment.\n\nThese are disabled by default for our deployments that don't support\nbeing run in parallel with multiple replicas. Only the user-scheduler\ncurrently supports being run in parallel with multiple replicas. 
If\nthey are enabled for a Deployment with only one replica, they will\nblock `kubectl drain` of a node for example.\n\nNote that if you aim to block scaling down a node with the\nhub/proxy/autohttps pod that would cause disruptions of the\ndeployment, then you should instead annotate the pods of the\nDeployment [as described\nhere](https://github.com/kubernetes/autoscaler/blob/HEAD/cluster-autoscaler/FAQ.md#what-types-of-pods-can-prevent-ca-from-removing-a-node).\n\n \"cluster-autoscaler.kubernetes.io/safe-to-evict\": \"false\"\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/)\nfor more details about disruptions.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Decides if a PodDisruptionBudget is created targeting the\nDeployment's pods.\n" - }, - "maxUnavailable": { - "type": [ - "integer", - "null" - ], - "description": "The maximum number of pods that can be unavailable during\nvoluntary disruptions.\n" - }, - "minAvailable": { - "type": [ - "integer", - "null" - ], - "description": "The minimum number of pods required to be available during\nvoluntary disruptions.\n" - } - } - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. K8s Nodes are\nrequired to have match all these labels for this Pod to scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. 
These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the userScheduler pods.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra annotations to add to the user-scheduler pods.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "logLevel": { - "type": "integer", - "description": "Corresponds to the verbosity level of logging made by the\nkube-scheduler binary running within the user-scheduler pod.\n" - }, - "plugins": { - "type": "object", - "additionalProperties": true, - "description": "These plugins refers to kube-scheduler plugins as documented\n[here](https://kubernetes.io/docs/reference/scheduling/config/).\n\nThe user-scheduler is really just a kube-scheduler configured in a\nway to pack users tight on nodes using these plugins. 
See\nvalues.yaml for information about the default plugins.\n" - }, - "pluginConfig": { - "type": "array", - "description": "Individually activated plugins can be configured further.\n" - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - }, - "serviceAccount": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "extraPodSpec": { - "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra k8s pod specification as a YAML object. The default\nvalue of this setting is an empty object, i.e. no extra configuration.\nThe value of this property is augmented to the pod specification as-is.\n\nThis is a powerful tool for expert k8s administrators with advanced\nconfiguration requirements. 
This setting should only be used for\nconfiguration that cannot be accomplished through the other settings.\nMisusing this setting can break your deployment and/or compromise\nyour system security.\n\nThis is one of four related settings for inserting arbitrary pod\nspecification:\n\n1. hub.extraPodSpec\n2. proxy.chp.extraPodSpec\n3. proxy.traefik.extraPodSpec\n4. scheduling.userScheduler.extraPodSpec\n5. scheduling.userPlaceholder.extraPodSpec\n\nOne real-world use of these settings is to enable host networking. For\nexample, to configure host networking for the hub pod, add the\nfollowing to your helm configuration values:\n\n```yaml\nhub:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nLikewise, to configure host networking for the proxy pod, add the\nfollowing:\n\n```yaml\nproxy:\n chp:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nN.B. Host networking has special security implications and can easily\nbreak your deployment. This is an example\u2014not an endorsement.\n\nSee [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)\nfor the latest pod resource specification.\n" - } - } - }, - "podPriority": { - "type": "object", - "additionalProperties": false, - "description": "Pod Priority is used to allow real users evict user placeholder pods\nthat in turn by entering a Pending state can trigger a scale up by a\ncluster autoscaler.\n\nHaving this option enabled only make sense if the following conditions\nare met:\n\n1. A cluster autoscaler is installed.\n2. user-placeholer pods are configured to have a priority equal or\n higher than the cluster autoscaler's \"priority cutoff\" so that the\n cluster autoscaler scales up a node in advance for a pending user\n placeholder pod.\n3. Normal user pods have a higher priority than the user-placeholder\n pods.\n4. 
Image puller pods have a priority between normal user pods and\n user-placeholder pods.\n\nNote that if the default priority cutoff if not configured on cluster\nautoscaler, it will currently default to 0, and that in the future\nthis is meant to be lowered. If your cloud provider is installing the\ncluster autoscaler for you, they may also configure this specifically.\n\nRecommended settings for a cluster autoscaler...\n\n... with a priority cutoff of -10 (GKE):\n\n```yaml\npodPriority:\n enabled: true\n globalDefault: false\n defaultPriority: 0\n imagePullerPriority: -5\n userPlaceholderPriority: -10\n```\n\n... with a priority cutoff of 0:\n\n```yaml\npodPriority:\n enabled: true\n globalDefault: true\n defaultPriority: 10\n imagePullerPriority: 5\n userPlaceholderPriority: 0\n```\n", - "properties": { - "enabled": { - "type": "boolean" - }, - "globalDefault": { - "type": "boolean", - "description": "Warning! This will influence all pods in the cluster.\n\nThe priority a pod usually get is 0. But this can be overridden\nwith a PriorityClass resource if it is declared to be the global\ndefault. This configuration option allows for the creation of such\nglobal default.\n" - }, - "defaultPriority": { - "type": "integer", - "description": "The actual value for the default pod priority.\n" - }, - "imagePullerPriority": { - "type": "integer", - "description": "The actual value for the [hook|continuous]-image-puller pods' priority.\n" - }, - "userPlaceholderPriority": { - "type": "integer", - "description": "The actual value for the user-placeholder pods' priority.\n" - } - } - }, - "userPlaceholder": { - "type": "object", - "additionalProperties": false, - "description": "User placeholders simulate users but will thanks to PodPriority be\nevicted by the cluster autoscaler if a real user shows up. In this way\nplaceholders allow you to create a headroom for the real users and\nreduce the risk of a user having to wait for a node to be added. 
Be\nsure to use the the continuous image puller as well along with\nplaceholders, so the images are also available when real users arrive.\n\nTo test your setup efficiently, you can adjust the amount of user\nplaceholders with the following command:\n```sh\n# Configure to have 3 user placeholders\nkubectl scale sts/user-placeholder --replicas=3\n```\n", - "properties": { - "enabled": { - "type": "boolean" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. 
A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "replicas": { - "type": "integer", - "description": "How many placeholder pods would you like to have?\n" - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the userPlaceholder pods.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra annotations to add to the placeholder pods.\n" - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "Unless specified here, the placeholder pods will request the same\nresources specified for the real singleuser pods.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "extraPodSpec": { 
- "type": "object", - "additionalProperties": true, - "description": "Arbitrary extra k8s pod specification as a YAML object. The default\nvalue of this setting is an empty object, i.e. no extra configuration.\nThe value of this property is augmented to the pod specification as-is.\n\nThis is a powerful tool for expert k8s administrators with advanced\nconfiguration requirements. This setting should only be used for\nconfiguration that cannot be accomplished through the other settings.\nMisusing this setting can break your deployment and/or compromise\nyour system security.\n\nThis is one of four related settings for inserting arbitrary pod\nspecification:\n\n1. hub.extraPodSpec\n2. proxy.chp.extraPodSpec\n3. proxy.traefik.extraPodSpec\n4. scheduling.userScheduler.extraPodSpec\n5. scheduling.userPlaceholder.extraPodSpec\n\nOne real-world use of these settings is to enable host networking. For\nexample, to configure host networking for the hub pod, add the\nfollowing to your helm configuration values:\n\n```yaml\nhub:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nLikewise, to configure host networking for the proxy pod, add the\nfollowing:\n\n```yaml\nproxy:\n chp:\n extraPodSpec:\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n```\n\nN.B. Host networking has special security implications and can easily\nbreak your deployment. 
This is an example\u2014not an endorsement.\n\nSee [PodSpec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)\nfor the latest pod resource specification.\n" - } - } - }, - "corePods": { - "type": "object", - "additionalProperties": false, - "description": "These settings influence the core pods like the hub, proxy and\nuser-scheduler pods.\nThese settings influence all pods considered core pods, namely:\n\n- hub\n- proxy\n- autohttps\n- hook-image-awaiter\n- user-scheduler\n\nBy defaults, the tolerations are:\n\n- hub.jupyter.org/dedicated=core:NoSchedule\n- hub.jupyter.org_dedicated=core:NoSchedule\n\nNote that tolerations set here are combined with the respective\ncomponents dedicated tolerations, and that `_` is available in case\n`/` isn't allowed in the clouds tolerations.\n", - "properties": { - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "nodeAffinity": { - "type": "object", - "additionalProperties": false, - "description": "Where should pods be scheduled? 
Perhaps on nodes with a certain\nlabel is preferred or even required?\n", - "properties": { - "matchNodePurpose": { - "enum": [ - "ignore", - "prefer", - "require" - ], - "description": "Decide if core pods *ignore*, *prefer* or *require* to\nschedule on nodes with this label:\n```\nhub.jupyter.org/node-purpose=core\n```\n" - } - } - } - } - }, - "userPods": { - "type": "object", - "additionalProperties": false, - "description": "These settings influence all pods considered user pods, namely:\n\n- user-placeholder\n- hook-image-puller\n- continuous-image-puller\n- jupyter-\n\nBy defaults, the tolerations are:\n\n- hub.jupyter.org/dedicated=core:NoSchedule\n- hub.jupyter.org_dedicated=core:NoSchedule\n\nNote that tolerations set here are combined with the respective\ncomponents dedicated tolerations, and that `_` is available in case\n`/` isn't allowed in the clouds tolerations.\n", - "properties": { - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "nodeAffinity": { - "type": "object", - "additionalProperties": false, - "description": "Where should pods be scheduled? 
Perhaps on nodes with a certain\nlabel is preferred or even required?\n", - "properties": { - "matchNodePurpose": { - "enum": [ - "ignore", - "prefer", - "require" - ], - "description": "Decide if user pods *ignore*, *prefer* or *require* to\nschedule on nodes with this label:\n```\nhub.jupyter.org/node-purpose=user\n```\n" - } - } - } - } - } - } - }, - "ingress": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "type": "boolean", - "description": "Enable the creation of a Kubernetes Ingress to proxy-public service.\n\nSee [Advanced Topics \u2014 Zero to JupyterHub with Kubernetes\n0.7.0 documentation](ingress)\nfor more details.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the Ingress resource.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\nfor more details about annotations.\n" - }, - "ingressClassName": { - "type": [ - "string", - "null" - ], - "description": "Maps directly to the Ingress resource's `spec.ingressClassName``.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class)\nfor more details.\n" - }, - "hosts": { - "type": "array", - "description": "List of hosts to route requests to the proxy.\n" - }, - "pathSuffix": { - "type": [ - "string", - "null" - ], - "description": "Suffix added to Ingress's routing path pattern.\n\nSpecify `*` if your ingress matches path by glob pattern.\n" - }, - "pathType": { - "enum": [ - "Prefix", - "Exact", - "ImplementationSpecific" - ], - "description": "The path type to use. 
The default value is 'Prefix'.\n\nSee [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)\nfor more details about path types.\n" - }, - "tls": { - "type": "array", - "description": "TLS configurations for Ingress.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)\nfor more details about annotations.\n" - }, - "extraPaths": { - "type": "array", - "description": "A list of custom paths to be added to the ingress configuration.\n\nSee [the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types)\nfor more details about paths.\n" - } - } - }, - "httpRoute": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "type": "boolean", - "description": "Enable the creation of a Kubernetes HTTPRoute to the proxy-public service.\n\nRequires support for the [Gateway API](https://gateway-api.sigs.k8s.io/).\n\nA Gateway must already exist.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the HTTPRoute resource.\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\nfor more details about annotations.\n" - }, - "hostnames": { - "type": "array", - "description": "List of hosts to route requests to the proxy, if empty route all requests regardless of host.\n" - }, - "gateway": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "The name of your Gateway for this route.\n" - }, - "namespace": { - "type": "string", - "description": "The namespace the Gateway is running in.\n" - }, - "sectionName": { - "type": "string", - "description": "The name of the Gateway 
listener section to route traffic to.\nCommon values are 'http', 'https', or custom listener names.\n" - } - } - } - } - }, - "prePuller": { - "type": "object", - "additionalProperties": false, - "required": [ - "hook", - "continuous" - ], - "properties": { - "revisionHistoryLimit": { - "type": [ - "integer", - "null" - ], - "minimum": 0, - "description": "Configures the resource's `spec.revisionHistoryLimit`. This is\navailable for Deployment, StatefulSet, and DaemonSet resources.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#revision-history-limit)\nfor more info.\n" - }, - "labels": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Extra labels to add to the pre puller job pods.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\nto learn more about labels.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the hook and continous image puller pods. One example use case is to\ndisable istio sidecars which could interfere with the image pulling.\n" - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "These are standard Kubernetes resources with requests and limits for\ncpu and memory. They will be used on the containers in the pods\npulling images. These should be set extremely low as the containers\nshut down directly or is a pause container that just idles.\n\nThey were made configurable as usage of ResourceQuota may require\ncontainers in the namespace to have explicit resources set.\n" - }, - "extraTolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. 
These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "hook": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "description": "See the [*optimization\nsection*](pulling-images-before-users-arrive)\nfor more details.\n", - "properties": { - "enabled": { - "type": "boolean" - }, - "pullOnlyOnChanges": { - "type": "boolean", - "description": "Pull only if changes have been made to the images to pull, or more\naccurately if the hook-image-puller daemonset has changed in any\nway.\n" - }, - "podSchedulingWaitDuration": { - "description": "The `hook-image-awaiter` has a criteria to await all the\n`hook-image-puller` DaemonSet's pods to both schedule and finish\ntheir image pulling. This flag can be used to relax this criteria\nto instead only await the pods that _has already scheduled_ to\nfinish image pulling after a certain duration.\n\nThe value of this is that sometimes the newly created\n`hook-image-puller` pods cannot be scheduled because nodes are\nfull, and then it probably won't make sense to block a `helm\nupgrade`.\n\nAn infinite duration to wait for pods to schedule can be\nrepresented by `-1`. This was the default behavior of version\n0.9.0 and earlier.\n", - "type": "integer" - }, - "nodeSelector": { - "type": "object", - "additionalProperties": true, - "description": "An object with key value pairs representing labels. 
K8s Nodes are\nrequired to have match all these labels for this Pod to scheduled on\nthem.\n\n```yaml\ndisktype: ssd\nnodetype: awesome\n```\n\nSee [the Kubernetes\ndocumentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)\nfor more details.\n" - }, - "tolerations": { - "type": "array", - "description": "Tolerations allow a pod to be scheduled on nodes with taints. These\ntolerations are additional tolerations to the tolerations common to\nall pods of a their respective kind\n([scheduling.corePods.tolerations](schema_scheduling.corePods.tolerations),\n[scheduling.userPods.tolerations](schema_scheduling.userPods.tolerations)).\n\nPass this field an array of\n[`Toleration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#toleration-v1-core)\nobjects.\n\nSee the [Kubernetes\ndocs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)\nfor more info.\n" - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. 
This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - }, - "resources": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of resources, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core).\n" - }, - "serviceAccount": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple 
purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "serviceAccountImagePuller": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "daemonsetAnnotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the hook image puller DaemonSet.\n" - } - } - }, - "continuous": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "description": "See 
the [*optimization\nsection*](pulling-images-before-users-arrive)\nfor more details.\n\n```{note}\nIf used with a Cluster Autoscaler (an autoscaling node pool), also add\nuser-placeholders and enable pod priority.\n```\n", - "properties": { - "enabled": { - "type": "boolean" - }, - "serviceAccount": { - "type": "object", - "required": [ - "create" - ], - "additionalProperties": false, - "description": "Configuration for a k8s ServiceAccount dedicated for use by the\nspecific pod which this configuration is nested under.\n", - "properties": { - "create": { - "type": "boolean", - "description": "Whether or not to create the `ServiceAccount` resource.\n" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "This configuration serves multiple purposes:\n\n- It will be the `serviceAccountName` referenced by related Pods.\n- If `create` is set, the created ServiceAccount resource will be named like this.\n- If [`rbac.create`](schema_rbac.create) is set, the associated (Cluster)RoleBindings will bind to this name.\n\nIf not explicitly provided, a default name will be used.\n" - }, - "annotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Kubernetes annotations to apply to the k8s ServiceAccount.\n" - } - } - }, - "daemonsetAnnotations": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - ".*": { - "type": "string" - } - }, - "description": "Annotations to apply to the continuous image puller DaemonSet.\n" - } - } - }, - "pullProfileListImages": { - "type": "boolean", - "description": "The singleuser.profileList configuration can provide a selection of\nimages. 
This option determines if all images identified there should\nbe pulled, both by the hook and continuous pullers.\n\nImages are looked for under `kubespawner_override`, and also\n`profile_options.choices.kubespawner_override` since version 3.2.0.\n\nThe reason to disable this, is that if you have for example 10 images\nwhich start pulling in order from 1 to 10, a user that arrives and\nwants to start a pod with image number 10 will need to wait for all\nimages to be pulled, and then it may be preferable to just let the\nuser arriving wait for a single image to be pulled on arrival.\n" - }, - "extraImages": { - "type": "object", - "additionalProperties": false, - "description": "See the [*optimization section*](images-that-will-be-pulled) for more\ndetails.\n\n```yaml\nprePuller:\n extraImages:\n my-extra-image-i-want-pulled:\n name: jupyter/all-spark-notebook\n tag: 2343e33dec46\n```\n", - "patternProperties": { - ".*": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "properties": { - "name": { - "type": "string" - }, - "tag": { - "type": "string" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - } - } - } - } - }, - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "pause": { - "type": "object", - "additionalProperties": false, - "description": "The image-puller pods rely on initContainer to pull all images, and\ntheir actual container when they are done is just running a `pause`\ncontainer. 
These are settings for that pause container.\n", - "properties": { - "containerSecurityContext": { - "type": "object", - "additionalProperties": true, - "description": "A k8s native specification of the container's security context, see [the\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core)\nfor details.\n" - }, - "image": { - "type": "object", - "additionalProperties": false, - "required": [ - "name", - "tag" - ], - "description": "Set custom image name, tag, pullPolicy, or pullSecrets for the pod.\n", - "properties": { - "name": { - "type": "string", - "description": "The name of the image, without the tag.\n\n```\n# example name\ngcr.io/my-project/my-image\n```\n" - }, - "tag": { - "type": "string", - "description": "The tag of the image to pull. This is the value following `:` in\ncomplete image specifications.\n\n```\n# example tags\nv1.11.1\nzhy270a\n```\n" - }, - "pullPolicy": { - "enum": [ - null, - "", - "IfNotPresent", - "Always", - "Never" - ], - "description": "Configures the Pod's `spec.imagePullPolicy`.\n\nSee the [Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images)\nfor more info.\n" - }, - "pullSecrets": { - "type": "array", - "description": "A list of references to existing Kubernetes Secrets with\ncredentials to pull the image.\n\nThis Pod's final `imagePullSecrets` k8s specification will be a\ncombination of:\n\n1. This list of k8s Secrets, specific for this pod.\n2. The list of k8s Secrets, for use by all pods in the Helm chart,\n declared in this Helm charts configuration called\n `imagePullSecrets`.\n3. 
A k8s Secret, for use by all pods in the Helm chart, if\n conditionally created from image registry credentials provided\n under `imagePullSecret` if `imagePullSecret.create` is set to\n true.\n\n```yaml\n# example - k8s native syntax\npullSecrets:\n - name: my-k8s-secret-with-image-registry-credentials\n\n# example - simplified syntax\npullSecrets:\n - my-k8s-secret-with-image-registry-credentials\n```\n" - } - } - } - } - } - } - }, - "custom": { - "type": "object", - "additionalProperties": true, - "description": "AUP Learning Cloud custom configuration.\n\nThis section contains all project-specific settings including authentication,\naccelerators, resources, teams, quota management, and API service configuration.\n", - "properties": { - "authMode": { - "type": "string", - "enum": [ - "auto-login", - "dummy", - "github", - "multi" - ], - "description": "Authentication mode for the JupyterHub instance.\n\n- `auto-login`: No credentials required, auto-login as 'student' (for demos/single-node)\n- `dummy`: Accept any username/password (for testing)\n- `github`: GitHub OAuth authentication only\n- `multi`: GitHub OAuth + Local native accounts (recommended for production)\n" - }, - "adminUser": { - "type": "object", - "additionalProperties": false, - "description": "Auto-create admin user configuration.\nWhen enabled, Helm will generate random credentials and store them in a Secret.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Enable auto-admin creation on first install.\nCredentials will be stored in `jupyterhub-admin-credentials` secret.\n" - } - } - }, - "accelerators": { - "type": "object", - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "properties": { - "displayName": { - "type": "string", - "description": "Human-readable name shown in the resource selector." - }, - "description": { - "type": "string", - "description": "Detailed description of the accelerator hardware." 
- }, - "nodeSelector": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Kubernetes node selector labels to schedule pods on specific nodes." - }, - "env": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Environment variables to set for containers using this accelerator." - }, - "quotaRate": { - "type": "integer", - "minimum": 1, - "description": "Quota units consumed per minute when using this accelerator." - } - } - }, - "description": "GPU/NPU accelerator configurations.\nEach key defines an accelerator type with its display name, node selector,\nenvironment variables, and quota consumption rate.\n\nExample:\n```yaml\naccelerators:\n strix:\n displayName: \"AMD Radeon\u2122 890M (Strix Point iGPU)\"\n description: \"RDNA 3.5 | 16 CUs | 4GB LPDDR5X\"\n nodeSelector:\n node-type: strix\n env:\n HSA_OVERRIDE_GFX_VERSION: \"11.5.0\"\n quotaRate: 2\n```\n" - }, - "resources": { - "type": "object", - "additionalProperties": false, - "description": "Resource images and requirements configuration.\nDefines available container images and their resource requirements.\n", - "properties": { - "images": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Mapping of resource name to container image.\nThe key `cpu` is the default CPU-only image.\n\nExample:\n```yaml\nimages:\n cpu: \"ghcr.io/amdresearch/auplc-default:latest\"\n Course-DL: \"ghcr.io/amdresearch/auplc-dl:latest\"\n```\n" - }, - "requirements": { - "type": "object", - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "properties": { - "cpu": { - "type": "string", - "description": "CPU request/limit (e.g., \"2\", \"4\")" - }, - "memory": { - "type": "string", - "description": "Memory request (e.g., \"4Gi\", \"16Gi\")" - }, - "memory_limit": { - "type": "string", - "description": "Memory limit (e.g., \"6Gi\", \"24Gi\")" - }, - "amd.com/gpu": { - "type": 
"string", - "description": "Number of AMD GPUs required (e.g., \"1\")" - } - } - }, - "description": "Resource requirements per resource type.\n\nExample:\n```yaml\nrequirements:\n Course-DL:\n cpu: \"4\"\n memory: \"16Gi\"\n memory_limit: \"24Gi\"\n amd.com/gpu: \"1\"\n```\n" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "properties": { - "group": { - "type": "string", - "description": "Category group name for UI display (e.g., \"COURSE\", \"OTHERS\")" - }, - "description": { - "type": "string", - "description": "Main title displayed on the resource card" - }, - "subDescription": { - "type": "string", - "description": "Secondary description text shown below the title" - }, - "accelerator": { - "type": "string", - "description": "Accelerator type label (e.g., \"GPU\", \"NPU\")" - }, - "acceleratorKeys": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of accelerator keys from custom.accelerators" - }, - "allowGitClone": { - "type": "boolean", - "description": "Allow users to clone a Git repository into their home\ndirectory at spawn time. Should be false for course\nresources that already include their own content.\n" - }, - "env": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Per-resource environment variable overrides. Applied after\naccelerator-level env vars. Set a value to empty string \"\"\nto unset an accelerator-inherited variable.\n" - }, - "acceleratorOverrides": { - "type": "object", - "additionalProperties": { - "type": "object", - "properties": { - "image": { - "type": "string", - "description": "Override image for this accelerator." 
- }, - "env": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Environment variable overrides for this accelerator.\nSet a value to empty string \"\" to unset a variable.\n" - } - } - }, - "description": "Per-accelerator overrides for image and/or env, keyed by\naccelerator name. Highest priority for env \u2014 applied after\nboth accelerator-level and resource-level env.\n" - } - } - }, - "description": "Resource metadata for spawn UI display.\nDefines how each resource appears in the selection interface.\n\nEnvironment priority (highest wins):\n1. `acceleratorOverrides..env` (per-accelerator per-resource)\n2. `env` (per-resource, all accelerators)\n3. `accelerators..env` (per-accelerator, all resources)\n\nExample:\n```yaml\nmetadata:\n Course-CV:\n group: \"COURSE\"\n description: \"Computer Vision Course\"\n accelerator: \"GPU\"\n acceleratorKeys:\n - strix-halo\n - phx\n acceleratorOverrides:\n phx:\n image: \"ghcr.io/amdresearch/auplc-cv:v1.3.7\"\n env:\n HSA_OVERRIDE_GFX_VERSION: \"11.0.0\"\n```\n" - } - } - }, - "teams": { - "type": "object", - "additionalProperties": false, - "description": "Team to resource mapping configuration.\nControls which resources are available to users based on their team membership.\n", - "properties": { - "mapping": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - } - }, - "description": "Map team names to lists of allowed resource types.\n\nExample:\n```yaml\nmapping:\n gpu:\n - Course-CV\n - Course-DL\n official:\n - cpu\n - Course-CV\n - Course-DL\n```\n" - } - } - }, - "quota": { - "type": "object", - "additionalProperties": false, - "description": "User quota management system configuration.\nControls container usage time with quota units consumed per minute.\n", - "properties": { - "enabled": { - "type": [ - "boolean", - "null" - ], - "description": "Enable/disable quota system.\nSet to `null` for auto-detection based on 
authMode\n(disabled for auto-login/dummy, enabled otherwise).\n" - }, - "cpuRate": { - "type": "integer", - "minimum": 1, - "description": "Quota units consumed per minute for CPU-only workloads (no accelerator).\n" - }, - "minimumToStart": { - "type": "integer", - "minimum": 0, - "description": "Minimum quota balance required to start a container.\nUsers with less than this amount cannot spawn new servers.\n" - }, - "defaultQuota": { - "type": "integer", - "minimum": 0, - "description": "Default quota granted to new users on first login.\nSet to 0 to require admin to manually allocate quota.\n" - }, - "refreshRules": { - "type": "object", - "additionalProperties": { - "type": "object", - "additionalProperties": false, - "properties": { - "enabled": { - "type": "boolean", - "description": "Enable/disable this refresh rule." - }, - "schedule": { - "type": "string", - "description": "Cron schedule expression (e.g., \"0 0 * * *\" for daily at midnight).\n" - }, - "action": { - "type": "string", - "enum": [ - "add", - "set" - ], - "description": "Action to perform: `add` adds to current balance, `set` sets exact amount.\n" - }, - "amount": { - "type": "integer", - "description": "Amount to add/set. Use negative values with `add` action for deductions.\n" - }, - "maxBalance": { - "type": [ - "integer", - "null" - ], - "description": "Maximum balance cap (for positive `add` actions).\n" - }, - "minBalance": { - "type": [ - "integer", - "null" - ], - "description": "Minimum balance floor (for negative `add` actions).\n" - }, - "targets": { - "type": "object", - "additionalProperties": false, - "properties": { - "includeUnlimited": { - "type": "boolean", - "description": "Include users with unlimited quota." - }, - "balanceBelow": { - "type": [ - "integer", - "null" - ], - "description": "Only apply to users with balance below this value." - }, - "balanceAbove": { - "type": [ - "integer", - "null" - ], - "description": "Only apply to users with balance above this value." 
- }, - "includeUsers": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Only apply to these specific users." - }, - "excludeUsers": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Exclude these specific users." - }, - "usernamePattern": { - "type": "string", - "description": "Regex pattern for username matching." - } - }, - "description": "Target filters for selective quota refresh.\n" - } - } - }, - "description": "Auto-refresh quota rules. Each rule generates a Kubernetes CronJob.\n\nExample:\n```yaml\nrefreshRules:\n daily-topup:\n enabled: true\n schedule: \"0 0 * * *\"\n action: add\n amount: 100\n maxBalance: 500\n targets:\n balanceBelow: 400\n```\n" - } - } - }, - "gitClone": { - "type": "object", - "additionalProperties": false, - "description": "Git repository cloning configuration.\nUsers can optionally provide a Git URL on the spawn form; the repo is\ncloned into their home directory via an init container at startup.\n", - "properties": { - "initContainerImage": { - "type": "string", - "description": "Container image used for git operations.\nMust include `git` and `sh`. Defaults to `alpine/git:2.47.2`.\n" - }, - "allowedProviders": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Allowed Git hosting providers. Subdomains are also accepted.\nDefaults to github.com, gitlab.com, and bitbucket.org.\n" - }, - "maxCloneTimeout": { - "type": "integer", - "minimum": 10, - "description": "Maximum time in seconds allowed for a clone or fetch operation.\nDefaults to 300 (5 minutes).\n" - }, - "githubAppName": { - "type": "string", - "description": "GitHub App slug name. When configured, enables GitHub App integration\nfor private repo access and repo picker UI.\nLeave empty to disable GitHub App features.\n" - }, - "defaultAccessToken": { - "type": "string", - "description": "Default access token for private repos (e.g. 
a bot/service account PAT).\nUsed as fallback when user provides no PAT and no OAuth token is available.\nPriority: user PAT > OAuth token > defaultAccessToken.\nHelm auto-creates a K8s Secret (jupyterhub-git-default-token) from this value.\n" - } - } - }, - "apiService": { - "type": "object", - "additionalProperties": false, - "description": "API service configuration for external integrations.\n", - "properties": { - "image": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "Container image name." - }, - "tag": { - "type": "string", - "description": "Container image tag." - }, - "pullPolicy": { - "enum": [ - "", - "IfNotPresent", - "Always", - "Never", - "null" - ], - "description": "Image pull policy." - } - } - } - } - } - } - }, - "cull": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "description": "The\n[jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler)\ncan run as a JupyterHub managed service to _cull_ running servers.\n", - "properties": { - "enabled": { - "type": "boolean", - "description": "Enable/disable use of jupyter-idle-culler.\n" - }, - "users": { - "type": [ - "boolean", - "null" - ], - "description": "See the `--cull-users` flag." - }, - "adminUsers": { - "type": [ - "boolean", - "null" - ], - "description": "See the `--cull-admin-users` flag." - }, - "removeNamedServers": { - "type": [ - "boolean", - "null" - ], - "description": "See the `--remove-named-servers` flag." - }, - "timeout": { - "type": [ - "integer", - "null" - ], - "description": "See the `--timeout` flag." - }, - "every": { - "type": [ - "integer", - "null" - ], - "description": "See the `--cull-every` flag." - }, - "concurrency": { - "type": [ - "integer", - "null" - ], - "description": "See the `--concurrency` flag." - }, - "maxAge": { - "type": [ - "integer", - "null" - ], - "description": "See the `--max-age` flag." 
- } - } - }, - "debug": { - "type": "object", - "additionalProperties": false, - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "type": "boolean", - "description": "Increases the loglevel throughout the resources in the Helm chart.\n" - } - } - }, - "rbac": { - "type": "object", - "additionalProperties": false, - "required": [ - "create" - ], - "properties": { - "enabled": { - "type": "boolean", - "description": "````{note}\nRemoved in version 2.0.0. If you have been using `rbac.enable=false`\n(strongly discouraged), then the equivalent configuration would be:\n\n```yaml\nrbac:\n create: false\nhub:\n serviceAccount:\n create: false\nproxy:\n traefik:\n serviceAccount:\n create: false\nscheduling:\n userScheduler:\n serviceAccount:\n create: false\nprePuller:\n hook:\n serviceAccount:\n create: false\n```\n````\n" - }, - "create": { - "type": "boolean", - "description": "Decides if (Cluster)Role and (Cluster)RoleBinding resources are\ncreated and bound to the configured serviceAccounts.\n" - } - } - }, - "global": { - "type": "object", - "additionalProperties": true, - "properties": { - "safeToShowValues": { - "type": "boolean", - "description": "A flag that should only be set to true temporarily when experiencing a\ndeprecation message that contain censored content that you wish to\nreveal.\n" - } - } - } - } -} \ No newline at end of file 
+{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","additionalProperties":false,"required":["imagePullSecrets","hub","proxy","singleuser","ingress","prePuller","custom","cull","debug","rbac","global"],"properties":{"enabled":{"type":["boolean","null"]},"fullnameOverride":{"type":["string","null"]},"nameOverride":{"type":["string","null"]},"imagePullSecret":{"type":"object","required":["create"],"if":{"properties":{"create":{"const":true}}},"then":{"additionalProperties":false,"required":["registry","username","password"],"properties":{"create":{"type":"boolean"},"automaticReferenceInjection":{"type":"boolean"},"registry":{"type":"string"},"username":{"type":"string"},"password":{"type":"string"},"email":{"type":["string","null"]}}}},"imagePullSecrets":{"type":"array"},"hub":{"type":"object","additionalProperties":false,"required":["baseUrl"],"properties":{"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"config":{"type":"object","additionalProperties":false,"patternProperties":{"^[A-Z].*$":{"type":"object","additionalProperties":true}},"properties":{"JupyterHub":{"type":"object","additionalProperties":true,"properties":{"subdomain_host":{"type":"string"}}}}},"extraFiles":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"object","additionalProperties":false,"required":["mountPath"],"oneOf":[{"required":["data"]},{"required":["stringData"]},{"required":["binaryData"]}],"properties":{"mountPath":{"type":"string"},"data":{"type":"object","additionalProperties":true},"stringData":{"type":"string"},"binaryData":{"type":"string"},"mode":{"type":"number"}}}}},"baseUrl":{"type":"string"},"command":{"type":"array"},"args":{"type":"array"},"cookieSecret":{"type":["string","null"]},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}
}},"networkPolicy":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"ingress":{"type":"array"},"egress":{"type":"array"},"egressAllowRules":{"type":"object","additionalProperties":false,"properties":{"cloudMetadataServer":{"type":"boolean"},"dnsPortsCloudMetadataServer":{"type":"boolean"},"dnsPortsKubeSystemNamespace":{"type":"boolean"},"dnsPortsPrivateIPs":{"type":"boolean"},"nonPrivateIPs":{"type":"boolean"},"privateIPs":{"type":"boolean"}}},"interNamespaceAccessLabels":{"enum":["accept","ignore"]},"allowedIngressPorts":{"type":"array"}}},"db":{"type":"object","additionalProperties":false,"properties":{"type":{"enum":["sqlite-pvc","sqlite-memory","mysql","postgres","other"]},"pvc":{"type":"object","additionalProperties":false,"required":["storage"],"properties":{"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"selector":{"type":"object","additionalProperties":true},"storage":{"type":"string"},"accessModes":{"type":"array","items":{"type":["string","null"]}},"storageClassName":{"type":["string","null"]},"subPath":{"type":["string","null"]}}},"upgrade":{"type":["boolean","null"]},"url":{"type":["string","null"]},"password":{"type":["string","null"]}}},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"initContainers":{"type":"array"},"extraEnv":{"type":["object","array"],"additionalProperties":true},"extraConfig":{"type":"object","additionalProperties":true},"fsGid":{"type":["integer","null"],"minimum":0},"service":{"type":"object","additionalProperties":false,"properties":{"type":{"enum":["ClusterIP","NodePort","LoadBalancer","ExternalName"]},"ports":{"type":"object","additionalProperties":false,"properties":{"appProtocol":{"type":["string","null"]},"nodePort":{"type":["integer","null"],"minimum":0}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"extraPorts":
{"type":"array"},"loadBalancerIP":{"type":["string","null"]},"ipFamilyPolicy":{"type":["string"]},"ipFamilies":{"type":"array"}}},"pdb":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"maxUnavailable":{"type":["integer","null"]},"minAvailable":{"type":["integer","null"]}}},"existingSecret":{"type":["string","null"]},"nodeSelector":{"type":"object","additionalProperties":true},"tolerations":{"type":"array"},"activeServerLimit":{"type":["integer","null"]},"allowNamedServers":{"type":["boolean","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"authenticatePrometheus":{"type":["boolean","null"]},"concurrentSpawnLimit":{"type":["integer","null"]},"consecutiveFailureLimit":{"type":["integer","null"]},"podSecurityContext":{"additionalProperties":true},"containerSecurityContext":{"type":"object","additionalProperties":true},"deploymentStrategy":{"type":"object","additionalProperties":false,"properties":{"rollingUpdate":{"type":["string","null"]},"type":{"type":["string","null"]}}},"extraContainers":{"type":"array"},"extraVolumeMounts":{"type":"array"},"extraVolumes":{"type":"array"},"livenessProbe":{"type":"object","additionalProperties":true,"required":["enabled"],"if":{"properties":{"enabled":{"const":true}}},"then":{}},"readinessProbe":{"type":"object","additionalProperties":true,"required":["enabled"],"if":{"properties":{"enabled":{"const":true}}},"then":{}},"namedServerLimitPerUser":{"type":["integer","null"]},"redirectToServer":{"type":["boolean","null"]},"resources":{"type":"object","additionalProperties":true},"lifecycle":{"type":"object","additionalProperties":false,"properties":{"postStart":{"type":"object","additionalProperties":true},"preStop":{"type":"object","additionalProperties":true}}},"services":{"type":"object","additionalProperties":true,"properties":{"name":{"type":"string"},"admin":{"type":"boolean"},"command":{"type":["string","array"]},"url":{"t
ype":"string"},"api_token":{"type":["string","null"]},"apiToken":{"type":["string","null"]}}},"loadRoles":{"type":"object","additionalProperties":true},"shutdownOnLogout":{"type":["boolean","null"]},"templatePaths":{"type":"array"},"templateVars":{"type":"object","additionalProperties":true},"serviceAccount":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"extraPodSpec":{"type":"object","additionalProperties":true}}},"proxy":{"type":"object","additionalProperties":false,"properties":{"chp":{"type":"object","additionalProperties":false,"properties":{"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"networkPolicy":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"ingress":{"type":"array"},"egress":{"type":"array"},"egressAllowRules":{"type":"object","additionalProperties":false,"properties":{"cloudMetadataServer":{"type":"boolean"},"dnsPortsCloudMetadataServer":{"type":"boolean"},"dnsPortsKubeSystemNamespace":{"type":"boolean"},"dnsPortsPrivateIPs":{"type":"boolean"},"nonPrivateIPs":{"type":"boolean"},"privateIPs":{"type":"boolean"}}},"interNamespaceAccessLabels":{"enum":["accept","ignore"]},"allowedIngressPorts":{"type":"array"}}},"extraCommandLineFlags":{"type":"array"},"extraEnv":{"type":["object","array"],"additionalProperties":true},"pdb":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"maxUnavailable":{"type":["integer","null"]},"minAvailable":{"type":["integer","null"]}}},"nodeSelector":{"type":"object","additionalProperties":true},"tolerations":{"type":"array"},"containerSecurityContext":{"type":"object","additionalProperties":true},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"st
ring"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"livenessProbe":{"type":"object","additionalProperties":true,"required":["enabled"],"if":{"properties":{"enabled":{"const":true}}},"then":{}},"readinessProbe":{"type":"object","additionalProperties":true,"required":["enabled"],"if":{"properties":{"enabled":{"const":true}}},"then":{}},"resources":{"type":"object","additionalProperties":true},"defaultTarget":{"type":["string","null"]},"errorTarget":{"type":["string","null"]},"extraPodSpec":{"type":"object","additionalProperties":true}}},"secretToken":{"type":["string","null"]},"service":{"type":"object","additionalProperties":false,"properties":{"type":{"enum":["ClusterIP","NodePort","LoadBalancer","ExternalName"]},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"nodePorts":{"type":"object","additionalProperties":false,"properties":{"http":{"type":["integer","null"]},"https":{"type":["integer","null"]}}},"loadBalancerPort":{"type":"object","additionalProperties":false,"properties":{"http":{"type":["integer","null"]},"https":{"type":["integer","null"]}}},"disableHttpPort":{"type":"boolean"},"extraPorts":{"type":"array"},"externalIPs":{"type":"array"},"loadBalancerIP":{"type":["string","null"]},"loadBalancerSourceRanges":{"type":"array"},"ipFamilyPolicy":{"type":["string"]},"ipFamilies":{"type":"array"}}},"https":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":["boolean","null"]},"type":{"enum":[null,"","letsencrypt","manual","offload","secret"]},"letsencrypt":{"type":"object","additionalProperties":false,"properties":{"contactEmail":{"type":["string","null"]},"acmeServer":{"type":["string","null"]}}},"manual":{"type":"object","additionalProperties":false,"properties":{"key":{"type":["string","null"]},"cert":{"type":["string","null"
]}}},"secret":{"type":"object","additionalProperties":false,"properties":{"name":{"type":["string","null"]},"key":{"type":["string","null"]},"crt":{"type":["string","null"]}}},"hosts":{"type":"array"}}},"traefik":{"type":"object","additionalProperties":false,"properties":{"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"networkPolicy":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"ingress":{"type":"array"},"egress":{"type":"array"},"egressAllowRules":{"type":"object","additionalProperties":false,"properties":{"cloudMetadataServer":{"type":"boolean"},"dnsPortsCloudMetadataServer":{"type":"boolean"},"dnsPortsKubeSystemNamespace":{"type":"boolean"},"dnsPortsPrivateIPs":{"type":"boolean"},"nonPrivateIPs":{"type":"boolean"},"privateIPs":{"type":"boolean"}}},"interNamespaceAccessLabels":{"enum":["accept","ignore"]},"allowedIngressPorts":{"type":"array"}}},"extraInitContainers":{"type":"array"},"extraEnv":{"type":["object","array"],"additionalProperties":true},"pdb":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"maxUnavailable":{"type":["integer","null"]},"minAvailable":{"type":["integer","null"]}}},"nodeSelector":{"type":"object","additionalProperties":true},"tolerations":{"type":"array"},"containerSecurityContext":{"type":"object","additionalProperties":true},"extraDynamicConfig":{"type":"object","additionalProperties":true},"extraPorts":{"type":"array"},"extraStaticConfig":{"type":"object","additionalProperties":true},"extraVolumes":{"type":"array"},"extraVolumeMounts":{"type":"array"},"hsts":{"type":"object","additionalProperties":false,"required":["includeSubdomains","maxAge","preload"],"properties":{"includeSubdomains":{"type":"boolean"},"maxAge":{"type":"integer"},"preload":{"type":"boolean"}}},"image":{"type":"object","additionalProperties":false,"required":["name","
tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"resources":{"type":"object","additionalProperties":true},"serviceAccount":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"extraPodSpec":{"type":"object","additionalProperties":true}}},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"deploymentStrategy":{"type":"object","additionalProperties":false,"properties":{"rollingUpdate":{"type":["string","null"]},"type":{"type":["string","null"]}}},"secretSync":{"type":"object","additionalProperties":false,"properties":{"containerSecurityContext":{"type":"object","additionalProperties":true},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"resources":{"type":"object","additionalProperties":true}}}}},"singleuser":{"type":"object","additionalProperties":false,"properties":{"networkPolicy":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"ingress":{"type":"array"},"egress":{"type":"array"},"egressAllowRules":{"type":"object","additionalProperties":false,"properties":{"cloudMetadataServer":{"type":"boolean"},"dnsPortsCloudMetadataServer":{"type":"boolean"},"dnsPortsKubeSystemNamespace":{"type":"boolean"},"dnsPortsPrivateIPs":{"type":"boolean"},"nonPrivateIPs":{"type":"boolean"},"privateIPs":{"type":"boolean"}}},"interNamespaceAccessLabels":{"enum":["accept","ignore"]},"allowedIngre
ssPorts":{"type":"array"}}},"podNameTemplate":{"type":["string","null"]},"cpu":{"type":"object","additionalProperties":false,"properties":{"limit":{"type":["number","null"]},"guarantee":{"type":["number","null"]}}},"memory":{"type":"object","additionalProperties":false,"properties":{"limit":{"type":["number","string","null"]},"guarantee":{"type":["number","string","null"]}}},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"initContainers":{"type":"array"},"profileList":{"type":"array"},"extraFiles":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"object","additionalProperties":false,"required":["mountPath"],"oneOf":[{"required":["data"]},{"required":["stringData"]},{"required":["binaryData"]}],"properties":{"mountPath":{"type":"string"},"data":{"type":"object","additionalProperties":true},"stringData":{"type":"string"},"binaryData":{"type":"string"},"mode":{"type":"number"}}}}},"extraEnv":{"type":["object","array"],"additionalProperties":true},"nodeSelector":{"type":"object","additionalProperties":true},"extraTolerations":{"type":"array"},"extraNodeAffinity":{"type":"object","additionalProperties":false,"properties":{"required":{"type":"array"},"preferred":{"type":"array"}}},"extraPodAffinity":{"type":"object","additionalProperties":false,"properties":{"required":{"type":"array"},"preferred":{"type":"array"}}},"extraPodAntiAffinity":{"type":"object","additionalProperties":false,"properties":{"required":{"type":"array"},"preferred":{"type":"array"}}},"cloudMetadata":{"type":"object","additionalProperties":false,"required":["blockWithIptables","ip"],"properties":{"blockWithIptables":{"type":"boolean"},"ip":{"type":"string"}}},"cmd":{"type":["array","string","null"]},"defaultUrl":{"type":["string","null"]},"events":{"type":["boolean","null"]},"extra
Annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"extraContainers":{"type":"array"},"extraLabels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"extraPodConfig":{"type":"object","additionalProperties":true},"extraResource":{"type":"object","additionalProperties":false,"properties":{"guarantees":{"type":"object","additionalProperties":true},"limits":{"type":"object","additionalProperties":true}}},"fsGid":{"type":["integer","null"]},"lifecycleHooks":{"type":"object","additionalProperties":false,"properties":{"postStart":{"type":"object","additionalProperties":true},"preStop":{"type":"object","additionalProperties":true}}},"networkTools":{"type":"object","additionalProperties":false,"properties":{"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"resources":{"type":"object","additionalProperties":true}}},"serviceAccountName":{"type":["string","null"]},"startTimeout":{"type":["integer","null"]},"storage":{"type":"object","additionalProperties":false,"required":["type","homeMountPath"],"properties":{"capacity":{"type":["string","null"]},"dynamic":{"type":"object","additionalProperties":false,"properties":{"pvcNameTemplate":{"type":["string","null"]},"storageAccessModes":{"type":"array","items":{"type":["string","null"]}},"storageClass":{"type":["string","null"]},"subPath":{"type":["string","null"]},"volumeNameTemplate":{"type":["string","null"]}}},"extraLabels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"extraVolumeMounts":{"type":["object","array","null"]},"extraVolumes":{"type":["object","array","null"]},"homeMountPath":{"type":"string"},"static":{"type":"object","additionalProperties":false,"properties":{"pvcName":{"type":["string","
null"]},"subPath":{"type":["string","null"]}}},"type":{"enum":["dynamic","static","none"]}}},"allowPrivilegeEscalation":{"type":["boolean","null"]},"uid":{"type":["integer","null"]}}},"scheduling":{"type":"object","additionalProperties":false,"properties":{"userScheduler":{"type":"object","additionalProperties":false,"required":["enabled","plugins","pluginConfig","logLevel"],"properties":{"enabled":{"type":"boolean"},"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"replicas":{"type":"integer"},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"pdb":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"maxUnavailable":{"type":["integer","null"]},"minAvailable":{"type":["integer","null"]}}},"nodeSelector":{"type":"object","additionalProperties":true},"tolerations":{"type":"array"},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"containerSecurityContext":{"type":"object","additionalProperties":true},"logLevel":{"type":"integer"},"plugins":{"type":"object","additionalProperties":true},"pluginConfig":{"type":"array"},"resources":{"type":"object","additionalProperties":true},"serviceAccount":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"extraPodSpec":{"type":"object","additionalProperties":true}}},"podPriority":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"globalDefault":{"type":"boolean"},"defaultPriority":{"type":"integer"},"image
PullerPriority":{"type":"integer"},"userPlaceholderPriority":{"type":"integer"}}},"userPlaceholder":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"replicas":{"type":"integer"},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"resources":{"type":"object","additionalProperties":true},"containerSecurityContext":{"type":"object","additionalProperties":true},"extraPodSpec":{"type":"object","additionalProperties":true}}},"corePods":{"type":"object","additionalProperties":false,"properties":{"tolerations":{"type":"array"},"nodeAffinity":{"type":"object","additionalProperties":false,"properties":{"matchNodePurpose":{"enum":["ignore","prefer","require"]}}}}},"userPods":{"type":"object","additionalProperties":false,"properties":{"tolerations":{"type":"array"},"nodeAffinity":{"type":"object","additionalProperties":false,"properties":{"matchNodePurpose":{"enum":["ignore","prefer","require"]}}}}}}},"ingress":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"ingressClassName":{"type":["string","null"]},"hosts":{"type":"array"},"pathSuffix":{"type":["string","null"]},"pathType":{"enum":["Prefix","Exact","ImplementationSpecific"]},"tls":{"type":"array"},"extraPaths":{"type":"array"}}},"httpRoute":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"},"annotations":{
"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"hostnames":{"type":"array"},"gateway":{"type":"object","additionalProperties":false,"required":["name"],"properties":{"name":{"type":"string"},"namespace":{"type":"string"},"sectionName":{"type":"string"}}}}},"prePuller":{"type":"object","additionalProperties":false,"required":["hook","continuous"],"properties":{"revisionHistoryLimit":{"type":["integer","null"],"minimum":0},"labels":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}},"resources":{"type":"object","additionalProperties":true},"extraTolerations":{"type":"array"},"hook":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"},"pullOnlyOnChanges":{"type":"boolean"},"podSchedulingWaitDuration":{"type":"integer"},"nodeSelector":{"type":"object","additionalProperties":true},"tolerations":{"type":"array"},"containerSecurityContext":{"type":"object","additionalProperties":true},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}},"resources":{"type":"object","additionalProperties":true},"serviceAccount":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"serviceAccountImagePuller":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"daemonsetAnn
otations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"continuous":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"},"serviceAccount":{"type":"object","required":["create"],"additionalProperties":false,"properties":{"create":{"type":"boolean"},"name":{"type":["string","null"]},"annotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"daemonsetAnnotations":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"string"}}}}},"pullProfileListImages":{"type":"boolean"},"extraImages":{"type":"object","additionalProperties":false,"patternProperties":{".*":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]}}}}},"containerSecurityContext":{"type":"object","additionalProperties":true},"pause":{"type":"object","additionalProperties":false,"properties":{"containerSecurityContext":{"type":"object","additionalProperties":true},"image":{"type":"object","additionalProperties":false,"required":["name","tag"],"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":[null,"","IfNotPresent","Always","Never"]},"pullSecrets":{"type":"array"}}}}}}},"custom":{"type":"object","additionalProperties":true,"properties":{"authMode":{"type":"string","enum":["auto-login","dummy","github","multi"]},"adminUser":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"}}},"accelerators":{"type":"object","additionalProperties":{"type":"object","additionalProperties":false,"properties":{"displayName":{"type":"string"},"description":{"type":"string"},"nodeSelector":{"type":"object","additionalProperties":{"type":"string"}},"env":{"type":"object","additionalProperties":{"type":"string"}},"quotaRate":{"type":"integer",
"minimum":1}}}},"resources":{"type":"object","additionalProperties":false,"properties":{"images":{"type":"object","additionalProperties":{"type":"string"}},"requirements":{"type":"object","additionalProperties":{"type":"object","additionalProperties":false,"properties":{"cpu":{"type":"string"},"memory":{"type":"string"},"memory_limit":{"type":"string"},"amd.com/gpu":{"type":"string"}}}},"metadata":{"type":"object","additionalProperties":{"type":"object","additionalProperties":false,"properties":{"group":{"type":"string"},"description":{"type":"string"},"subDescription":{"type":"string"},"accelerator":{"type":"string"},"acceleratorKeys":{"type":"array","items":{"type":"string"}},"allowGitClone":{"type":"boolean"},"env":{"type":"object","additionalProperties":{"type":"string"}},"acceleratorOverrides":{"type":"object","additionalProperties":{"type":"object","properties":{"image":{"type":"string"},"env":{"type":"object","additionalProperties":{"type":"string"}}}}}}}}}},"teams":{"type":"object","additionalProperties":false,"properties":{"mapping":{"type":"object","additionalProperties":{"type":"array","items":{"type":"string"}}}}},"quota":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":["boolean","null"]},"cpuRate":{"type":"integer","minimum":1},"minimumToStart":{"type":"integer","minimum":0},"defaultQuota":{"type":"integer","minimum":0},"refreshRules":{"type":"object","additionalProperties":{"type":"object","additionalProperties":false,"properties":{"enabled":{"type":"boolean"},"schedule":{"type":"string"},"action":{"type":"string","enum":["add","set"]},"amount":{"type":"integer"},"maxBalance":{"type":["integer","null"]},"minBalance":{"type":["integer","null"]},"targets":{"type":"object","additionalProperties":false,"properties":{"includeUnlimited":{"type":"boolean"},"balanceBelow":{"type":["integer","null"]},"balanceAbove":{"type":["integer","null"]},"includeUsers":{"type":"array","items":{"type":"string"}},"excludeUsers":{"type":"array","
items":{"type":"string"}},"usernamePattern":{"type":"string"}}}}}}}},"gitClone":{"type":"object","additionalProperties":false,"properties":{"initContainerImage":{"type":"string"},"allowedProviders":{"type":"array","items":{"type":"string"}},"maxCloneTimeout":{"type":"integer","minimum":10},"githubAppName":{"type":"string"},"defaultAccessToken":{"type":"string"}}},"hub":{"type":"object","additionalProperties":false,"properties":{"allowedOrigins":{"type":"array","items":{"type":"string"}}}},"notebook":{"type":"object","additionalProperties":false,"properties":{"allowedOrigins":{"type":"array","items":{"type":"string"}}}},"apiService":{"type":"object","additionalProperties":false,"properties":{"image":{"type":"object","additionalProperties":false,"properties":{"name":{"type":"string"},"tag":{"type":"string"},"pullPolicy":{"enum":["","IfNotPresent","Always","Never","null"]}}}}}}},"cull":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"},"users":{"type":["boolean","null"]},"adminUsers":{"type":["boolean","null"]},"removeNamedServers":{"type":["boolean","null"]},"timeout":{"type":["integer","null"]},"every":{"type":["integer","null"]},"concurrency":{"type":["integer","null"]},"maxAge":{"type":["integer","null"]}}},"debug":{"type":"object","additionalProperties":false,"required":["enabled"],"properties":{"enabled":{"type":"boolean"}}},"rbac":{"type":"object","additionalProperties":false,"required":["create"],"properties":{"enabled":{"type":"boolean"},"create":{"type":"boolean"}}},"global":{"type":"object","additionalProperties":true,"properties":{"safeToShowValues":{"type":"boolean"}}}}} \ No newline at end of file diff --git a/runtime/chart/values.schema.yaml b/runtime/chart/values.schema.yaml index 9edb1e7..d0cbdae 100644 --- a/runtime/chart/values.schema.yaml +++ b/runtime/chart/values.schema.yaml @@ -3441,6 +3441,37 @@ properties: Priority: user PAT > OAuth token > defaultAccessToken. 
Helm auto-creates a K8s Secret (jupyterhub-git-default-token) from this value. + hub: + type: object + additionalProperties: false + description: | + Hub-level network settings. + properties: + allowedOrigins: + type: array + items: + type: string + description: | + Sets Access-Control-Allow-Origin on Hub HTTP responses. + Use when the Hub API is accessed cross-origin. + Use ["*"] to allow all, or list specific domains. + + notebook: + type: object + additionalProperties: false + description: | + Notebook server network settings. + properties: + allowedOrigins: + type: array + items: + type: string + description: | + Injected into each notebook server's startup args via + --ServerApp.allow_origin_pat. Needed when kernel WebSocket + connections go through a reverse proxy or non-standard domain. + Use ["*"] to allow all, or list specific domains. + apiService: type: object additionalProperties: false diff --git a/runtime/chart/values.yaml b/runtime/chart/values.yaml index 4043cce..35c416e 100644 --- a/runtime/chart/values.yaml +++ b/runtime/chart/values.yaml @@ -59,6 +59,17 @@ custom: teams: mapping: {} + # Hub-level allowed origins: sets Access-Control-Allow-Origin on Hub HTTP + # responses. Useful when the Hub API is accessed cross-origin. + hub: + allowedOrigins: [] + + # Notebook server allowed origins: injected into each notebook server's + # startup args via --ServerApp.allow_origin_pat. + # Needed when kernel WebSocket connections go through a reverse proxy. 
+ notebook: + allowedOrigins: [] + # User quota management system quota: # Enable/disable quota system (auto-disabled for auto-login/dummy modes if not set) diff --git a/runtime/hub/core/authenticators/firstuse.py b/runtime/hub/core/authenticators/firstuse.py index 6563dc9..1307214 100644 --- a/runtime/hub/core/authenticators/firstuse.py +++ b/runtime/hub/core/authenticators/firstuse.py @@ -26,6 +26,8 @@ from __future__ import annotations +from concurrent.futures import ThreadPoolExecutor + import bcrypt from firstuseauthenticator import FirstUseAuthenticator @@ -105,6 +107,78 @@ def set_password(self, username: str, password: str, force_change: bool = True) suffix = " (force change on next login)" if force_change else "" return f"Password set for {username}{suffix}" + def batch_set_passwords( + self, + users: list[dict], + force_change: bool = True, + ) -> dict: + """Set passwords for multiple users in a single transaction. + + Args: + users: List of dicts with 'username' and 'password' keys. + force_change: Whether to force password change on first login. + + Returns: + Dict with 'success', 'failed' counts and 'results' list. 
+ """ + min_len = getattr(self, "min_password_length", 1) + results = {"success": 0, "failed": 0, "results": []} + + # Validate passwords first + valid_entries = [] + for entry in users: + username = entry["username"] + password = entry["password"] + if not password or len(password) < min_len: + results["failed"] += 1 + results["results"].append( + { + "username": username, + "status": "failed", + "error": f"Password too short (min {min_len})", + } + ) + continue + valid_entries.append((username, password)) + + # Parallel bcrypt hashing (bcrypt releases GIL, threads give real speedup) + def _hash(pw: str) -> bytes: + return bcrypt.hashpw(pw.encode("utf8"), bcrypt.gensalt()) + + with ThreadPoolExecutor() as pool: + hash_results = list(pool.map(_hash, [pw for _, pw in valid_entries])) + hashed = [(username, h) for (username, _), h in zip(valid_entries, hash_results)] + + # Single DB transaction with per-user savepoints + with session_scope() as session: + for username, password_hash in hashed: + try: + with session.begin_nested(): + user_pw = session.query(UserPassword).filter_by(username=username).first() + if user_pw: + user_pw.password_hash = password_hash + user_pw.force_change = force_change + else: + user_pw = UserPassword( + username=username, + password_hash=password_hash, + force_change=force_change, + ) + session.add(user_pw) + results["success"] += 1 + results["results"].append({"username": username, "status": "success"}) + except Exception as e: + results["failed"] += 1 + results["results"].append( + { + "username": username, + "status": "failed", + "error": str(e), + } + ) + + return results + def mark_force_password_change(self, username: str, force: bool = True) -> None: """Mark or unmark a user for forced password change.""" with session_scope() as session: diff --git a/runtime/hub/core/authenticators/github_oauth.py b/runtime/hub/core/authenticators/github_oauth.py index 97c2f87..383456f 100644 --- a/runtime/hub/core/authenticators/github_oauth.py 
+++ b/runtime/hub/core/authenticators/github_oauth.py @@ -70,6 +70,24 @@ async def authenticate(self, handler, data=None): if expires_in is not None: result["auth_state"]["expires_at"] = time.time() + int(expires_in) + # Fetch GitHub team memberships and store in auth_state for group sync + access_token = result["auth_state"].get("access_token") + if access_token: + from core import z2jh + from core.groups import fetch_github_teams + + allowed_orgs = self.allowed_organizations or set( + z2jh.get_config("hub.config.GitHubOAuthenticator.allowed_organizations", []) + ) + org_name = next(iter(allowed_orgs), "") + if org_name: + try: + teams = await fetch_github_teams(access_token, org_name) + result["auth_state"]["github_teams"] = teams + log.info("Fetched %d GitHub teams for user %s", len(teams), result.get("name", "?")) + except Exception: + log.warning("Failed to fetch GitHub teams during authentication", exc_info=True) + return result async def refresh_user(self, user, handler=None, **kwargs): @@ -93,6 +111,14 @@ async def refresh_user(self, user, handler=None, **kwargs): if not refresh_token: return True + from core import z2jh + from core.groups import fetch_github_teams + + allowed_orgs = self.allowed_organizations or set( + z2jh.get_config("hub.config.GitHubOAuthenticator.allowed_organizations", []) + ) + org_name = next(iter(allowed_orgs), "") + # Proactively refresh if within 10 minutes of expiry expires_at = auth_state.get("expires_at") if expires_at and time.time() > expires_at - 600: @@ -130,6 +156,15 @@ async def refresh_user(self, user, handler=None, **kwargs): if expires_in is not None: auth_model["auth_state"]["expires_at"] = time.time() + int(expires_in) + # Re-fetch GitHub teams with the new token + new_token = auth_model["auth_state"].get("access_token") + if new_token and org_name: + try: + teams = await fetch_github_teams(new_token, org_name) + auth_model["auth_state"]["github_teams"] = teams + except Exception: + log.warning("Failed to refresh 
GitHub teams for %s", user.name, exc_info=True) + return auth_model # Not close to expiry — let the parent handle the standard flow diff --git a/runtime/hub/core/config.py b/runtime/hub/core/config.py index e5ce7bf..64034b9 100644 --- a/runtime/hub/core/config.py +++ b/runtime/hub/core/config.py @@ -139,6 +139,22 @@ class GitCloneSettings(BaseModel): model_config = {"extra": "allow"} +class HubNetworkSettings(BaseModel): + """Network settings applied to the Hub process.""" + + allowedOrigins: list[str] = Field(default_factory=list) + + model_config = {"extra": "allow"} + + +class NotebookNetworkSettings(BaseModel): + """Network settings applied to each notebook server (singleuser pod).""" + + allowedOrigins: list[str] = Field(default_factory=list) + + model_config = {"extra": "allow"} + + class ParsedConfig(BaseModel): """Parsed configuration from values.yaml custom section.""" @@ -147,6 +163,8 @@ class ParsedConfig(BaseModel): teams: TeamsConfig = Field(default_factory=TeamsConfig) quota: QuotaSettings = Field(default_factory=QuotaSettings) gitClone: GitCloneSettings = Field(default_factory=GitCloneSettings) + hub: HubNetworkSettings = Field(default_factory=HubNetworkSettings) + notebook: NotebookNetworkSettings = Field(default_factory=NotebookNetworkSettings) model_config = {"extra": "allow"} @@ -158,6 +176,8 @@ def from_dicts( teams: dict | None = None, quota: dict | None = None, git_clone: dict | None = None, + hub: dict | None = None, + notebook: dict | None = None, ) -> ParsedConfig: """Create configuration from individual dicts.""" raw_config: dict[str, Any] = {} @@ -172,6 +192,10 @@ def from_dicts( raw_config["quota"] = quota if git_clone: raw_config["gitClone"] = git_clone + if hub: + raw_config["hub"] = hub + if notebook: + raw_config["notebook"] = notebook return cls.model_validate(raw_config) @@ -252,6 +276,8 @@ def init(cls, config_path: str | Path) -> HubConfig: teams=raw_config.get("teams"), quota=raw_config.get("quota"), 
git_clone=raw_config.get("gitClone"), + hub=raw_config.get("hub"), + notebook=raw_config.get("notebook"), ) # Quota enabled: from config or auto-detect based on auth_mode @@ -322,6 +348,16 @@ def git_clone(self) -> GitCloneSettings: """Get git clone configuration.""" return self._config.gitClone + @property + def hub_network(self) -> HubNetworkSettings: + """Get Hub-level network settings.""" + return self._config.hub + + @property + def notebook_network(self) -> NotebookNetworkSettings: + """Get notebook server network settings.""" + return self._config.notebook + # ========================================================================= # Helper Methods # ========================================================================= diff --git a/runtime/hub/core/groups.py b/runtime/hub/core/groups.py new file mode 100644 index 0000000..c4e7c06 --- /dev/null +++ b/runtime/hub/core/groups.py @@ -0,0 +1,220 @@ +# Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +""" +Group Sync and Resource Resolution + +Provides functions for: +- Fetching GitHub team memberships via API +- Syncing GitHub teams to JupyterHub groups (protected, source=github-team) +- Resolving user resources from JupyterHub group memberships +""" + +from __future__ import annotations + +import logging + +import aiohttp +from jupyterhub.orm import Group as ORMGroup +from jupyterhub.user import User as JupyterHubUser +from sqlalchemy.orm import Session + +log = logging.getLogger("jupyterhub.groups") + +GITHUB_TEAM_SOURCE = "github-team" +SYSTEM_SOURCE = "system" + + +async def fetch_github_teams(access_token: str, org_name: str) -> list[str]: + """Fetch the user's GitHub team slugs for the given organization. + + Args: + access_token: GitHub OAuth access token. + org_name: GitHub organization name to filter teams by. + + Returns: + List of team slugs the user belongs to in the organization. 
+ """ + headers = { + "Authorization": f"token {access_token}", + "Accept": "application/vnd.github.v3+json", + } + + teams: list[str] = [] + try: + async with ( + aiohttp.ClientSession() as session, + session.get("https://api.github.com/user/teams", headers=headers) as resp, + ): + if resp.status == 200: + data = await resp.json() + for team in data: + if team["organization"]["login"] == org_name: + teams.append(team["slug"]) + else: + log.warning("GitHub API returned status %d when fetching teams", resp.status) + except Exception as e: + log.warning("Error fetching GitHub teams: %s", e) + + return teams + + +def sync_user_github_teams( + user: JupyterHubUser, + team_slugs: list[str], + valid_mapping_keys: set[str], + db: Session, +) -> None: + """Sync a user's GitHub team memberships to JupyterHub groups. + + For each team slug that exists in ``valid_mapping_keys``, ensures + a JupyterHub group exists with ``properties.source = "github-team"`` + and adds the user to it. Removes the user from any github-team groups + they no longer belong to. + + Args: + user: JupyterHub User object. + team_slugs: Team slugs the user currently belongs to on GitHub. + valid_mapping_keys: Set of group names that have resource mappings in config. + db: JupyterHub database session (``self.db`` from a handler or hook). 
+ """ + relevant_teams = set(team_slugs) & valid_mapping_keys + assert user.orm_user is not None # populated by JupyterHub on init + + # Ensure groups exist and add user + for team_slug in relevant_teams: + orm_group = db.query(ORMGroup).filter_by(name=team_slug).first() + if orm_group is None: + orm_group = ORMGroup(name=team_slug) + orm_group.properties = {"source": GITHUB_TEAM_SOURCE} # type: ignore[assignment] + db.add(orm_group) + db.commit() + log.info("Created JupyterHub group '%s' (source: github-team)", team_slug) + elif orm_group.properties.get("source") != GITHUB_TEAM_SOURCE: + # GitHub team always takes priority over admin-created groups + orm_group.properties = {**orm_group.properties, "source": GITHUB_TEAM_SOURCE} # type: ignore[assignment] + db.commit() + log.info("Group '%s' promoted to github-team source", team_slug) + + # Add user to group if not already a member + if orm_group not in user.orm_user.groups: + user.orm_user.groups.append(orm_group) + db.commit() + log.info("Added user '%s' to group '%s'", user.name, team_slug) + + # Remove user from github-team groups they no longer belong to + for orm_group in list(user.orm_user.groups): + if orm_group.properties.get("source") == GITHUB_TEAM_SOURCE and orm_group.name not in relevant_teams: + user.orm_user.groups.remove(orm_group) + db.commit() + log.info("Removed user '%s' from group '%s'", user.name, orm_group.name) + + +def assign_user_to_group( + user: JupyterHubUser, + group_name: str, + db: Session, +) -> None: + """Assign a user to a JupyterHub group, creating it if needed. + + Used for native users to assign them to pattern-based groups. + + Args: + user: JupyterHub User object. + group_name: Name of the group to assign to. + db: JupyterHub database session. 
+ """ + assert user.orm_user is not None # populated by JupyterHub on init + + orm_group = db.query(ORMGroup).filter_by(name=group_name).first() + if orm_group is None: + orm_group = ORMGroup(name=group_name) + orm_group.properties = {"source": SYSTEM_SOURCE} # type: ignore[assignment] + db.add(orm_group) + db.commit() + log.info("Created JupyterHub group '%s' (source: system)", group_name) + elif not orm_group.properties.get("source"): + orm_group.properties = {**orm_group.properties, "source": SYSTEM_SOURCE} # type: ignore[assignment] + db.commit() + + if orm_group not in user.orm_user.groups: + user.orm_user.groups.append(orm_group) + db.commit() + log.info("Added user '%s' to group '%s'", user.name, group_name) + + +def get_resources_for_user( + user: JupyterHubUser, + team_resource_mapping: dict[str, list[str]], +) -> list[str]: + """Get available resources for a user based on their JupyterHub group memberships. + + Iterates over the user's groups and looks up each group name in the + ``team_resource_mapping``. If a group maps to ``"official"``, the full + official resource list is returned immediately (short-circuit). + + Args: + user: JupyterHub User object. + team_resource_mapping: Mapping of group/team names to resource lists. + + Returns: + Deduplicated list of resource names the user can access. + """ + assert user.orm_user is not None # populated by JupyterHub on init + user_group_names = {g.name for g in user.orm_user.groups} + available_resources: list[str] = [] + + for group_name in user_group_names: + if group_name not in team_resource_mapping: + continue + available_resources.extend(team_resource_mapping[group_name]) + + # Deduplicate while preserving order + return list(dict.fromkeys(available_resources)) + + +def is_readonly_group(group: ORMGroup) -> bool: + """Check if a group's membership is fully read-only. + + Only system-managed groups are fully read-only. 
GitHub-team groups + allow manual member additions (admins can add native users to grant + them the same resources). Synced GitHub members are auto-managed: + they may be re-added or removed on the next login sync. + + Args: + group: JupyterHub ORM Group object. + + Returns: + True if the group's source is "system". + """ + return group.properties.get("source") == SYSTEM_SOURCE # type: ignore[union-attr] + + +def is_undeletable_group(group: ORMGroup) -> bool: + """Check if a group cannot be deleted. + + Both GitHub-synced groups and system-managed groups are undeletable. + + Args: + group: JupyterHub ORM Group object. + + Returns: + True if the group's source is "github-team" or "system". + """ + return group.properties.get("source") in (GITHUB_TEAM_SOURCE, SYSTEM_SOURCE) # type: ignore[union-attr] diff --git a/runtime/hub/core/handlers.py b/runtime/hub/core/handlers.py index 7532a26..1361084 100644 --- a/runtime/hub/core/handlers.py +++ b/runtime/hub/core/handlers.py @@ -48,6 +48,14 @@ QuotaRefreshRequest, get_quota_manager, ) +from core.stats_handlers import ( + StatsActiveSSEHandler, + StatsDistributionHandler, + StatsHourlyHandler, + StatsOverviewHandler, + StatsUsageHandler, + StatsUserHandler, +) # ============================================================================= # Module-level configuration (set via configure_handlers) @@ -58,6 +66,8 @@ "quota_rates": {}, "quota_enabled": False, "minimum_quota_to_start": 10, + "default_quota": 0, + "team_resource_mapping": {}, } @@ -66,6 +76,9 @@ def configure_handlers( quota_rates: dict[str, int] | None = None, quota_enabled: bool = False, minimum_quota_to_start: int = 10, + default_quota: int = 0, + team_resource_mapping: dict[str, list[str]] | None = None, + github_org: str = "", ) -> None: """Configure handler module with runtime settings.""" if accelerator_options is not None: @@ -74,6 +87,10 @@ def configure_handlers( _handler_config["quota_rates"] = quota_rates _handler_config["quota_enabled"] = 
quota_enabled _handler_config["minimum_quota_to_start"] = minimum_quota_to_start + _handler_config["default_quota"] = default_quota + if team_resource_mapping is not None: + _handler_config["team_resource_mapping"] = team_resource_mapping + _handler_config["github_org"] = github_org # ============================================================================= @@ -377,6 +394,77 @@ async def get(self): self.finish(json.dumps({"password": password})) +class AdminAPIBatchSetPasswordHandler(APIHandler): + """API endpoint for batch setting user passwords.""" + + @web.authenticated + async def post(self): + """Set passwords for multiple users in a single request.""" + assert self.current_user is not None + if not self.current_user.admin: + self.set_status(403) + self.set_header("Content-Type", "application/json") + return self.finish(json.dumps({"error": "Admin access required"})) + + try: + data = json.loads(self.request.body.decode("utf-8")) + users = data.get("users", []) + force_change = data.get("force_change", True) + + if not users or not isinstance(users, list): + self.set_status(400) + self.set_header("Content-Type", "application/json") + return self.finish(json.dumps({"error": "users array is required"})) + + if len(users) > 1000: + self.set_status(400) + self.set_header("Content-Type", "application/json") + return self.finish(json.dumps({"error": "Maximum 1000 users per batch"})) + + # Validate entries + for entry in users: + if not isinstance(entry, dict) or "username" not in entry or "password" not in entry: + self.set_status(400) + self.set_header("Content-Type", "application/json") + return self.finish(json.dumps({"error": "Each entry must have username and password"})) + if entry.get("username", "").startswith("github:"): + self.set_status(400) + self.set_header("Content-Type", "application/json") + return self.finish( + json.dumps({"error": f"Cannot set password for GitHub user: {entry['username']}"}) + ) + + firstuse_auth = None + if 
isinstance(self.authenticator, MultiAuthenticator): + for authenticator in self.authenticator._authenticators: + if isinstance(authenticator, CustomFirstUseAuthenticator): + firstuse_auth = authenticator + break + + if not firstuse_auth: + self.set_status(500) + self.set_header("Content-Type", "application/json") + return self.finish(json.dumps({"error": "Password management not available"})) + + loop = asyncio.get_event_loop() + result = await loop.run_in_executor( + None, lambda: firstuse_auth.batch_set_passwords(users, force_change=force_change) + ) + + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + except json.JSONDecodeError: + self.set_status(400) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps({"error": "Invalid JSON"})) + except Exception as e: + self.log.error(f"Failed to batch set passwords: {e}") + self.set_status(500) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps({"error": "Internal server error"})) + + # ============================================================================= # Quota Management Handlers # ============================================================================= @@ -531,19 +619,44 @@ async def post(self): quota_manager = get_quota_manager() admin_name = self.current_user.name + # Separate unlimited and regular users + unlimited_users = [u for u in req.users if u.unlimited is True] + unset_unlimited_users = [u for u in req.users if u.unlimited is False] + regular_users = [u for u in req.users if u.unlimited is None] + results = {"success": 0, "failed": 0, "details": []} - for user in req.users: + # Handle unlimited users + for user in unlimited_users: + try: + quota_manager.set_unlimited(user.username, True, admin_name) + results["success"] += 1 + results["details"].append({"username": user.username, "status": "success", "unlimited": True}) + except Exception: + results["failed"] += 1 + results["details"].append( + 
{"username": user.username, "status": "failed", "error": "Processing error"} + ) + + # Handle unset-unlimited users + for user in unset_unlimited_users: try: - quota_manager.set_balance(user.username, user.amount, admin_name) + quota_manager.set_unlimited(user.username, False, admin_name) results["success"] += 1 - results["details"].append({"username": user.username, "status": "success", "balance": user.amount}) except Exception: results["failed"] += 1 results["details"].append( {"username": user.username, "status": "failed", "error": "Processing error"} ) + # Batch set balance for regular users + unset-unlimited users in single transaction + batch_users = [(u.username, u.amount) for u in regular_users + unset_unlimited_users] + if batch_users: + batch_result = quota_manager.batch_set_quota(batch_users, admin_name) + results["success"] += batch_result["success"] + results["failed"] += batch_result["failed"] + results["details"].extend(batch_result.get("details", [])) + self.set_header("Content-Type", "application/json") self.finish(json.dumps(results)) @@ -641,6 +754,7 @@ async def get(self): "enabled": _handler_config["quota_enabled"], "rates": _handler_config["quota_rates"], "minimum_to_start": _handler_config["minimum_quota_to_start"], + "default_quota": _handler_config["default_quota"], } ) ) @@ -1058,6 +1172,261 @@ async def get(self): self.finish(json.dumps({"repos": repos, "installed": installed})) +# ============================================================================= +# Group Management API Handlers +# ============================================================================= + + +class GroupsAPIHandler(APIHandler): + """Admin API handler for listing groups with enriched source and resources info.""" + + @web.authenticated + async def get(self): + if not self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + from jupyterhub.orm import Group as ORMGroup + + team_resource_mapping = 
_handler_config.get("team_resource_mapping", {}) + orm_groups = self.db.query(ORMGroup).order_by(ORMGroup.name).all() + + # Lazy backfill: load_groups creates the group at startup but can't + # set properties on existing groups. Tag it here on first admin access. + from core.groups import SYSTEM_SOURCE + + for g in orm_groups: + if g.name == "native-users" and not g.properties.get("source"): + g.properties = {**g.properties, "source": SYSTEM_SOURCE} + self.db.commit() + + groups = [] + for g in orm_groups: + source = g.properties.get("source", "admin") + resources = team_resource_mapping.get(g.name, []) + groups.append( + { + "name": g.name, + "users": [u.name for u in g.users], + "properties": dict(g.properties), + "source": source, + "resources": resources, + } + ) + + github_org = _handler_config.get("github_org", "") + self.set_header("Content-Type", "application/json") + self.write(json.dumps({"groups": groups, "github_org": github_org})) + + +class GroupDetailAPIHandler(APIHandler): + """Admin API handler for a single group with protection for github-team groups.""" + + @web.authenticated + async def delete(self, group_name): + if not self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + from jupyterhub.orm import Group as ORMGroup + + orm_group = self.db.query(ORMGroup).filter_by(name=group_name).first() + if not orm_group: + raise web.HTTPError(404, f"Group '{group_name}' not found") + + from core.groups import is_undeletable_group + + if is_undeletable_group(orm_group): + raise web.HTTPError(403, "Cannot delete a protected group") + + self.db.delete(orm_group) + self.db.commit() + self.set_status(204) + + @web.authenticated + async def patch(self, group_name): + if not self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + from jupyterhub.orm import Group as ORMGroup + + orm_group = self.db.query(ORMGroup).filter_by(name=group_name).first() + if not orm_group: + raise web.HTTPError(404, f"Group 
'{group_name}' not found") + + try: + body = json.loads(self.request.body) + except (json.JSONDecodeError, ValueError): + raise web.HTTPError(400, "Invalid JSON body") from None + + # Release protection: convert a protected group to admin-managed + if body.get("release_protection"): + orm_group.properties = {k: v for k, v in orm_group.properties.items() if k != "source"} + self.db.commit() + elif "properties" in body: + new_props = body["properties"] + # Preserve system-managed reserved keys + reserved_keys = ("source",) + for key in reserved_keys: + existing = orm_group.properties.get(key) + if existing is not None: + new_props[key] = existing + orm_group.properties = new_props + self.db.commit() + + team_resource_mapping = _handler_config.get("team_resource_mapping", {}) + source = orm_group.properties.get("source", "admin") + resources = team_resource_mapping.get(orm_group.name, []) + + self.write( + json.dumps( + { + "name": orm_group.name, + "users": [u.name for u in orm_group.users], + "properties": dict(orm_group.properties), + "source": source, + "resources": resources, + } + ) + ) + + +class GroupMembersAPIHandler(APIHandler): + """Admin API handler for group membership with protection for github-team groups.""" + + @web.authenticated + async def post(self, group_name): + if not self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + from jupyterhub.orm import Group as ORMGroup + + orm_group = self.db.query(ORMGroup).filter_by(name=group_name).first() + if not orm_group: + raise web.HTTPError(404, f"Group '{group_name}' not found") + + from core.groups import is_readonly_group + + if is_readonly_group(orm_group): + raise web.HTTPError(403, "Cannot modify members of a protected group") + + try: + body = json.loads(self.request.body) + except (json.JSONDecodeError, ValueError): + raise web.HTTPError(400, "Invalid JSON body") from None + usernames = body.get("users", []) + + from jupyterhub.orm import User as ORMUser + + for username 
in usernames: + user = self.db.query(ORMUser).filter_by(name=username).first() + if user and user not in orm_group.users: + orm_group.users.append(user) + self.db.commit() + + team_resource_mapping = _handler_config.get("team_resource_mapping", {}) + self.write( + json.dumps( + { + "name": orm_group.name, + "users": [u.name for u in orm_group.users], + "properties": dict(orm_group.properties), + "source": orm_group.properties.get("source", "admin"), + "resources": team_resource_mapping.get(orm_group.name, []), + } + ) + ) + + @web.authenticated + async def delete(self, group_name): + if not self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + from jupyterhub.orm import Group as ORMGroup + + orm_group = self.db.query(ORMGroup).filter_by(name=group_name).first() + if not orm_group: + raise web.HTTPError(404, f"Group '{group_name}' not found") + + from core.groups import is_readonly_group + + if is_readonly_group(orm_group): + raise web.HTTPError(403, "Cannot modify members of a protected group") + + try: + body = json.loads(self.request.body) + except (json.JSONDecodeError, ValueError): + raise web.HTTPError(400, "Invalid JSON body") from None + usernames = body.get("users", []) + + from jupyterhub.orm import User as ORMUser + + for username in usernames: + user = self.db.query(ORMUser).filter_by(name=username).first() + if user and user in orm_group.users: + orm_group.users.remove(user) + self.db.commit() + + team_resource_mapping = _handler_config.get("team_resource_mapping", {}) + self.write( + json.dumps( + { + "name": orm_group.name, + "users": [u.name for u in orm_group.users], + "properties": dict(orm_group.properties), + "source": orm_group.properties.get("source", "admin"), + "resources": team_resource_mapping.get(orm_group.name, []), + } + ) + ) + + +class GroupSyncAPIHandler(APIHandler): + """Admin API handler to manually trigger GitHub team sync for all users.""" + + @web.authenticated + async def post(self): + if not 
self.current_user.admin: + raise web.HTTPError(403, "Admin access required") + + github_org = _handler_config.get("github_org", "") + if not github_org: + raise web.HTTPError(400, "No GitHub organization configured") + + from core.groups import fetch_github_teams, sync_user_github_teams + + team_resource_mapping = _handler_config.get("team_resource_mapping", {}) + valid_mapping_keys = set(team_resource_mapping.keys()) + + synced = 0 + failed = 0 + skipped = 0 + + for user in self.users.values(): + if not user.name.startswith("github:"): + skipped += 1 + continue + + try: + auth_state = await user.get_auth_state() + if not auth_state or "access_token" not in auth_state: + skipped += 1 + continue + + access_token = auth_state["access_token"] + teams = await fetch_github_teams(access_token, github_org) + sync_user_github_teams(user, teams, valid_mapping_keys, self.db) + + # Update auth_state so next spawn uses fresh data + auth_state["github_teams"] = teams + await user.save_auth_state(auth_state) + + synced += 1 + except Exception: + self.log.warning("Failed to sync teams for %s", user.name, exc_info=True) + failed += 1 + + self.write(json.dumps({"synced": synced, "failed": failed, "skipped": skipped})) + + # ============================================================================= # Handler Registration # ============================================================================= @@ -1079,7 +1448,13 @@ def get_handlers() -> list[tuple[str, type]]: (r"/admin/users", AdminUIHandler), (r"/admin/groups", AdminUIHandler), (r"/admin/api/set-password", AdminAPISetPasswordHandler), + (r"/admin/api/batch-set-password", AdminAPIBatchSetPasswordHandler), (r"/admin/api/generate-password", AdminAPIGeneratePasswordHandler), + # Group management API + (r"/admin/api/groups/?", GroupsAPIHandler), + (r"/admin/api/groups/sync/?", GroupSyncAPIHandler), + (r"/admin/api/groups/([^/]+)/?", GroupDetailAPIHandler), + (r"/admin/api/groups/([^/]+)/users/?", GroupMembersAPIHandler), # 
Accelerator info API (r"/api/accelerators", AcceleratorsAPIHandler), # Resources API @@ -1097,6 +1472,15 @@ def get_handlers() -> list[tuple[str, type]]: (r"/admin/api/quota/([^/]+)", QuotaAPIHandler), (r"/api/quota/rates", QuotaRatesAPIHandler), (r"/api/quota/me", UserQuotaInfoHandler), + # Usage stats API + (r"/admin/api/stats/overview", StatsOverviewHandler), + (r"/admin/api/stats/usage", StatsUsageHandler), + (r"/admin/api/stats/distribution", StatsDistributionHandler), + (r"/admin/api/stats/hourly", StatsHourlyHandler), + (r"/admin/api/stats/active/stream", StatsActiveSSEHandler), + (r"/admin/api/stats/user/([^/]+)", StatsUserHandler), + # Dashboard UI + (r"/admin/dashboard", AdminUIHandler), ] @@ -1119,6 +1503,10 @@ def get_handlers() -> list[tuple[str, type]]: "UserQuotaInfoHandler", "ResourcesAPIHandler", "GitHubReposHandler", + # Group management handlers + "GroupsAPIHandler", + "GroupDetailAPIHandler", + "GroupMembersAPIHandler", # Configuration "configure_handlers", # Registration diff --git a/runtime/hub/core/jupyterhub_config.py b/runtime/hub/core/jupyterhub_config.py index 3264c3d..04925c0 100644 --- a/runtime/hub/core/jupyterhub_config.py +++ b/runtime/hub/core/jupyterhub_config.py @@ -60,6 +60,16 @@ def get_config() -> Config: ... 
setup_hub(c) +# Hub allowed origins: set CORS headers on the Hub's Tornado server +_hub_config = HubConfig.get() +_hub_allowed_origins = _hub_config.hub_network.allowedOrigins +if _hub_allowed_origins: + _origin_val = "*" if "*" in _hub_allowed_origins else ", ".join(_hub_allowed_origins) + _ts = dict(c.JupyterHub.tornado_settings or {}) + _ts.setdefault("headers", {})["Access-Control-Allow-Origin"] = _origin_val + c.JupyterHub.tornado_settings = _ts + print(f"[CONFIG] Hub allowedOrigins: {_hub_allowed_origins}") + # ============================================================================= # Z2JH Standard Configuration (Kubernetes-specific) # ============================================================================= diff --git a/runtime/hub/core/quota/manager.py b/runtime/hub/core/quota/manager.py index 89cb429..34e55c3 100644 --- a/runtime/hub/core/quota/manager.py +++ b/runtime/hub/core/quota/manager.py @@ -30,7 +30,9 @@ import threading from datetime import datetime, timedelta -from core.database import get_session, session_scope +from sqlalchemy import inspect, text + +from core.database import get_engine, get_session, session_scope from core.quota.orm import QuotaTransaction, UsageSession, UserQuota # Re-export models for backwards compatibility @@ -71,6 +73,35 @@ def __init__(self): self._op_lock = threading.Lock() self._initialized = True print("[QUOTA] QuotaManager initialized") + self._ensure_schema() + + def _ensure_schema(self) -> None: + """Ensure expected columns exist on quota tables.""" + try: + engine = get_engine() + inspector = inspect(engine) + columns = {col["name"] for col in inspector.get_columns(UsageSession.__tablename__)} + added_column = False + if "accelerator_type" not in columns: + with engine.begin() as conn: + conn.execute( + text(f"ALTER TABLE {UsageSession.__tablename__} ADD COLUMN accelerator_type VARCHAR(100)") + ) + added_column = True + print("[QUOTA] Added accelerator_type column to quota_usage_sessions") + + with 
engine.begin() as conn: + result = conn.execute( + text( + f"UPDATE {UsageSession.__tablename__} " + "SET accelerator_type = resource_type " + "WHERE accelerator_type IS NULL OR accelerator_type = ''" + ) + ) + if added_column or (getattr(result, "rowcount", 0) or 0) > 0: + print("[QUOTA] Backfilled accelerator_type on quota_usage_sessions") + except Exception as exc: + print(f"[QUOTA] Warning: failed to ensure accelerator_type column: {exc}") def get_balance(self, username: str) -> int: """Get user's current quota balance.""" @@ -272,13 +303,14 @@ def deduct_quota(self, username: str, amount: int, resource_type: str = "") -> i return balance_after - def start_session(self, username: str, resource_type: str) -> int: + def start_session(self, username: str, resource_type: str, accelerator_type: str | None = None) -> int: """Start a usage session and return session ID.""" username = username.lower() with session_scope() as session: usage_session = UsageSession( username=username, resource_type=resource_type, + accelerator_type=accelerator_type, start_time=datetime.now(), status="active", ) @@ -308,17 +340,22 @@ def end_session(self, session_id: int, quota_consumed: int = 0) -> dict | None: "session_id": usage_session.id, "username": usage_session.username, "resource_type": usage_session.resource_type, + "accelerator_type": usage_session.accelerator_type, "duration_minutes": int(duration), "quota_consumed": quota_consumed, } - def end_usage_session(self, session_id: int, quota_rates: dict[str, int]) -> tuple[int, int]: + def end_usage_session(self, session_id: int, quota_rates: dict[str, int] | None = None) -> tuple[int, int]: """ - End a usage session and calculate quota consumed. + End a usage session and optionally deduct quota. + + Always records session duration. Quota deduction only happens when + quota_rates is provided (quota system is enabled). 
Args: session_id: The session ID to end - quota_rates: Mapping of resource_type to quota rate per minute + quota_rates: Mapping of resource_type to quota rate per minute. + Pass None to skip quota deduction (usage tracking only). Returns: Tuple of (duration_minutes, quota_consumed) @@ -331,32 +368,36 @@ def end_usage_session(self, session_id: int, quota_rates: dict[str, int]) -> tup end_time = datetime.now() duration_minutes = int((end_time - usage_session.start_time).total_seconds() / 60) - # Calculate quota based on resource type and duration - rate = quota_rates.get(usage_session.resource_type, 1) - quota_consumed = duration_minutes * rate - usage_session.end_time = end_time usage_session.duration_minutes = duration_minutes - usage_session.quota_consumed = quota_consumed usage_session.status = "completed" - # Deduct quota from user balance - if quota_consumed > 0: - user = session.query(UserQuota).filter(UserQuota.username == usage_session.username).first() - if user and not user.unlimited: - balance_before = user.balance - user.balance = max(0, balance_before - quota_consumed) - - transaction = QuotaTransaction( - username=usage_session.username, - amount=-quota_consumed, - transaction_type="usage", - resource_type=usage_session.resource_type, - balance_before=balance_before, - balance_after=user.balance, - description=f"Session {session_id}: {duration_minutes} min @ {rate}/min", - ) - session.add(transaction) + quota_consumed = 0 + if quota_rates is not None: + # Calculate and deduct quota + accelerator_key = usage_session.accelerator_type or usage_session.resource_type + rate = quota_rates.get(accelerator_key, quota_rates.get("cpu", 1)) + quota_consumed = duration_minutes * rate + usage_session.quota_consumed = quota_consumed + + if quota_consumed > 0: + user = session.query(UserQuota).filter(UserQuota.username == usage_session.username).first() + if user and not user.unlimited: + balance_before = user.balance + user.balance = max(0, balance_before - 
quota_consumed) + + transaction = QuotaTransaction( + username=usage_session.username, + amount=-quota_consumed, + transaction_type="usage", + resource_type=usage_session.resource_type, + balance_before=balance_before, + balance_after=user.balance, + description=( + f"Session {session_id}: {duration_minutes} min @ {accelerator_key} ({rate}/min)" + ), + ) + session.add(transaction) return (duration_minutes, quota_consumed) @@ -377,6 +418,7 @@ def get_active_session(self, username: str) -> dict | None: "session_id": usage_session.id, "username": usage_session.username, "resource_type": usage_session.resource_type, + "accelerator_type": usage_session.accelerator_type, "start_time": usage_session.start_time.isoformat(), } finally: @@ -410,6 +452,7 @@ def cleanup_stale_sessions(self, max_duration_minutes: int = 480) -> list[dict]: "session_id": usage_session.id, "username": usage_session.username, "resource_type": usage_session.resource_type, + "accelerator_type": usage_session.accelerator_type, "duration_minutes": duration_minutes, } ) @@ -476,15 +519,38 @@ def get_all_balances(self) -> list[dict]: session.close() def batch_set_quota(self, users: list[tuple[str, int]], admin: str | None = None) -> dict: - """Set quota for multiple users at once.""" - results = {"success": 0, "failed": 0} - for username, amount in users: - try: - self.set_balance(username, amount, admin) - results["success"] += 1 - except Exception as e: - results["failed"] += 1 - print(f"Failed to set quota for {username}: {e}") + """Set quota for multiple users in a single transaction with per-user savepoints.""" + results = {"success": 0, "failed": 0, "details": []} + with self._op_lock, session_scope() as session: + for username, amount in users: + try: + with session.begin_nested(): + uname = username.lower() + user = session.query(UserQuota).filter(UserQuota.username == uname).first() + if not user: + user = UserQuota(username=uname, balance=amount) + session.add(user) + balance_before = 0 + else: 
+ balance_before = user.balance + user.balance = amount + + transaction = QuotaTransaction( + username=uname, + amount=amount - balance_before, + transaction_type="set", + balance_before=balance_before, + balance_after=amount, + description=f"Balance set to {amount}", + created_by=admin, + ) + session.add(transaction) + results["success"] += 1 + results["details"].append({"username": uname, "status": "success", "balance": amount}) + except Exception as e: + results["failed"] += 1 + results["details"].append({"username": username, "status": "failed", "error": str(e)}) + print(f"Failed to set quota for {username}: {e}") return results def _match_targets(self, username: str, balance: int, is_unlimited: bool, targets: dict) -> bool: diff --git a/runtime/hub/core/quota/migrate.py b/runtime/hub/core/quota/migrate.py index 9cd4e7f..406760d 100644 --- a/runtime/hub/core/quota/migrate.py +++ b/runtime/hub/core/quota/migrate.py @@ -167,6 +167,7 @@ def migrate_quota_data(target_db_url: str) -> dict: usage_session = UsageSession( username=row["username"], resource_type=row["resource_type"], + accelerator_type=row["resource_type"], start_time=_parse_datetime(row["start_time"]) or datetime.now(), end_time=_parse_datetime(_row_get(row, "end_time")), duration_minutes=_row_get(row, "duration_minutes"), diff --git a/runtime/hub/core/quota/models.py b/runtime/hub/core/quota/models.py index c6e7d2e..875d766 100644 --- a/runtime/hub/core/quota/models.py +++ b/runtime/hub/core/quota/models.py @@ -69,7 +69,8 @@ class BatchQuotaUser(BaseModel): """User entry for batch quota operation.""" username: str = Field(..., min_length=1, max_length=200, pattern=r"^[a-zA-Z0-9._@-]+$") - amount: int = Field(..., ge=-10_000_000, le=10_000_000) + amount: int = Field(default=0, ge=-10_000_000, le=10_000_000) + unlimited: bool | None = Field(default=None, description="Set unlimited quota (overrides amount)") class BatchQuotaRequest(BaseModel): diff --git a/runtime/hub/core/quota/orm.py 
b/runtime/hub/core/quota/orm.py index 405a1bf..96adcee 100644 --- a/runtime/hub/core/quota/orm.py +++ b/runtime/hub/core/quota/orm.py @@ -72,6 +72,7 @@ class UsageSession(Base): id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) username: Mapped[str] = mapped_column(String(255), nullable=False, index=True) resource_type: Mapped[str] = mapped_column(String(100), nullable=False) + accelerator_type: Mapped[str | None] = mapped_column(String(100), nullable=True) start_time: Mapped[datetime] = mapped_column(DateTime, nullable=False) end_time: Mapped[datetime | None] = mapped_column(DateTime) duration_minutes: Mapped[int | None] = mapped_column(Integer) diff --git a/runtime/hub/core/setup.py b/runtime/hub/core/setup.py index 43b8a88..8afbf28 100644 --- a/runtime/hub/core/setup.py +++ b/runtime/hub/core/setup.py @@ -84,6 +84,14 @@ def setup_hub(c: Any) -> None: c.JupyterHub.spawner_class = RemoteLabKubeSpawner + # ========================================================================= + # Pre-create System Groups + # ========================================================================= + # Ensure system-managed groups exist at startup (before any user logs in). + # Note: load_groups does NOT set properties on existing groups, so the + # source=system backfill is handled lazily in the admin groups API handler. 
+ c.JupyterHub.load_groups = {"native-users": []} + # ========================================================================= # Configure Authenticator # ========================================================================= @@ -94,9 +102,50 @@ def setup_hub(c: Any) -> None: async def auth_state_hook(spawner, auth_state): if auth_state is None: spawner.github_access_token = None + # Still assign native users to their default group + if not spawner.user.name.startswith("github:"): + try: + from core.groups import assign_user_to_group + + assign_user_to_group(spawner.user, "native-users", spawner.user.db) + except Exception as e: + print(f"[GROUPS] Warning: Failed to assign native user group for {spawner.user.name}: {e}") return spawner.github_access_token = auth_state.get("access_token") + # Sync GitHub teams to JupyterHub groups. + # Always fetch fresh teams from GitHub at spawn time so that team + # membership changes (add/remove) are reflected without requiring + # the user to log out and back in. + if spawner.user.name.startswith("github:"): + access_token = auth_state.get("access_token") + if access_token and config.github_org_name: + try: + from core.groups import fetch_github_teams, sync_user_github_teams + + github_teams = await fetch_github_teams(access_token, config.github_org_name) + valid_mapping_keys = set(config.teams.mapping.keys()) + sync_user_github_teams( + spawner.user, + github_teams, + valid_mapping_keys, + spawner.user.db, + ) + # Update cached teams in auth_state so refresh_user() + # retains the latest team list across token refreshes. 
+ auth_state["github_teams"] = github_teams + await spawner.user.save_auth_state(auth_state) + except Exception as e: + print(f"[GROUPS] Warning: Failed to sync GitHub teams for {spawner.user.name}: {e}") + elif not spawner.user.name.startswith("github:"): + # Native user with auth_state but no GitHub teams + try: + from core.groups import assign_user_to_group + + assign_user_to_group(spawner.user, "native-users", spawner.user.db) + except Exception as e: + print(f"[GROUPS] Warning: Failed to assign native user group for {spawner.user.name}: {e}") + c.Spawner.auth_state_hook = auth_state_hook # Set authenticator based on mode @@ -126,6 +175,9 @@ async def auth_state_hook(spawner, auth_state): quota_rates=config.build_quota_rates(), quota_enabled=config.quota_enabled, minimum_quota_to_start=config.quota.minimumToStart, + default_quota=config.quota.defaultQuota, + team_resource_mapping=dict(config.teams.mapping), + github_org=config.github_org_name, ) if not hasattr(c.JupyterHub, "extra_handlers") or c.JupyterHub.extra_handlers is None: @@ -134,6 +186,59 @@ async def auth_state_hook(spawner, auth_state): for route, handler in get_handlers(): c.JupyterHub.extra_handlers.append((route, handler)) + # ========================================================================= + # Protect GitHub-synced groups in native JupyterHub API + # ========================================================================= + # + # JupyterHub registers its own /api/groups/* handlers in default_handlers + # BEFORE extra_handlers, so extra_handlers cannot override them (Tornado + # matches first-registered route first). We replace the handler classes + # in-place within the default_handlers list so that the native routes + # point to our protected subclasses. 
+ + from jupyterhub.apihandlers import default_handlers as _api_default_handlers + from jupyterhub.apihandlers.groups import ( + GroupAPIHandler as _OrigGroupAPI, + ) + from jupyterhub.apihandlers.groups import ( + GroupUsersAPIHandler as _OrigGroupUsersAPI, + ) + from tornado import web + + from core.groups import is_readonly_group as _is_readonly + from core.groups import is_undeletable_group as _is_undeletable + + class _ProtectedGroupAPIHandler(_OrigGroupAPI): + def delete(self, group_name): + group = self.find_group(group_name) + if _is_undeletable(group): + raise web.HTTPError(403, "Cannot delete a protected group") + return super().delete(group_name) + + class _ProtectedGroupUsersAPIHandler(_OrigGroupUsersAPI): + def post(self, group_name): + group = self.find_group(group_name) + if _is_readonly(group): + raise web.HTTPError(403, "Cannot modify members of a protected group") + return super().post(group_name) + + async def delete(self, group_name): + group = self.find_group(group_name) + if _is_readonly(group): + raise web.HTTPError(403, "Cannot modify members of a protected group") + return await super().delete(group_name) + + _replacements = { + _OrigGroupAPI: _ProtectedGroupAPIHandler, + _OrigGroupUsersAPI: _ProtectedGroupUsersAPIHandler, + } + + for i, (route, handler) in enumerate(_api_default_handlers): + if handler in _replacements: + _api_default_handlers[i] = (route, _replacements[handler]) + + print("[SETUP] Protected GitHub-synced groups in native JupyterHub API") + # ========================================================================= # Determine Database URL # ========================================================================= @@ -176,24 +281,28 @@ async def auth_state_hook(spawner, auth_state): # Initialize Quota Manager # ========================================================================= + # Always initialize QuotaManager for session tracking (regardless of quota_enabled) + try: + from core.quota import init_quota_manager + + 
quota_manager = init_quota_manager() + stale_sessions = quota_manager.cleanup_stale_sessions() + if stale_sessions: + print(f"[QUOTA] Cleaned up {len(stale_sessions)} stale sessions on startup") + active_count = quota_manager.get_active_sessions_count() + print(f"[QUOTA] {active_count} active sessions found") + except Exception as e: + print(f"[QUOTA] Warning: Failed to initialize quota manager: {e}") + if config.quota_enabled: try: - from core.quota import init_quota_manager from core.quota.migrate import check_migration_needed, migrate_quota_data - # Check and run migration from old quota.sqlite if needed if check_migration_needed(): print("[QUOTA] Migrating data from old quota.sqlite...") migrate_quota_data(db_url) - - quota_manager = init_quota_manager() - stale_sessions = quota_manager.cleanup_stale_sessions() - if stale_sessions: - print(f"[QUOTA] Cleaned up {len(stale_sessions)} stale sessions on startup") - active_count = quota_manager.get_active_sessions_count() - print(f"[QUOTA] {active_count} active sessions found") except Exception as e: - print(f"[QUOTA] Warning: Failed to initialize quota manager: {e}") + print(f"[QUOTA] Warning: Failed to run quota migration: {e}") # ========================================================================= # API Token diff --git a/runtime/hub/core/spawner/kubernetes.py b/runtime/hub/core/spawner/kubernetes.py index c869d3f..14226b6 100644 --- a/runtime/hub/core/spawner/kubernetes.py +++ b/runtime/hub/core/spawner/kubernetes.py @@ -35,7 +35,6 @@ from typing import TYPE_CHECKING, Any from urllib.parse import urlparse -import aiohttp from jupyterhub.user import User as JupyterHubUser from kubespawner import KubeSpawner from tornado import web @@ -101,6 +100,9 @@ class RemoteLabKubeSpawner(KubeSpawner): DEFAULT_ACCESS_TOKEN: bool = False DEFAULT_ACCESS_TOKEN_SECRET: str = "jupyterhub-git-default-token" + # Allowed origins for notebook server WebSocket connections + notebook_allowed_origins: list[str] = [] + @classmethod 
def configure_from_config(cls, config: HubConfig) -> None: """ @@ -143,118 +145,69 @@ def configure_from_config(cls, config: HubConfig) -> None: cls.GITHUB_APP_NAME = git_config.githubAppName cls.DEFAULT_ACCESS_TOKEN = bool(git_config.defaultAccessToken) - async def get_user_teams(self) -> list[str]: - """ - Get available resources for the user based on their GitHub team membership. + # Extract singleuser allowed origins + cls.notebook_allowed_origins = list(config.notebook_network.allowedOrigins) + + async def get_user_resources(self) -> list[str]: + """Get available resources for the user based on their JupyterHub group memberships. + + For auto-login/dummy modes, returns the "official" resource set. + For all other users, resolves resources from JupyterHub groups + (which are synced from GitHub teams or assigned to native users + via the auth_state_hook). Falls back to legacy pattern matching + for native users with no group assignments. Returns: List of resource names the user can access """ username = self.user.name.strip() - username_upper = username.upper() - self.log.debug(f"Checking resource group for user: {username}") + self.log.debug(f"Resolving resources for user: {username}") # Auto-login or dummy mode: grant all resources if self.auth_mode in ["auto-login", "dummy"]: self.log.debug(f"Auth mode '{self.auth_mode}': granting all resources") return self.team_resource_mapping.get("official", []) - # Native users (no prefix) - check by absence of "github:" prefix - if not username.startswith("github:"): - self.log.debug(f"Native user detected: {username}") - if "AUP" in username_upper: - self.log.debug("Matched AUP user group") - return self.team_resource_mapping.get("AUP", []) - elif "TEST" in username_upper: - self.log.debug("Matched TEST user group") - return self.team_resource_mapping.get("official", []) - # Default for native users - self.log.debug("Native user with default resources") - return self.team_resource_mapping.get("native-users", 
self.team_resource_mapping.get("official", [])) - - # GitHub users - fetch team membership - auth_state = await self.user.get_auth_state() - if not auth_state or "access_token" not in auth_state: - self.log.debug( - "No auth state or access token found, setting to NONE, check if there is a local account config error." - ) - return ["none"] - - access_token = auth_state["access_token"] - headers = { - "Authorization": f"token {access_token}", - "Accept": "application/vnd.github.v3+json", - } - - teams = [] - try: - async with ( - aiohttp.ClientSession() as session, - session.get("https://api.github.com/user/teams", headers=headers) as resp, - ): - if resp.status == 200: - data = await resp.json() - for team in data: - if team["organization"]["login"] == self.github_org_name: - teams.append(team["slug"]) - else: - self.log.debug(f"GitHub API request failed with status {resp.status}") - except Exception as e: - self.log.debug(f"Error fetching teams: {e}") - - # Map teams to available resources - available_resources = [] - for team, resources in self.team_resource_mapping.items(): - if team in teams: - if team == "official": - available_resources = self.team_resource_mapping[team] - break - else: - available_resources.extend(resources) + # Resolve resources from JupyterHub groups + from core.groups import get_resources_for_user - # Remove duplicates while preserving order - available_resources = list(dict.fromkeys(available_resources)) + available_resources = get_resources_for_user(self.user, self.team_resource_mapping) - # If no teams found, provide basic access - if not available_resources: - available_resources = ["none"] - self.log.debug("No team info for this user, set to none") + if available_resources: + self.log.debug(f"User '{username}' resources from groups: {available_resources}") + return available_resources - self.log.debug(f"User teams: {teams} Available resources: {available_resources}") + # Defensive fallback: auth_state_hook should have already 
assigned + # native users to the "native-users" group, but if that failed for + # any reason, fall back to the mapping entry directly. + if not username.startswith("github:"): + self.log.debug(f"Native user '{username}' has no groups, using default fallback") + return self.team_resource_mapping.get("native-users", self.team_resource_mapping.get("official", [])) - return available_resources + # GitHub user with no matching groups + self.log.debug(f"No resources found for user '{username}', set to none") + return ["none"] async def options_form(self, _) -> str: - """Generate the HTML form for resource selection.""" + """Generate the HTML form for resource selection. + + Returns a -""" - - html_content = html_content.replace("", injection_script) + available_resources_js = json.dumps(available_resource_names) + single_node_mode_js = "true" if self.single_node_mode else "false" - self.log.debug(f"Successfully loaded template from {template_file}") - return html_content - else: - self.log.debug(f"Failed to load template from {template_file}, Fall back to basic form.") - return self._generate_fallback_form(available_resource_names) + return ( + "" + ) except Exception as e: self.log.error(f"Failed to load options form: {e}", exc_info=True) @@ -748,18 +701,20 @@ async def start(self): # Determine accelerator type for quota calculation accelerator_type = gpu_selection if gpu_selection else "cpu" - # Quota check (if enabled) - if self.quota_enabled: - from core.quota import get_quota_manager + from core.quota import get_quota_manager - quota_manager = get_quota_manager() + quota_manager = get_quota_manager() + # Always start a usage session for tracking, regardless of quota state + self.usage_session_id = quota_manager.start_usage_session(username, resource_type, accelerator_type) + + # Quota check (if enabled) + if self.quota_enabled: # Check if user has unlimited quota has_unlimited = quota_manager.is_unlimited_in_db(username) if has_unlimited: print(f"[QUOTA] User 
{username} has unlimited quota, skipping quota check") - self.usage_session_id = None self._has_unlimited_quota = True else: can_start, message, estimated_cost = quota_manager.can_start_container( @@ -777,14 +732,11 @@ async def start(self): f"Cannot start container: {message}. Please contact administrator to add quota.", ) - # Start usage session for tracking - self.usage_session_id = quota_manager.start_usage_session(username, accelerator_type) self._has_unlimited_quota = False print( f"[QUOTA] Session {self.usage_session_id} started for {username} ({accelerator_type}), estimated cost: {estimated_cost}" ) else: - self.usage_session_id = None self._has_unlimited_quota = True start_time = int(time.time()) @@ -802,6 +754,17 @@ async def start(self): } ) + # Inject allowed origins into notebook server startup args + if self.notebook_allowed_origins: + origin_pat = "|".join(re.escape(o) if o != "*" else ".*" for o in self.notebook_allowed_origins) + extra_args = list(self.args or []) + extra_args += [ + f"--ServerApp.allow_origin_pat={origin_pat}", + ] + if "*" in self.notebook_allowed_origins: + extra_args.append("--ServerApp.allow_origin=*") + self.args = extra_args + # Prefer a repo URL provided by the frontend only; do not fallback to config try: repo_url = str(self.user_options.get("repo_url", "") or "").strip() @@ -947,19 +910,29 @@ async def stop(self, now=False): # Clean up any leftover git token secrets await self._cleanup_git_token_secrets() - if self.quota_enabled and hasattr(self, "usage_session_id") and self.usage_session_id: - session_id = self.usage_session_id - username = self.user.name - self.usage_session_id = None + username = self.user.name + try: + from core.quota import get_quota_manager - try: - from core.quota import get_quota_manager + quota_manager = get_quota_manager() + quota_rates = self.quota_rates if self.quota_enabled else None - quota_manager = get_quota_manager() - duration, quota_used = quota_manager.end_usage_session(session_id, 
self.quota_rates) - print(f"[QUOTA] Session ended for {username}. Duration: {duration} min, Quota used: {quota_used}") - except Exception as e: - print(f"[QUOTA] Error ending session for {username}: {e}") + session_id = getattr(self, "usage_session_id", None) + self.usage_session_id = None + + if session_id: + duration, quota_used = quota_manager.end_usage_session(session_id, quota_rates) + print(f"[USAGE] Session ended for {username}. Duration: {duration} min, Quota used: {quota_used}") + else: + # Hub may have restarted and lost in-memory session id — find and close any active session for this user + active = quota_manager.get_active_session(username) + if active: + duration, quota_used = quota_manager.end_usage_session(active["session_id"], quota_rates) + print( + f"[USAGE] Recovered session for {username}. Duration: {duration} min, Quota used: {quota_used}" + ) + except Exception as e: + print(f"[USAGE] Error ending session for {username}: {e}") if hasattr(self, "check_timer") and self.check_timer: with contextlib.suppress(Exception): diff --git a/runtime/hub/core/stats_handlers.py b/runtime/hub/core/stats_handlers.py new file mode 100644 index 0000000..e13cdba --- /dev/null +++ b/runtime/hub/core/stats_handlers.py @@ -0,0 +1,545 @@ +# Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. 
+# SPDX-License-Identifier: MIT + +import json +from datetime import datetime, timedelta +from functools import lru_cache + +from jupyterhub.apihandlers import APIHandler +from tornado import web + +from core.config import HubConfig +from core.database import session_scope +from core.quota.orm import UsageSession + + +@lru_cache(maxsize=1) +def _resource_label_data() -> tuple[dict[str, str], dict[str, str], dict[str, set[str]]]: + """Return cached mappings for resource and accelerator labels.""" + config = HubConfig.get() + resource_labels: dict[str, str] = {} + accelerator_labels: dict[str, str] = {} + accelerator_to_resources: dict[str, set[str]] = {} + + for key, meta in config.resources.metadata.items(): + label = meta.description or key + resource_labels[key] = label + + accel_keys = set(meta.acceleratorKeys or []) + if meta.accelerator: + accel_keys.add(meta.accelerator) + for acc_key in accel_keys: + accelerator_to_resources.setdefault(acc_key, set()).add(key) + + for key, accel in config.accelerators.items(): + accelerator_labels[key] = accel.displayName or key + + # Always ensure CPU fallback label + accelerator_labels.setdefault("cpu", "CPU") + + return (resource_labels, accelerator_labels, accelerator_to_resources) + + +def _resource_display(resource_type: str) -> str: + """Resolve a human-friendly label for a given resource or accelerator key.""" + resource_labels, accelerator_labels, accelerator_to_resources = _resource_label_data() + + if resource_type in resource_labels: + return resource_labels[resource_type] + + candidate_resources = accelerator_to_resources.get(resource_type, set()) + if len(candidate_resources) == 1: + candidate = next(iter(candidate_resources)) + return resource_labels.get(candidate, candidate) + + if resource_type in accelerator_labels: + return accelerator_labels[resource_type] + + return resource_type + + +def _accelerator_display(accelerator_key: str | None) -> str | None: + """Return a human-friendly accelerator label if 
available.""" + if not accelerator_key: + return None + + _, accelerator_labels, _ = _resource_label_data() + return accelerator_labels.get(accelerator_key, accelerator_key) + + +def _require_admin(handler): + if not handler.current_user.admin: + handler.set_status(403) + handler.set_header("Content-Type", "application/json") + handler.finish(json.dumps({"error": "Admin access required"})) + return False + return True + + +class StatsOverviewHandler(APIHandler): + """Summary stats for the dashboard overview cards.""" + + @web.authenticated + async def get(self): + assert self.current_user is not None + if not _require_admin(self): + return + + loop = __import__("asyncio").get_event_loop() + result = await loop.run_in_executor(None, self._query) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + def _query(self): + from jupyterhub.orm import User + + week_ago = datetime.now() - timedelta(days=7) + + total_users = self.db.query(User).count() + users_this_week = self.db.query(User).filter(User.last_activity >= week_ago).count() + + with session_scope() as session: + active_sessions = session.query(UsageSession).filter(UsageSession.status == "active").count() + total_minutes_row = session.execute( + __import__("sqlalchemy").text( + "SELECT COALESCE(SUM(duration_minutes), 0) FROM quota_usage_sessions " + "WHERE status IN ('completed', 'cleaned_up') AND duration_minutes IS NOT NULL" + ) + ).scalar() + + return { + "total_users": total_users, + "active_sessions": active_sessions, + "total_usage_minutes": int(total_minutes_row or 0), + "users_this_week": users_this_week, + } + + +class StatsUsageHandler(APIHandler): + """Usage time series for the trend line chart, supporting day/week granularity.""" + + @web.authenticated + async def get(self): + assert self.current_user is not None + if not _require_admin(self): + return + + try: + days = int(self.get_argument("days", "30")) + days = max(1, min(days, 365)) + except ValueError: + 
days = 30 + + granularity = self.get_argument("granularity", "day") + if granularity not in ("day", "week"): + granularity = "day" + + loop = __import__("asyncio").get_event_loop() + result = await loop.run_in_executor(None, self._query, days, granularity) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + def _query(self, days: int, granularity: str): + import sqlalchemy as sa + + now = datetime.now() + since = now - timedelta(days=days) + + if granularity == "week": + group_expr = "strftime('%Y-W%W', start_time)" + else: + group_expr = "DATE(start_time)" + + with session_scope() as session: + rows = session.execute( + sa.text( + f"SELECT {group_expr} as period, " + "COALESCE(SUM(duration_minutes), 0) as minutes, " + "COUNT(*) as sessions, " + "COUNT(DISTINCT username) as users " + "FROM quota_usage_sessions " + "WHERE status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + f"GROUP BY {group_expr} " + "ORDER BY period ASC" + ), + {"since": since}, + ).fetchall() + + by_period = {str(r[0]): (int(r[1]), int(r[2]), int(r[3])) for r in rows} + + result = [] + if granularity == "day": + d = since.date() + today = now.date() + while d <= today: + key = str(d) + mins, sess, users = by_period.get(key, (0, 0, 0)) + result.append({"date": key, "minutes": mins, "sessions": sess, "users": users}) + d += timedelta(days=1) + else: + # Iterate week by week from the Monday of the starting week + + d = since.date() + d -= timedelta(days=d.weekday()) # rewind to Monday + today = now.date() + while d <= today: + key = d.strftime("%Y-W%W") + mins, sess, users = by_period.get(key, (0, 0, 0)) + result.append({"date": key, "minutes": mins, "sessions": sess, "users": users}) + d += timedelta(weeks=1) + + return {"daily_usage": result} + + +class StatsDistributionHandler(APIHandler): + """Resource distribution and top users for pie chart and leaderboard.""" + + @web.authenticated + async def get(self): + assert self.current_user is 
not None + if not _require_admin(self): + return + + try: + days = int(self.get_argument("days", "30")) + days = max(1, min(days, 365)) + except ValueError: + days = 30 + + loop = __import__("asyncio").get_event_loop() + result = await loop.run_in_executor(None, self._query, days) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + def _query(self, days: int): + import sqlalchemy as sa + + since = datetime.now() - timedelta(days=days) + + with session_scope() as session: + resource_rows = session.execute( + sa.text( + "SELECT resource_type, " + "COALESCE(SUM(duration_minutes), 0) as minutes, " + "COUNT(*) as sessions, " + "COUNT(DISTINCT username) as users, " + "COALESCE(AVG(duration_minutes), 0) as avg_minutes " + "FROM quota_usage_sessions " + "WHERE status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + "GROUP BY resource_type " + "ORDER BY minutes DESC" + ), + {"since": since}, + ).fetchall() + + top_user_rows = session.execute( + sa.text( + "SELECT username, " + "COALESCE(SUM(duration_minutes), 0) as total_minutes, " + "COUNT(*) as sessions " + "FROM quota_usage_sessions " + "WHERE status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + "GROUP BY username " + "ORDER BY total_minutes DESC " + "LIMIT 10" + ), + {"since": since}, + ).fetchall() + + return { + "by_resource": [ + { + "resource_type": row[0], + "resource_display": _resource_display(row[0]), + "minutes": int(row[1]), + "sessions": int(row[2]), + "users": int(row[3]), + "avg_minutes": round(float(row[4]), 1), + } + for row in resource_rows + ], + "top_users": [ + { + "username": row[0], + "total_minutes": int(row[1]), + "sessions": int(row[2]), + } + for row in top_user_rows + ], + } + + +class StatsUserHandler(APIHandler): + """Per-user usage detail: time series + resource breakdown + recent sessions.""" + + @web.authenticated + async def get(self, username: str): + assert self.current_user is not None + if not 
_require_admin(self): + return + + try: + days = int(self.get_argument("days", "30")) + days = max(1, min(days, 365)) + except ValueError: + days = 30 + + granularity = self.get_argument("granularity", "day") + if granularity not in ("day", "week"): + granularity = "day" + + loop = __import__("asyncio").get_event_loop() + result = await loop.run_in_executor(None, self._query, username, days, granularity) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + def _query(self, username: str, days: int, granularity: str): + import sqlalchemy as sa + + since = datetime.now() - timedelta(days=days) + + if granularity == "week": + group_expr = "strftime('%Y-W%W', start_time)" + else: + group_expr = "DATE(start_time)" + + with session_scope() as session: + # Time series for this user + usage_rows = session.execute( + sa.text( + f"SELECT {group_expr} as period, " + "COALESCE(SUM(duration_minutes), 0) as minutes, " + "COUNT(*) as sessions " + "FROM quota_usage_sessions " + "WHERE username = :username " + "AND status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + f"GROUP BY {group_expr} " + "ORDER BY period ASC" + ), + {"username": username, "since": since}, + ).fetchall() + + # Resource breakdown for this user + resource_rows = session.execute( + sa.text( + "SELECT resource_type, " + "COALESCE(SUM(duration_minutes), 0) as minutes, " + "COUNT(*) as sessions " + "FROM quota_usage_sessions " + "WHERE username = :username " + "AND status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + "GROUP BY resource_type " + "ORDER BY minutes DESC" + ), + {"username": username, "since": since}, + ).fetchall() + + # Recent sessions (last 20) + session_rows = session.execute( + sa.text( + "SELECT resource_type, accelerator_type, start_time, end_time, duration_minutes, status " + "FROM quota_usage_sessions " + "WHERE username = :username " + "AND start_time >= :since " + "ORDER BY start_time DESC " + "LIMIT 20" + ), + 
{"username": username, "since": since}, + ).fetchall() + + # Totals + totals_row = session.execute( + sa.text( + "SELECT COALESCE(SUM(duration_minutes), 0), COUNT(*) " + "FROM quota_usage_sessions " + "WHERE username = :username " + "AND status IN ('completed', 'cleaned_up') " + "AND start_time >= :since" + ), + {"username": username, "since": since}, + ).fetchone() + + return { + "username": username, + "total_minutes": int(totals_row[0] or 0), + "total_sessions": int(totals_row[1] or 0), + "usage": [{"date": str(r[0]), "minutes": int(r[1]), "sessions": int(r[2])} for r in usage_rows], + "by_resource": [ + { + "resource_type": r[0], + "resource_display": _resource_display(r[0]), + "minutes": int(r[1]), + "sessions": int(r[2]), + } + for r in resource_rows + ], + "recent_sessions": [ + { + "resource_type": r[0], + "resource_display": _resource_display(r[0]), + "accelerator_type": r[1], + "accelerator_display": _accelerator_display(r[1]), + "start_time": str(r[2]), + "end_time": str(r[3]) if r[3] else None, + "duration_minutes": int(r[4]) if r[4] is not None else None, + "status": r[5], + } + for r in session_rows + ], + } + + +class StatsHourlyHandler(APIHandler): + """Usage distribution by hour of day.""" + + @web.authenticated + async def get(self): + assert self.current_user is not None + if not _require_admin(self): + return + + try: + days = int(self.get_argument("days", "30")) + days = max(1, min(days, 365)) + except ValueError: + days = 30 + + try: + # tz_offset: minutes ahead of UTC (e.g. 
UTC+8 → 480, UTC-5 → -300) + tz_offset = int(self.get_argument("tz_offset", "0")) + tz_offset = max(-720, min(840, tz_offset)) + except ValueError: + tz_offset = 0 + + loop = __import__("asyncio").get_event_loop() + result = await loop.run_in_executor(None, self._query, days, tz_offset) + self.set_header("Content-Type", "application/json") + self.finish(json.dumps(result)) + + def _query(self, days: int, tz_offset: int): + import sqlalchemy as sa + + since = datetime.now() - timedelta(days=days) + offset_sign = "+" if tz_offset >= 0 else "-" + offset_abs = abs(tz_offset) + offset_expr = f"datetime(start_time, '{offset_sign}{offset_abs} minutes')" + + with session_scope() as session: + rows = session.execute( + sa.text( + f"SELECT CAST(strftime('%H', {offset_expr}) AS INTEGER) as hour, " + "COUNT(*) as sessions, " + "COALESCE(SUM(duration_minutes), 0) as minutes " + "FROM quota_usage_sessions " + "WHERE status IN ('completed', 'cleaned_up') " + "AND start_time >= :since " + "GROUP BY hour ORDER BY hour ASC" + ), + {"since": since}, + ).fetchall() + + by_hour = {int(r[0]): {"sessions": int(r[1]), "minutes": int(r[2])} for r in rows} + return { + "hourly": [ + { + "hour": h, + "sessions": by_hour.get(h, {}).get("sessions", 0), + "minutes": by_hour.get(h, {}).get("minutes", 0), + } + for h in range(24) + ] + } + + +IDLE_WARN_MINUTES = 120 # sessions longer than this are flagged as potentially idle + + +def _active_sessions_data() -> dict: + import sqlalchemy as sa + + cutoff = datetime.now() - timedelta(minutes=30) + + with session_scope() as session: + active_rows = session.execute( + sa.text( + "SELECT q.username, q.resource_type, q.accelerator_type, q.start_time " + "FROM quota_usage_sessions q " + "JOIN spawners s ON s.server_id IS NOT NULL " + "JOIN users u ON u.id = s.user_id AND LOWER(u.name) = LOWER(q.username) " + "WHERE q.status = 'active' " + "ORDER BY q.start_time ASC" + ) + ).fetchall() + + pending_rows = session.execute( + sa.text( + "SELECT u.name, 
s.started " + "FROM spawners s " + "JOIN users u ON u.id = s.user_id " + "WHERE s.server_id IS NULL AND s.started IS NOT NULL AND s.started > :cutoff " + "ORDER BY s.started ASC" + ), + {"cutoff": cutoff}, + ).fetchall() + + now = datetime.now() + return { + "active_sessions": [ + { + "username": r[0], + "resource_type": r[1], + "resource_display": _resource_display(r[1]), + "accelerator_type": r[2], + "accelerator_display": _accelerator_display(r[2]), + "start_time": str(r[3]), + "elapsed_minutes": int((now - datetime.fromisoformat(str(r[3]))).total_seconds() / 60), + "idle_warning": int((now - datetime.fromisoformat(str(r[3]))).total_seconds() / 60) + >= IDLE_WARN_MINUTES, + } + for r in active_rows + ], + "pending_spawns": [ + { + "username": r[0], + "started": str(r[1]), + "waiting_minutes": int((now - datetime.fromisoformat(str(r[1]))).total_seconds() / 60), + } + for r in pending_rows + ], + } + + +class StatsActiveSSEHandler(APIHandler): + """SSE stream of currently active sessions, pushed every 5 seconds.""" + + def check_xsrf_cookie(self): + # EventSource cannot send custom headers, so XSRF is skipped for this read-only GET + pass + + @web.authenticated + async def get(self): + import asyncio + + assert self.current_user is not None + if not _require_admin(self): + return + + self.set_header("Content-Type", "text/event-stream") + self.set_header("Cache-Control", "no-cache") + self.set_header("X-Accel-Buffering", "no") + self.set_header("Connection", "keep-alive") + + loop = asyncio.get_event_loop() + try: + while True: + data = await loop.run_in_executor(None, _active_sessions_data) + self.write(f"data: {json.dumps(data)}\n\n") + await self.flush() + await asyncio.sleep(5) + except Exception: + pass diff --git a/runtime/hub/frontend/apps/admin/package.json b/runtime/hub/frontend/apps/admin/package.json index 4413c84..3d309e3 100644 --- a/runtime/hub/frontend/apps/admin/package.json +++ b/runtime/hub/frontend/apps/admin/package.json @@ -17,13 +17,16 @@ 
"react-bootstrap": "^2.10.10", "react-dom": "catalog:", "react-router-dom": "^7.13.0", - "react-select": "^5.10.2" + "react-select": "^5.10.2", + "recharts": "catalog:" }, "devDependencies": { "@eslint/js": "catalog:", + "@playwright/test": "^1.58.2", "@types/node": "catalog:", "@types/react": "catalog:", "@types/react-dom": "catalog:", + "@tailwindcss/vite": "catalog:", "@vitejs/plugin-react": "catalog:", "eslint": "catalog:", "eslint-plugin-react-hooks": "catalog:", diff --git a/runtime/hub/frontend/apps/admin/src/App.tsx b/runtime/hub/frontend/apps/admin/src/App.tsx index b9d7ba3..00cc74d 100644 --- a/runtime/hub/frontend/apps/admin/src/App.tsx +++ b/runtime/hub/frontend/apps/admin/src/App.tsx @@ -21,6 +21,7 @@ import { Container } from 'react-bootstrap'; import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'; import { UserList } from './pages/UserList'; import { GroupList } from './pages/GroupList'; +import { Dashboard } from './pages/Dashboard'; import 'bootstrap/dist/css/bootstrap.min.css'; import 'bootstrap-icons/font/bootstrap-icons.css'; @@ -37,6 +38,7 @@ function App() { } /> } /> + } /> } /> diff --git a/runtime/hub/frontend/apps/admin/src/components/BatchPasswordModal.tsx b/runtime/hub/frontend/apps/admin/src/components/BatchPasswordModal.tsx new file mode 100644 index 0000000..46fcf43 --- /dev/null +++ b/runtime/hub/frontend/apps/admin/src/components/BatchPasswordModal.tsx @@ -0,0 +1,300 @@ +// Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. 
+// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +import { useState, useMemo } from 'react'; +import { Modal, Button, Form, Alert, Spinner, InputGroup, Badge } from 'react-bootstrap'; +import * as api from '@auplc/shared'; + +interface Props { + show: boolean; + usernames: string[]; + onHide: () => void; +} + +interface PasswordResult { + username: string; + password: string; + status: 'success' | 'failed'; + error?: string; +} + +export function BatchPasswordModal({ show, usernames, onHide }: Props) { + const [generateRandom, setGenerateRandom] = useState(true); + const [password, setPassword] = useState(''); + const [forceChange, setForceChange] = useState(true); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [results, setResults] = useState([]); + const [step, setStep] = useState<'input' | 'result'>('input'); + + const generateRandomPassword = () => { + const chars = 'ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz23456789'; + let result = ''; + for (let i = 0; i < 16; i++) { + result += chars.charAt(Math.floor(Math.random() * chars.length)); + } + return result; + }; + + const handleSubmit = async () => { + setError(null); + setLoading(true); + + try { + const entries = usernames.map(username => ({ + username, + password: generateRandom ? generateRandomPassword() : password, + })); + + const response = await api.batchSetPasswords(entries, forceChange); + + const pwResults: PasswordResult[] = entries.map(entry => { + const r = response.results.find(r => r.username === entry.username); + return { + username: entry.username, + password: entry.password, + status: r?.status === 'success' ? 'success' as const : 'failed' as const, + error: r?.error, + }; + }); + + setResults(pwResults); + setStep('result'); + + if (response.failed > 0) { + setError(`${response.failed} password(s) failed to set`); + } + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to set passwords'); + } finally { + setLoading(false); + } + }; + + const handleClose = () => { + setGenerateRandom(true); + setPassword(''); + setForceChange(true); + setError(null); + setResults([]); + setStep('input'); + onHide(); + }; + + const successResults = useMemo( + () => results.filter(r => r.status === 'success'), + [results] + ); + + const copyToClipboard = () => { + const text = successResults + .map(r => `${r.username}\t${r.password}`) + .join('\n'); + navigator.clipboard.writeText(text); + }; + + const downloadCsv = () => { + const header = 'username,password,status\n'; + const rows = results + .map(r => `${r.username},${r.status === 'success' ? r.password : ''},${r.status}`) + .join('\n'); + const blob = new Blob([header + rows], { type: 'text/csv;charset=utf-8;' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `passwords-${new Date().toISOString().slice(0, 10)}.csv`; + a.click(); + URL.revokeObjectURL(url); + }; + + return ( + + + + {step === 'input' ? `Reset Passwords (${usernames.length} users)` : 'Password Reset Results'} + + + + {step === 'input' ? ( +
+ {error && {error}} + + + This will reset passwords for {usernames.length} selected user(s). + Users will need to use the new passwords to log in. + + +
+ Users:{' '} + {usernames.slice(0, 10).map(name => ( + {name} + ))} + {usernames.length > 10 && ( + +{usernames.length - 10} more + )} +
+ + + setGenerateRandom(e.target.checked)} + /> + + + {!generateRandom && ( + + Password (same for all users) + + setPassword(e.target.value)} + placeholder="Enter password" + minLength={8} + /> + + + + Minimum 8 characters + + + )} + + + setForceChange(e.target.checked)} + /> + +
+ ) : ( +
+ {(() => { + const successCount = successResults.length; + const failedCount = results.length - successCount; + return ( + <> + {successCount > 0 && ( + + {successCount} password(s) reset successfully. + + )} + {failedCount > 0 && ( + + {failedCount} password(s) failed to set. + + )} + + ); + })()} + {error && ( + + {error} + + )} +

+ Copy the new credentials and share them with the users: +

+
+ + + + + + + + + + {results.map((r) => ( + + + + + + ))} + +
UsernamePasswordStatus
{r.username} + {r.status === 'success' ? ( + {r.password} + ) : ( + (failed) + )} + + {r.status === 'success' ? ( + OK + ) : ( + Failed + )} +
+
+ {forceChange && ( + + Users will be prompted to change their password on next login. + + )} +
+ )} +
+ + {step === 'input' ? ( + <> + + + + ) : ( + <> + + + + + )} + +
+ ); +} diff --git a/runtime/hub/frontend/apps/admin/src/components/CreateUserModal.tsx b/runtime/hub/frontend/apps/admin/src/components/CreateUserModal.tsx index d19d5e1..a1b87c9 100644 --- a/runtime/hub/frontend/apps/admin/src/components/CreateUserModal.tsx +++ b/runtime/hub/frontend/apps/admin/src/components/CreateUserModal.tsx @@ -17,22 +17,28 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -import { useState } from 'react'; -import { Modal, Button, Form, Alert, Spinner, InputGroup } from 'react-bootstrap'; +import { useState, useCallback, useMemo } from 'react'; +import { Modal, Button, Form, Alert, Spinner, InputGroup, Row, Col, Badge } from 'react-bootstrap'; import * as api from '@auplc/shared'; interface Props { show: boolean; onHide: () => void; onSuccess: () => void; + quotaEnabled?: boolean; + defaultQuota?: number; } interface CreatedUser { username: string; password: string; + status: 'created' | 'existed' | 'failed'; + passwordSet: boolean; + quotaSet: boolean; + error?: string; } -export function CreateUserModal({ show, onHide, onSuccess }: Props) { +export function CreateUserModal({ show, onHide, onSuccess, quotaEnabled = false, defaultQuota = 0 }: Props) { const [usernames, setUsernames] = useState(''); const [password, setPassword] = useState(''); const [generateRandom, setGenerateRandom] = useState(true); @@ -42,6 +48,16 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) { const [error, setError] = useState(null); const [createdUsers, setCreatedUsers] = useState([]); const [step, setStep] = useState<'input' | 'result'>('input'); + const [prefix, setPrefix] = useState(''); + const [count, setCount] = useState(10); + const [startNum, setStartNum] = useState(1); + const [quotaValue, setQuotaValue] = useState(String(defaultQuota || 0)); + + const handleGenerateNames = useCallback(() => { + if (!prefix.trim()) return; + const names = Array.from({ length: count }, (_, i) => 
`${prefix.trim()}${startNum + i}`); + setUsernames(names.join('\n')); + }, [prefix, count, startNum]); const generateRandomPassword = () => { const chars = 'ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz23456789'; @@ -64,27 +80,119 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) { .filter(n => n.length > 0); if (names.length === 0) { - throw new Error('Please enter at least one username'); + setError('Please enter at least one username'); + setLoading(false); + return; } - const results: CreatedUser[] = []; + // Generate passwords for all users upfront + const passwordMap = new Map( + names.map(username => [ + username, + generateRandom ? generateRandomPassword() : password, + ]) + ); - for (const username of names) { - // Create user - await api.createUser(username, isAdmin); + // Initialize result tracking + const results: Map = new Map( + names.map(username => [ + username, + { username, password: passwordMap.get(username)!, status: 'created' as const, passwordSet: false, quotaSet: false }, + ]) + ); - // Set password - const pwd = generateRandom ? generateRandomPassword() : password; - await api.setPassword({ + const warnings: string[] = []; + + // Step 1: Batch create users + let createdNames: string[] = []; + try { + const created = await api.createUsers(names, isAdmin); + // API returns only newly created users; existing ones are silently skipped + createdNames = created.map(u => u.name); + const existedNames = names.filter(n => !createdNames.includes(n)); + for (const name of existedNames) { + const r = results.get(name)!; + r.status = 'existed'; + } + if (existedNames.length > 0) { + warnings.push(`${existedNames.length} user(s) already existed: ${existedNames.join(', ')}`); + } + } catch (err) { + const msg = err instanceof Error ? 
err.message : 'Unknown error'; + // If 409 (all users exist), mark them all as existed and continue with password/quota + if (msg.includes('already exist')) { + for (const name of names) { + results.get(name)!.status = 'existed'; + } + createdNames = []; + warnings.push(`All ${names.length} user(s) already existed`); + } else { + // Fatal error - can't determine which users were created + setError(`Failed to create users: ${msg}`); + setLoading(false); + return; + } + } + + // Step 2: Set passwords (only for newly created users) + if (createdNames.length > 0) { + const passwordEntries = createdNames.map(username => ({ username, - password: pwd, - force_change: forceChange, - }); + password: passwordMap.get(username)!, + })); + + try { + const pwResult = await api.batchSetPasswords(passwordEntries, forceChange); + for (const r of pwResult.results) { + const entry = results.get(r.username); + if (entry) { + if (r.status === 'success') { + entry.passwordSet = true; + } else { + entry.error = r.error || 'Password set failed'; + } + } + } + if (pwResult.failed > 0) { + warnings.push(`${pwResult.failed} password(s) failed to set`); + } + } catch (err) { + const msg = err instanceof Error ? err.message : 'Unknown error'; + warnings.push(`Password setting failed: ${msg}`); + } + } + + // Step 3: Set quota if enabled (only for newly created users) + if (quotaEnabled && createdNames.length > 0) { + const input = quotaValue.trim(); + const isUnlimited = input === '-1' || input === '∞' || input.toLowerCase() === 'unlimited'; + const amount = isUnlimited ? 0 : (parseInt(input) || 0); + if (isUnlimited || amount > 0) { + try { + await api.batchSetQuota( + createdNames.map(username => ({ + username, + amount, + ...(isUnlimited ? { unlimited: true } : {}), + })) + ); + for (const name of createdNames) { + const entry = results.get(name); + if (entry) entry.quotaSet = true; + } + } catch (err) { + const msg = err instanceof Error ? 
err.message : 'Unknown error'; + warnings.push(`Quota setting failed: ${msg}`); + } + } + } - results.push({ username, password: pwd }); + // Set warnings as non-fatal error for display + if (warnings.length > 0) { + setError(warnings.join('\n')); } - setCreatedUsers(results); + setCreatedUsers(Array.from(results.values())); setStep('result'); onSuccess(); } catch (err) { @@ -103,21 +211,44 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) { setError(null); setCreatedUsers([]); setStep('input'); + setPrefix(''); + setCount(10); + setStartNum(1); + setQuotaValue(String(defaultQuota || 0)); onHide(); }; + const usersWithPasswords = useMemo( + () => createdUsers.filter(u => u.passwordSet), + [createdUsers] + ); + const copyToClipboard = () => { - const text = createdUsers + const text = usersWithPasswords .map(u => `${u.username}\t${u.password}`) .join('\n'); navigator.clipboard.writeText(text); }; + const downloadCsv = () => { + const header = 'username,password,status\n'; + const rows = createdUsers + .map(u => `${u.username},${u.passwordSet ? u.password : ''},${u.status}`) + .join('\n'); + const blob = new Blob([header + rows], { type: 'text/csv;charset=utf-8;' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `users-${new Date().toISOString().slice(0, 10)}.csv`; + a.click(); + URL.revokeObjectURL(url); + }; + return ( - {step === 'input' ? 'Create Users' : 'Users Created'} + {step === 'input' ? 'Create Users' : 'Results'} @@ -125,6 +256,57 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) {
{error && {error}} + + Quick generate + + + setPrefix(e.target.value)} + placeholder="Prefix, e.g. student" + /> + + + + from + setStartNum(parseInt(e.target.value) || 1)} + style={{ width: 70 }} + /> + + + + + count + setCount(parseInt(e.target.value) || 1)} + style={{ width: 70 }} + /> + + + + + + + + Usernames (one per line) + {quotaEnabled && ( + + Initial Quota + setQuotaValue(e.target.value)} + placeholder="e.g. 100, or -1 for unlimited" + /> + + Leave as 0 to skip. Use -1 or "unlimited" for unlimited. + + + )} + setIsAdmin(e.target.checked)} /> +
) : (
- - Successfully created {createdUsers.length} user(s)! - + {(() => { + const newUsers = createdUsers.filter(u => u.status === 'created'); + const existedUsers = createdUsers.filter(u => u.status === 'existed'); + const failedPw = newUsers.filter(u => !u.passwordSet).length; + return ( + <> + {newUsers.length > 0 && ( + + {newUsers.length} user(s) created successfully. + + )} + {existedUsers.length > 0 && ( + + {existedUsers.length} user(s) already existed and were skipped (no changes made). + + )} + {failedPw > 0 && ( + + {failedPw} newly created user(s) failed to set password. + + )} + {newUsers.length === 0 && existedUsers.length > 0 && ( + + No new users were created. All usernames already exist in the system. + + )} + + ); + })()} + {error && ( + + {error.split('\n').map((line, i) => ( +
{line}
+ ))}
+
+ )}

Copy the credentials below and share them with the users:

@@ -203,13 +434,35 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) { Username Password + Status {createdUsers.map((user) => ( {user.username} - {user.password} + + {user.passwordSet ? ( + {user.password} + ) : user.status === 'existed' ? ( + - + ) : ( + (failed) + )} + + + {user.status === 'created' ? ( + user.passwordSet ? ( + New + ) : ( + PW failed + ) + ) : user.status === 'existed' ? ( + Skipped + ) : ( + Failed + )} + ))} @@ -246,8 +499,11 @@ export function CreateUserModal({ show, onHide, onSuccess }: Props) { ) : ( <> - +
- + {!readOnly && ( + + )} ); }); @@ -67,10 +74,17 @@ export function EditGroupModal({ show, group, onHide, onUpdate, onDelete }: Prop const [error, setError] = useState(null); const [properties, setProperties] = useState>({}); - // Initialize state when modal opens + const isGitHubTeam = group?.source === 'github-team'; + const isSystemGroup = group?.source === 'system'; + const isProtected = isGitHubTeam || isSystemGroup; + + // Initialize state when modal opens (exclude reserved keys) const handleEnter = () => { if (group) { - setProperties({ ...group.properties }); + const userProps = Object.fromEntries( + Object.entries(group.properties).filter(([k]) => !RESERVED_KEYS.has(k)) + ); + setProperties(userProps); setError(null); } }; @@ -81,6 +95,11 @@ export function EditGroupModal({ show, group, onHide, onUpdate, onDelete }: Prop return; } + if (RESERVED_KEYS.has(newPropertyKey)) { + setError(`"${newPropertyKey}" is a reserved key`); + return; + } + setProperties(prev => { if (newPropertyKey in prev) { setError('Property key already exists'); @@ -137,48 +156,108 @@ export function EditGroupModal({ show, group, onHide, onUpdate, onDelete }: Prop } }; + const handleReleaseProtection = async () => { + if (!group) return; + + const sourceLabel = isGitHubTeam ? 'GitHub-synced' : 'system-managed'; + if (!window.confirm( + `Release protection on "${group.name}"?\n\n` + + `This will convert it from a ${sourceLabel} group to a manually managed group. ` + + `Members will become editable and the group can be deleted.\n\n` + + `Note: If a GitHub team with this name still exists, the group will be re-protected when a team member logs in.` + )) { + return; + } + + try { + setLoading(true); + setError(null); + await api.updateGroup(group.name, { release_protection: true }); + onUpdate(); + onHide(); + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to release protection'); + } finally { + setLoading(false); + } + }; + if (!group) return null; return ( - Group Properties: {group.name} + + Group Properties: {group.name} + {error && setError(null)}>{error}} + {isGitHubTeam && ( + + + + Members are auto-synced from GitHub Teams. You can add users manually, + but synced members may be re-added or removed when the user next logs in and starts a server. + + + )} + + {isSystemGroup && ( + + System-managed group — membership is read-only. + + )} + + {/* Resources (read-only, from config) */} + {group.resources && group.resources.length > 0 && ( +
+ Mapped Resources +
+ {group.resources.map(r => ( + {r} + ))} +
+ + Resource mappings are defined in values.yaml and cannot be changed from the UI. + +
+ )} + {/* Manage Properties */}
+ Properties

Properties are key-value pairs that can be used to configure group behavior.

-
-
- setNewPropertyKey(e.target.value)} - disabled={loading} - /> -
-
- setNewPropertyValue(e.target.value)} - onKeyPress={(e) => e.key === 'Enter' && handleAddProperty()} - disabled={loading} - /> -
-
- +
+
+ setNewPropertyKey(e.target.value)} + disabled={loading} + /> +
+
+ setNewPropertyValue(e.target.value)} + onKeyPress={(e) => e.key === 'Enter' && handleAddProperty()} + disabled={loading} + /> +
+
+ +
-
{Object.keys(properties).length === 0 ? ( @@ -190,6 +269,7 @@ export function EditGroupModal({ show, group, onHide, onUpdate, onDelete }: Prop propKey={key} value={value} loading={loading} + readOnly={false} onRemove={handleRemoveProperty} /> )) @@ -198,9 +278,20 @@ export function EditGroupModal({ show, group, onHide, onUpdate, onDelete }: Prop
- + {isProtected ? ( + + ) : ( + + )}
+ ); +} diff --git a/runtime/hub/frontend/apps/admin/src/components/UserDetailModal.tsx b/runtime/hub/frontend/apps/admin/src/components/UserDetailModal.tsx new file mode 100644 index 0000000..7eb67e0 --- /dev/null +++ b/runtime/hub/frontend/apps/admin/src/components/UserDetailModal.tsx @@ -0,0 +1,208 @@ +// Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. +// SPDX-License-Identifier: MIT + +import { useState, useEffect } from 'react'; +import { Modal, Spinner, Alert } from 'react-bootstrap'; +import { + LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, +} from 'recharts'; +import { getUserDetail } from '@auplc/shared'; +import type { UserDetail } from '@auplc/shared'; + +const PIE_COLORS = ['#6366f1', '#8b5cf6', '#ec4899', '#f59e0b', '#10b981', '#3b82f6', '#ef4444']; + +function formatMinutes(minutes: number): string { + if (minutes < 60) return `${minutes}m`; + const h = Math.floor(minutes / 60); + const m = minutes % 60; + return m > 0 ? `${h}h ${m}m` : `${h}h`; +} + +function formatResourceLabel(resourceType: string, resourceDisplay?: string | null): string { + if (resourceDisplay && resourceDisplay !== resourceType) { + return `${resourceDisplay} (${resourceType})`; + } + return resourceDisplay ?? resourceType; +} + +function formatAcceleratorLabel(acceleratorType?: string | null, acceleratorDisplay?: string | null): string { + if (acceleratorDisplay && acceleratorDisplay !== acceleratorType) { + return acceleratorType ? `${acceleratorDisplay} (${acceleratorType})` : acceleratorDisplay; + } + return acceleratorDisplay ?? acceleratorType ?? 
''; +} + +interface UserDetailModalProps { + username: string | null; + days?: number; + granularity?: 'day' | 'week'; + onClose: () => void; +} + +export function UserDetailModal({ username, days = 30, granularity = 'day', onClose }: UserDetailModalProps) { + const [detail, setDetail] = useState(null); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + useEffect(() => { + if (!username) return; + setLoading(true); + setError(null); + setDetail(null); + getUserDetail(username, days, granularity) + .then(setDetail) + .catch(e => setError(e instanceof Error ? e.message : 'Failed to load')) + .finally(() => setLoading(false)); + }, [username, days, granularity]); + + return ( + + + + + {username} + + + + {loading && ( +
+ +
+ )} + {error && {error}} + {detail && !loading && ( + <> + {/* Summary row */} +
+
+

Total Usage

+

+ {formatMinutes(detail.total_minutes)} +

+
+
+

Sessions

+

+ {detail.total_sessions} +

+
+
+

Avg per Session

+

+ {detail.total_sessions > 0 + ? formatMinutes(Math.round(detail.total_minutes / detail.total_sessions)) + : '—'} +

+
+
+ + {/* Usage chart */} + {detail.usage.length > 0 ? ( +
+

+ Usage over time +

+ + + + d.slice(5)} /> + + [`${v} min`, 'Usage']} + contentStyle={{ backgroundColor: 'var(--bs-body-bg)', border: '1px solid var(--bs-border-color)', color: 'var(--bs-body-color)' }} + /> + + + +
+ ) : ( +

No usage data for this period

+ )} + + {/* Resource breakdown */} + {detail.by_resource.length > 0 && ( +
+

+ By resource +

+
+ {detail.by_resource.map((r, i) => ( + + {formatResourceLabel(r.resource_type, r.resource_display)} · {formatMinutes(r.minutes)} · {r.sessions} sessions + + ))} +
+
+ )} + + {/* Recent sessions */} + {detail.recent_sessions.length > 0 && ( +
+

+ Recent sessions +

+
+ + + + + + + + + + + {detail.recent_sessions.map((s, i) => { + const showResourceCode = Boolean(s.resource_display && s.resource_display !== s.resource_type); + const acceleratorLabel = formatAcceleratorLabel(s.accelerator_type, s.accelerator_display); + const showAcceleratorCode = + Boolean(s.accelerator_display && s.accelerator_type && s.accelerator_display !== s.accelerator_type); + return ( + + + + + + + ); + })} + +
ResourceStartDurationStatus
+
+ {s.resource_display ?? s.resource_type} + {showResourceCode && ( + + {s.resource_type} + + )} +
+ {acceleratorLabel && ( +
+ {acceleratorLabel} + {showAcceleratorCode && ( + + {s.accelerator_type} + + )} +
+ )} +
+ {s.start_time.slice(0, 16).replace('T', ' ')} + {s.duration_minutes != null ? formatMinutes(s.duration_minutes) : '—'} + + {s.status} + +
+
+
+ )} + + )} +
+
+ ); +} diff --git a/runtime/hub/frontend/apps/admin/src/index.css b/runtime/hub/frontend/apps/admin/src/index.css index 6b5b190..0653f88 100644 --- a/runtime/hub/frontend/apps/admin/src/index.css +++ b/runtime/hub/frontend/apps/admin/src/index.css @@ -1,9 +1,15 @@ /* Custom styles for JupyterHub Admin UI */ /* Bootstrap handles most styling */ +@import "tailwindcss" prefix(tw); + + body { margin: 0; min-height: 100vh; + background-color: var(--bs-body-bg); + color: var(--bs-body-color); + transition: background-color 0.2s, color 0.2s; } /* Ensure code blocks are visible in both light and dark modes */ diff --git a/runtime/hub/frontend/apps/admin/src/pages/Dashboard.tsx b/runtime/hub/frontend/apps/admin/src/pages/Dashboard.tsx new file mode 100644 index 0000000..2c0c33d --- /dev/null +++ b/runtime/hub/frontend/apps/admin/src/pages/Dashboard.tsx @@ -0,0 +1,490 @@ +// Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. +// SPDX-License-Identifier: MIT + +import { useState, useEffect, useCallback, useRef } from 'react'; +import { Spinner, Alert } from 'react-bootstrap'; +import { + LineChart, Line, BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer, +} from 'recharts'; +import { NavBar } from '../components/NavBar'; +import { UserDetailModal } from '../components/UserDetailModal'; +import { + getDashboardOverview, + getUsageTimeSeries, + getDistribution, + getHourlyDistribution, + createActiveSessionsSSE, + stopServer, +} from '@auplc/shared'; +import type { + DashboardOverview, + DailyUsage, + ResourceDistribution, + TopUser, + ActiveSession, + PendingSpawn, + HourlyUsage, +} from '@auplc/shared'; + +const PIE_COLORS = ['#6366f1', '#8b5cf6', '#ec4899', '#f59e0b', '#10b981', '#3b82f6', '#ef4444']; + +type Granularity = 'day' | 'week'; + +const GRANULARITY_OPTIONS: { label: string; value: Granularity }[] = [ + { label: 'Daily', value: 'day' }, + { label: 'Weekly', value: 'week' }, +]; + +function toDateStr(d: Date): string 
{ + return d.toISOString().slice(0, 10); +} + +function daysBetween(start: string, end: string): number { + const ms = new Date(end).getTime() - new Date(start).getTime(); + return Math.max(1, Math.round(ms / 86400000)); +} + +function formatMinutes(minutes: number): string { + if (minutes < 60) return `${minutes}m`; + const h = Math.floor(minutes / 60); + const m = minutes % 60; + return m > 0 ? `${h}h ${m}m` : `${h}h`; +} + +function formatResourceLabel(resourceType: string, resourceDisplay?: string | null): string { + if (resourceDisplay && resourceDisplay !== resourceType) { + return `${resourceDisplay} (${resourceType})`; + } + return resourceDisplay ?? resourceType; +} + +function formatAcceleratorLabel(acceleratorType?: string | null, acceleratorDisplay?: string | null): string { + if (acceleratorDisplay && acceleratorDisplay !== acceleratorType) { + return acceleratorType ? `${acceleratorDisplay} (${acceleratorType})` : acceleratorDisplay; + } + return acceleratorDisplay ?? acceleratorType ?? ''; +} + +interface StatCardProps { + title: string; + value: string | number; + icon: string; + color: string; +} + +function StatCard({ title, value, icon, color }: StatCardProps) { + return ( +
+
+ +
+
+

{title}

+

{value}

+
+
+ ); +} + +// ─── Main Dashboard ─────────────────────────────────────────────────────────── + +export function Dashboard() { + const today = toDateStr(new Date()); + const [startDate, setStartDate] = useState(() => toDateStr(new Date(Date.now() - 30 * 86400000))); + const [endDate, setEndDate] = useState(today); + const [granularity, setGranularity] = useState('day'); + const days = daysBetween(startDate, endDate); + const [overview, setOverview] = useState(null); + const [dailyUsage, setDailyUsage] = useState([]); + const [byResource, setByResource] = useState([]); + const [topUsers, setTopUsers] = useState([]); + const [hourlyUsage, setHourlyUsage] = useState([]); + const [activeSessions, setActiveSessions] = useState([]); + const [pendingSpawns, setPendingSpawns] = useState([]); + const [loading, setLoading] = useState(true); + const [chartLoading, setChartLoading] = useState(false); + const [error, setError] = useState(null); + const [selectedUser, setSelectedUser] = useState(null); + + // SSE: live active sessions + pending spawns + useEffect(() => { + const es = createActiveSessionsSSE(({ active_sessions, pending_spawns }) => { + setActiveSessions(active_sessions); + setPendingSpawns(pending_spawns); + }); + return () => es.close(); + }, []); + + // Load overview + distribution + initial chart when date range changes + const loadAll = useCallback(async () => { + try { + setLoading(true); + setError(null); + const [ov, usage, dist, hourly] = await Promise.all([ + getDashboardOverview(), + getUsageTimeSeries(days, granularity), + getDistribution(days), + getHourlyDistribution(days), + ]); + setOverview(ov); + setDailyUsage(usage.daily_usage); + setByResource(dist.by_resource); + setTopUsers(dist.top_users); + setHourlyUsage(hourly.hourly); + } catch (e) { + setError(e instanceof Error ? 
e.message : 'Failed to load dashboard data'); + } finally { + setLoading(false); + } + }, [days]); // eslint-disable-line react-hooks/exhaustive-deps + + // Load only time series when granularity changes (skip on initial mount, loadAll handles it) + const isFirstRender = useRef(true); + useEffect(() => { + if (isFirstRender.current) { isFirstRender.current = false; return; } + setChartLoading(true); + getUsageTimeSeries(days, granularity) + .then(u => setDailyUsage(u.daily_usage)) + .catch(e => setError(e instanceof Error ? e.message : 'Failed to load chart data')) + .finally(() => setChartLoading(false)); + }, [granularity]); // eslint-disable-line react-hooks/exhaustive-deps + + useEffect(() => { loadAll(); }, [loadAll]); + + return ( +
+ + + {/* Header row */} +
+

Usage Dashboard

+
+ setStartDate(e.target.value)} + /> + + setEndDate(e.target.value)} + /> +
+
+ + {error && {error}} + + {loading ? ( +
+ +
+ ) : ( + <> + {/* Summary cards */} +
+ + + + +
+ + {/* Active Now + Pending Spawns */} +
+ {/* Active Now */} +
+
+
Active Now
+ + + Live + + {activeSessions.length} +
+ {activeSessions.length === 0 ? ( +

No active sessions

+ ) : ( + + + + + + + + + + + + {activeSessions.map((s, i) => { + const showResourceCode = Boolean(s.resource_display && s.resource_display !== s.resource_type); + const acceleratorLabel = formatAcceleratorLabel(s.accelerator_type, s.accelerator_display); + const showAcceleratorCode = + Boolean(s.accelerator_display && s.accelerator_type && s.accelerator_display !== s.accelerator_type); + return ( + + + + + + + + ); + })} + +
UserCourseStartedElapsed
setSelectedUser(s.username)}> + + {s.username} + + +
+ {formatResourceLabel(s.resource_type, s.resource_display)} + {showResourceCode && ( + + {s.resource_type} + + )} +
+ {acceleratorLabel && ( +
+ {acceleratorLabel} + {showAcceleratorCode && ( + + {s.accelerator_type} + + )} +
+ )} +
+ {new Date(s.start_time + 'Z').toLocaleString([], { month: '2-digit', day: '2-digit', hour: '2-digit', minute: '2-digit' })} + + {s.idle_warning && } + {formatMinutes(s.elapsed_minutes)} + + +
+ )} +
+ + {/* Pending Spawns */} +
+
+
Spawning
+ + + Live + + {pendingSpawns.length} +
+ {pendingSpawns.length === 0 ? ( +

No pending spawns

+ ) : ( +
+ {pendingSpawns.map((p, i) => ( +
+ + + {p.username} + + {formatMinutes(p.waiting_minutes)} waiting +
+ ))} +
+ )} +
+
+ + {/* Usage trend — full width */} +
+
+
Usage (minutes)
+
+ {GRANULARITY_OPTIONS.map((g) => ( + + ))} +
+
+ {chartLoading ? ( +
+ ) : dailyUsage.length === 0 ? ( +

No data for this period

+ ) : ( + + + + d.slice(5)} /> + + + name === 'Minutes' ? [`${v} min`, name] : [v, name]} + contentStyle={{ backgroundColor: 'var(--bs-body-bg)', border: '1px solid var(--bs-border-color)', color: 'var(--bs-body-color)' }} + /> + + + + + + )} +
+ + {/* Hourly usage distribution — full width */} +
+
+ Sessions by Hour of Day +
+ {hourlyUsage.every(h => h.sessions === 0) ? ( +

No data for this period

+ ) : ( + + + + `${h}:00`} interval={2} /> + + [v, name === 'sessions' ? 'Sessions' : name]} + labelFormatter={h => `${h}:00 – ${h}:59`} + contentStyle={{ backgroundColor: 'var(--bs-body-bg)', border: '1px solid var(--bs-border-color)', color: 'var(--bs-body-color)' }} + /> + + + + )} +
+ + {/* Course stats + Top users — two columns */} +
+ {/* Course ranking */} +
+
+ Course Usage +
+ {byResource.length === 0 ? ( +

No data for this period

+ ) : (() => { + const maxMin = Math.max(...byResource.map(r => r.minutes)); + return ( +
+ {byResource.map((r, i) => ( +
+
+ + + {formatResourceLabel(r.resource_type, r.resource_display)} + + + {formatMinutes(r.minutes)} · {r.sessions} sessions · avg {formatMinutes(Math.round(r.avg_minutes))} + +
+
+
+
+
+ ))} +
+ ); + })()} +
+ + {/* Top users */} +
+
+ Top Users +
+ {topUsers.length === 0 ? ( +

No data for this period

+ ) : ( + + + + + + + + + + + + {topUsers.map((u, i) => ( + setSelectedUser(u.username)} + > + + + + + + + ))} + +
#UsernameTotal UsageSessionsAvg
{i + 1} + + {u.username} + + {formatMinutes(u.total_minutes)}{u.sessions}{formatMinutes(Math.round(u.total_minutes / u.sessions))}
+ )} +
+
+ + )} + + {/* Per-user detail modal */} + setSelectedUser(null)} + /> +
+ ); +} diff --git a/runtime/hub/frontend/apps/admin/src/pages/GroupList.tsx b/runtime/hub/frontend/apps/admin/src/pages/GroupList.tsx index dc26721..32b8c8e 100644 --- a/runtime/hub/frontend/apps/admin/src/pages/GroupList.tsx +++ b/runtime/hub/frontend/apps/admin/src/pages/GroupList.tsx @@ -18,8 +18,8 @@ // SOFTWARE. import { useState, useEffect, useCallback, useMemo, memo } from 'react'; -import { useNavigate } from 'react-router-dom'; -import { Table, Button, Form, InputGroup, Alert, Spinner, Modal } from 'react-bootstrap'; +import { NavBar } from '../components/NavBar'; +import { Table, Button, Form, InputGroup, Alert, Spinner, Modal, Badge } from 'react-bootstrap'; import AsyncSelect from 'react-select/async'; import type { MultiValue, ActionMeta, StylesConfig } from 'react-select'; import type { Group } from '@auplc/shared'; @@ -103,6 +103,42 @@ interface UserOption { label: string; } +const COLLAPSED_LIMIT = 3; + +function ResourceBadges({ resources }: { resources: string[] }) { + const [expanded, setExpanded] = useState(false); + if (resources.length === 0) return --; + const visible = expanded ? resources : resources.slice(0, COLLAPSED_LIMIT); + const hidden = resources.length - COLLAPSED_LIMIT; + return ( +
+ {visible.map(r => {r})} + {!expanded && hidden > 0 && ( + setExpanded(true)} + title="Show all" + > + +{hidden} + + )} + {expanded && resources.length > COLLAPSED_LIMIT && ( + setExpanded(false)} + title="Collapse" + > + ▲ + + )} +
+ ); +} + // Memoized GroupRow component with inline member management interface GroupRowProps { group: Group; @@ -117,6 +153,9 @@ const GroupRow = memo(function GroupRow({ group, onEdit, onMembersChange, loadUs document.documentElement.getAttribute('data-bs-theme') === 'dark' ); + const isGitHubTeam = group.source === 'github-team'; + const isReadOnly = group.source === 'system'; + // Watch for theme changes useEffect(() => { const observer = new MutationObserver(() => { @@ -150,15 +189,12 @@ const GroupRow = memo(function GroupRow({ group, onEdit, onMembersChange, loadUs setIsUpdating(true); try { if (actionMeta.action === 'select-option' && actionMeta.option) { - // Add user to group await api.addUserToGroup(group.name, actionMeta.option.value); onMembersChange(group.name, [...group.users, actionMeta.option.value]); } else if (actionMeta.action === 'remove-value' && actionMeta.removedValue) { - // Remove user from group await api.removeUserFromGroup(group.name, actionMeta.removedValue.value); onMembersChange(group.name, group.users.filter(u => u !== actionMeta.removedValue!.value)); } else if (actionMeta.action === 'clear') { - // Remove all users for (const user of group.users) { await api.removeUserFromGroup(group.name, user); } @@ -173,7 +209,20 @@ const GroupRow = memo(function GroupRow({ group, onEdit, onMembersChange, loadUs return ( - {group.name} + +
+ {group.name} + {isGitHubTeam ? ( + + GitHub + + ) : group.source === 'system' ? ( + System + ) : ( + Manual + )} +
+ isMulti @@ -182,17 +231,24 @@ const GroupRow = memo(function GroupRow({ group, onEdit, onMembersChange, loadUs value={currentMembers} loadOptions={loadOptions} onChange={handleChange} - isDisabled={isUpdating} + isDisabled={isUpdating || isReadOnly} + isClearable={!isReadOnly} isLoading={isUpdating} - placeholder="Type to search and add users..." + placeholder={isReadOnly ? 'System-managed members' : (isGitHubTeam ? 'Add users (synced members are auto-managed)...' : 'Type to search and add users...')} noOptionsMessage={({ inputValue }) => inputValue ? 'No users found' : 'Type to search users' } loadingMessage={() => 'Searching...'} menuPortalTarget={document.body} styles={getSelectStyles(isDark)} + {...(isReadOnly && { + components: { MultiValueRemove: () => null }, + })} /> + + + + {githubOrg && ( + <> + + + {!showInfo && ( + + )} + + )}
- -
+ {/* Group behavior info */} + {githubOrg && showInfo && ( + { setShowInfo(false); localStorage.setItem('grouplist-hide-info', '1'); }}> + + Groups with GitHub badge are synced from{' '} + + {githubOrg} + {' '} + organization teams. Synced members are auto-managed by GitHub, but you can manually add + users (e.g. native users) to grant them the same resources. + Team data is captured at login, and group membership is updated when the user starts a server + — changes on GitHub may not appear until the user re-logs in and spawns. + Use "Sync Now" to immediately refresh all users' team memberships. + If a manually created group shares its name with a GitHub team, it will be automatically converted + to a GitHub-managed group when a team member logs in and spawns. Use "Release Protection" in group + properties to convert a protected group back to manual management. + + )} + {error && ( setError(null)}> {error} )} + {syncResult && ( + setSyncResult(null)}> + {syncResult} + + )} + {/* Search */}
@@ -390,6 +523,7 @@ export function GroupList() { Group Name Members + Resources Actions diff --git a/runtime/hub/frontend/apps/admin/src/pages/UserList.tsx b/runtime/hub/frontend/apps/admin/src/pages/UserList.tsx index 35d12d4..8c40e90 100644 --- a/runtime/hub/frontend/apps/admin/src/pages/UserList.tsx +++ b/runtime/hub/frontend/apps/admin/src/pages/UserList.tsx @@ -18,15 +18,17 @@ // SOFTWARE. import React, { useState, useEffect, useCallback, useMemo, memo } from 'react'; -import { useNavigate } from 'react-router-dom'; +import { NavBar } from '../components/NavBar'; import { Table, Button, Form, InputGroup, Badge, Spinner, Alert, ButtonGroup, Modal } from 'react-bootstrap'; import type { User, UserQuota, Server } from '@auplc/shared'; import * as api from '@auplc/shared'; import { isGitHubUser, isNativeUser as isNativeUsername } from '@auplc/shared'; import { CreateUserModal } from '../components/CreateUserModal'; import { SetPasswordModal } from '../components/SetPasswordModal'; +import { BatchPasswordModal } from '../components/BatchPasswordModal'; import { EditUserModal } from '../components/EditUserModal'; import { ConfirmModal } from '../components/ConfirmModal'; +import { UserDetailModal } from '../components/UserDetailModal'; // Map frontend sort columns to API sort parameters // JupyterHub API only supports: id, name, last_activity @@ -79,6 +81,7 @@ const SortIcon = memo(function SortIcon({ column, sortColumn, sortDirection }: { // Memoized UserRow component to prevent unnecessary re-renders interface UserRowProps { user: User; + currentUser: string; quotaEnabled: boolean; quotaMap: Map; selectedUsers: Set; @@ -98,10 +101,12 @@ interface UserRowProps { onEditUser: (user: User) => void; onPasswordReset: (user: User) => void; onDeleteUser: (user: User) => void; + onViewUsage: (username: string) => void; } const UserRow = memo(function UserRow({ user, + currentUser, quotaEnabled, quotaMap, selectedUsers, @@ -121,11 +126,14 @@ const UserRow = memo(function 
UserRow({ onEditUser, onPasswordReset, onDeleteUser, + onViewUsage, }: UserRowProps) { const isExpanded = expandedUsers.has(user.name); const isSelected = selectedUsers.has(user.name); const quota = quotaMap.get(user.name); const isEditingThisQuota = editingQuota === user.name; + // Protect admin users and the currently logged-in user from deletion + const isProtected = user.admin || user.name === currentUser; return ( @@ -252,7 +260,15 @@ const UserRow = memo(function UserRow({ Edit User - {isNativeUser(user) && user.name !== 'admin' && ( + + + {isNativeUser(user) && ( )} - {user.name !== 'admin' && ( + {!isProtected && ( )} + +
- -
); } diff --git a/runtime/hub/frontend/apps/admin/vite.config.ts b/runtime/hub/frontend/apps/admin/vite.config.ts index 96988f8..db24b32 100644 --- a/runtime/hub/frontend/apps/admin/vite.config.ts +++ b/runtime/hub/frontend/apps/admin/vite.config.ts @@ -19,10 +19,11 @@ import { defineConfig } from 'vite' import react from '@vitejs/plugin-react' +import tailwindcss from '@tailwindcss/vite' // https://vite.dev/config/ export default defineConfig({ - plugins: [react()], + plugins: [react(), tailwindcss()], // Base path for JupyterHub static files base: '/hub/static/admin-ui/', build: { diff --git a/runtime/hub/frontend/packages/shared/src/api/index.ts b/runtime/hub/frontend/packages/shared/src/api/index.ts index a15db3b..e5b5c35 100644 --- a/runtime/hub/frontend/packages/shared/src/api/index.ts +++ b/runtime/hub/frontend/packages/shared/src/api/index.ts @@ -23,3 +23,4 @@ export * from "./quota.js"; export * from "./accelerators.js"; export * from "./resources.js"; export * from "./git.js"; +export * from "./stats.js"; diff --git a/runtime/hub/frontend/packages/shared/src/api/quota.ts b/runtime/hub/frontend/packages/shared/src/api/quota.ts index d3b885e..feaf226 100644 --- a/runtime/hub/frontend/packages/shared/src/api/quota.ts +++ b/runtime/hub/frontend/packages/shared/src/api/quota.ts @@ -44,7 +44,7 @@ export async function setUserQuota( } export async function batchSetQuota( - users: Array<{ username: string; amount: number }> + users: Array<{ username: string; amount: number; unlimited?: boolean }> ): Promise<{ success: number; failed: number }> { return adminApiRequest<{ success: number; failed: number }>("/quota/batch", { method: "POST", diff --git a/runtime/hub/frontend/packages/shared/src/api/stats.ts b/runtime/hub/frontend/packages/shared/src/api/stats.ts new file mode 100644 index 0000000..7fe7468 --- /dev/null +++ b/runtime/hub/frontend/packages/shared/src/api/stats.ts @@ -0,0 +1,47 @@ +// Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. 
+// SPDX-License-Identifier: MIT + +import { adminApiRequest } from "./client.js"; +import type { + DashboardOverview, + StatsDistributionResponse, + StatsUsageResponse, + UserDetail, +} from "../types/stats.js"; + +export async function getDashboardOverview(): Promise { + return adminApiRequest("/stats/overview"); +} + +export async function getUsageTimeSeries(days = 30, granularity: "day" | "week" = "day"): Promise { + return adminApiRequest(`/stats/usage?days=${days}&granularity=${granularity}`); +} + +export async function getDistribution(days = 30): Promise { + return adminApiRequest(`/stats/distribution?days=${days}`); +} + +export async function getHourlyDistribution(days = 30): Promise<{ hourly: import('../types/stats.js').HourlyUsage[] }> { + const tzOffset = -new Date().getTimezoneOffset(); // minutes ahead of UTC + return adminApiRequest(`/stats/hourly?days=${days}&tz_offset=${tzOffset}`); +} + +export async function getUserDetail(username: string, days = 30, granularity: "day" | "week" = "day"): Promise { + return adminApiRequest(`/stats/user/${encodeURIComponent(username)}?days=${days}&granularity=${granularity}`); +} + +export function createActiveSessionsSSE( + onData: (payload: { + active_sessions: import("../types/stats.js").ActiveSession[]; + pending_spawns: import("../types/stats.js").PendingSpawn[]; + }) => void +): EventSource { + const base = (window as { jhdata?: { base_url?: string } }).jhdata?.base_url ?? 
'/hub/'; + const es = new EventSource(`${base}admin/api/stats/active/stream`); + es.onmessage = (e) => { + try { + onData(JSON.parse(e.data)); + } catch { /* ignore parse errors */ } + }; + return es; +} diff --git a/runtime/hub/frontend/packages/shared/src/api/users.ts b/runtime/hub/frontend/packages/shared/src/api/users.ts index d73b184..0f38e9a 100644 --- a/runtime/hub/frontend/packages/shared/src/api/users.ts +++ b/runtime/hub/frontend/packages/shared/src/api/users.ts @@ -151,15 +151,41 @@ export async function setPassword( }); } +export async function batchSetPasswords( + users: Array<{ username: string; password: string }>, + force_change = true +): Promise<{ success: number; failed: number; results: Array<{ username: string; status: string; error?: string }> }> { + return adminApiRequest("/batch-set-password", { + method: "POST", + body: JSON.stringify({ users, force_change }), + }); +} + export async function generatePassword(): Promise<{ password: string }> { return adminApiRequest<{ password: string }>("/generate-password", { method: "GET", }); } -export async function getGroups(): Promise { - const response = await apiRequest>("/groups"); - return Object.values(response); +export interface GroupsResponse { + groups: Group[]; + github_org: string; +} + +export async function getGroups(): Promise { + return adminApiRequest("/groups"); +} + +export interface SyncGroupsResponse { + synced: number; + failed: number; + skipped: number; +} + +export async function syncGroups(): Promise { + return adminApiRequest("/groups/sync", { + method: "POST", + }); } export async function getGroup(groupName: string): Promise { @@ -173,16 +199,16 @@ export async function createGroup(groupName: string): Promise { } export async function deleteGroup(groupName: string): Promise { - return apiRequest(`/groups/${encodeURIComponent(groupName)}`, { + return adminApiRequest(`/groups/${encodeURIComponent(groupName)}`, { method: "DELETE", }); } export async function updateGroup( 
groupName: string, - data: { properties?: Record } + data: { properties?: Record; release_protection?: boolean } ): Promise { - return apiRequest(`/groups/${encodeURIComponent(groupName)}`, { + return adminApiRequest(`/groups/${encodeURIComponent(groupName)}`, { method: "PATCH", body: JSON.stringify(data), }); @@ -192,7 +218,7 @@ export async function addUserToGroup( groupName: string, username: string ): Promise { - return apiRequest(`/groups/${encodeURIComponent(groupName)}/users`, { + return adminApiRequest(`/groups/${encodeURIComponent(groupName)}/users`, { method: "POST", body: JSON.stringify({ users: [username] }), }); @@ -202,7 +228,7 @@ export async function removeUserFromGroup( groupName: string, username: string ): Promise { - return apiRequest(`/groups/${encodeURIComponent(groupName)}/users`, { + return adminApiRequest(`/groups/${encodeURIComponent(groupName)}/users`, { method: "DELETE", body: JSON.stringify({ users: [username] }), }); diff --git a/runtime/hub/frontend/packages/shared/src/types/index.ts b/runtime/hub/frontend/packages/shared/src/types/index.ts index 122998e..51a3ef5 100644 --- a/runtime/hub/frontend/packages/shared/src/types/index.ts +++ b/runtime/hub/frontend/packages/shared/src/types/index.ts @@ -22,3 +22,4 @@ export * from "./quota.js"; export * from "./accelerator.js"; export * from "./resource.js"; export * from "./hub.js"; +export * from "./stats.js"; diff --git a/runtime/hub/frontend/packages/shared/src/types/quota.ts b/runtime/hub/frontend/packages/shared/src/types/quota.ts index 0630425..ecbcf35 100644 --- a/runtime/hub/frontend/packages/shared/src/types/quota.ts +++ b/runtime/hub/frontend/packages/shared/src/types/quota.ts @@ -42,6 +42,7 @@ export interface QuotaRates { rates: Record; minimum_to_start: number; enabled: boolean; + default_quota?: number; } export interface UserQuotaInfo { diff --git a/runtime/hub/frontend/packages/shared/src/types/stats.ts b/runtime/hub/frontend/packages/shared/src/types/stats.ts new file mode 
100644 index 0000000..f6083a2 --- /dev/null +++ b/runtime/hub/frontend/packages/shared/src/types/stats.ts @@ -0,0 +1,83 @@ +// Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. +// SPDX-License-Identifier: MIT + +export interface DashboardOverview { + total_users: number; + active_sessions: number; + total_usage_minutes: number; + users_this_week: number; +} + +export interface DailyUsage { + date: string; + minutes: number; + sessions: number; + users: number; +} + +export interface ResourceDistribution { + resource_type: string; + resource_display?: string; + minutes: number; + sessions: number; + users: number; + avg_minutes: number; +} + +export interface ActiveSession { + username: string; + resource_type: string; + resource_display?: string; + accelerator_type?: string | null; + accelerator_display?: string | null; + start_time: string; + elapsed_minutes: number; + idle_warning: boolean; +} + +export interface PendingSpawn { + username: string; + started: string; + waiting_minutes: number; +} + +export interface TopUser { + username: string; + total_minutes: number; + sessions: number; +} + +export interface StatsUsageResponse { + daily_usage: DailyUsage[]; +} + +export interface StatsDistributionResponse { + by_resource: ResourceDistribution[]; + top_users: TopUser[]; +} + +export interface HourlyUsage { + hour: number; + sessions: number; + minutes: number; +} + +export interface UserSession { + resource_type: string; + resource_display?: string; + accelerator_type?: string | null; + accelerator_display?: string | null; + start_time: string; + end_time: string | null; + duration_minutes: number | null; + status: string; +} + +export interface UserDetail { + username: string; + total_minutes: number; + total_sessions: number; + usage: DailyUsage[]; + by_resource: ResourceDistribution[]; + recent_sessions: UserSession[]; +} diff --git a/runtime/hub/frontend/packages/shared/src/types/user.ts 
b/runtime/hub/frontend/packages/shared/src/types/user.ts index 28cbd47..0a69f61 100644 --- a/runtime/hub/frontend/packages/shared/src/types/user.ts +++ b/runtime/hub/frontend/packages/shared/src/types/user.ts @@ -70,4 +70,6 @@ export interface Group { name: string; users: string[]; properties: Record; + source?: "github-team" | "system" | "admin"; + resources?: string[]; } diff --git a/runtime/hub/frontend/pnpm-lock.yaml b/runtime/hub/frontend/pnpm-lock.yaml index afa1e0a..c4e9aab 100644 --- a/runtime/hub/frontend/pnpm-lock.yaml +++ b/runtime/hub/frontend/pnpm-lock.yaml @@ -9,6 +9,9 @@ catalogs: '@eslint/js': specifier: ^9.28.0 version: 9.39.2 + '@tailwindcss/vite': + specifier: ^4.1.0 + version: 4.2.2 '@types/node': specifier: ^25.2.3 version: 25.2.3 @@ -39,6 +42,9 @@ catalogs: react-dom: specifier: ^19.2.4 version: 19.2.4 + recharts: + specifier: ^2.15.0 + version: 2.15.4 typescript: specifier: ^5.9.3 version: 5.9.3 @@ -79,10 +85,19 @@ importers: react-select: specifier: ^5.10.2 version: 5.10.2(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + recharts: + specifier: 'catalog:' + version: 2.15.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4) devDependencies: '@eslint/js': specifier: 'catalog:' version: 9.39.2 + '@playwright/test': + specifier: ^1.58.2 + version: 1.58.2 + '@tailwindcss/vite': + specifier: 'catalog:' + version: 4.2.2(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0)) '@types/node': specifier: 'catalog:' version: 25.2.3 @@ -94,7 +109,7 @@ importers: version: 19.2.3(@types/react@19.2.14) '@vitejs/plugin-react': specifier: 'catalog:' - version: 5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)) + version: 5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0)) eslint: specifier: 'catalog:' version: 9.39.2(jiti@2.6.1) @@ -115,7 +130,7 @@ importers: version: 8.55.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) vite: specifier: 'catalog:' - version: 
7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2) + version: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0) apps/spawn: dependencies: @@ -143,7 +158,7 @@ importers: version: 19.2.3(@types/react@19.2.14) '@vitejs/plugin-react': specifier: 'catalog:' - version: 5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)) + version: 5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0)) eslint: specifier: 'catalog:' version: 9.39.2(jiti@2.6.1) @@ -164,7 +179,7 @@ importers: version: 8.55.0(eslint@9.39.2(jiti@2.6.1))(typescript@5.9.3) vite: specifier: 'catalog:' - version: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2) + version: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0) packages/shared: devDependencies: @@ -552,6 +567,11 @@ packages: '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + '@playwright/test@1.58.2': + resolution: {integrity: sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA==} + engines: {node: '>=18'} + hasBin: true + '@popperjs/core@2.11.8': resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==} @@ -708,6 +728,96 @@ packages: '@swc/helpers@0.5.18': resolution: {integrity: sha512-TXTnIcNJQEKwThMMqBXsZ4VGAza6bvN4pa41Rkqoio6QBKMvo+5lexeTMScGCIxtzgQJzElcvIltani+adC5PQ==} + '@tailwindcss/node@4.2.2': + resolution: {integrity: sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==} + + '@tailwindcss/oxide-android-arm64@4.2.2': + resolution: {integrity: sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==} + engines: {node: '>= 20'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.2.2': + resolution: {integrity: 
sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==} + engines: {node: '>= 20'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.2.2': + resolution: {integrity: sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==} + engines: {node: '>= 20'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.2.2': + resolution: {integrity: sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==} + engines: {node: '>= 20'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': + resolution: {integrity: sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==} + engines: {node: '>= 20'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': + resolution: {integrity: sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==} + engines: {node: '>= 20'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.2.2': + resolution: {integrity: sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==} + engines: {node: '>= 20'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.2.2': + resolution: {integrity: sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==} + engines: {node: '>= 20'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.2.2': + resolution: {integrity: sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==} + engines: {node: '>= 20'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-wasm32-wasi@4.2.2': + resolution: {integrity: sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + bundledDependencies: + - '@napi-rs/wasm-runtime' + - 
'@emnapi/core' + - '@emnapi/runtime' + - '@tybys/wasm-util' + - '@emnapi/wasi-threads' + - tslib + + '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': + resolution: {integrity: sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==} + engines: {node: '>= 20'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.2.2': + resolution: {integrity: sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==} + engines: {node: '>= 20'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.2.2': + resolution: {integrity: sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==} + engines: {node: '>= 20'} + + '@tailwindcss/vite@4.2.2': + resolution: {integrity: sha512-mEiF5HO1QqCLXoNEfXVA1Tzo+cYsrqV7w9Juj2wdUFyW07JRenqMG225MvPwr3ZD9N1bFQj46X7r33iHxLUW0w==} + peerDependencies: + vite: ^5.2.0 || ^6 || ^7 || ^8 + '@types/babel__core@7.20.5': resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} @@ -720,6 +830,33 @@ packages: '@types/babel__traverse@7.28.0': resolution: {integrity: sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==} + '@types/d3-array@3.2.2': + resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} + + '@types/d3-color@3.1.3': + resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} + + '@types/d3-ease@3.0.2': + resolution: {integrity: sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} + + '@types/d3-interpolate@3.0.4': + resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} + + '@types/d3-path@3.1.1': + resolution: {integrity: 
sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} + + '@types/d3-scale@4.0.9': + resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} + + '@types/d3-shape@3.1.8': + resolution: {integrity: sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==} + + '@types/d3-time@3.0.4': + resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} + + '@types/d3-timer@3.0.2': + resolution: {integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==} + '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} @@ -880,6 +1017,10 @@ packages: classnames@2.5.1: resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + color-convert@2.0.1: resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} engines: {node: '>=7.0.0'} @@ -911,6 +1052,50 @@ packages: csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + d3-array@3.2.4: + resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} + engines: {node: '>=12'} + + d3-color@3.1.0: + resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} + engines: {node: '>=12'} + + d3-ease@3.0.1: + resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} 
+ engines: {node: '>=12'} + + d3-format@3.1.2: + resolution: {integrity: sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==} + engines: {node: '>=12'} + + d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + + d3-path@3.1.0: + resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} + engines: {node: '>=12'} + + d3-scale@4.0.2: + resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} + engines: {node: '>=12'} + + d3-shape@3.2.0: + resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} + engines: {node: '>=12'} + + d3-time-format@4.1.0: + resolution: {integrity: sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==} + engines: {node: '>=12'} + + d3-time@3.1.0: + resolution: {integrity: sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==} + engines: {node: '>=12'} + + d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + debug@4.4.3: resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} @@ -920,6 +1105,9 @@ packages: supports-color: optional: true + decimal.js-light@2.5.1: + resolution: {integrity: sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==} + deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} @@ -937,6 +1125,10 @@ packages: electron-to-chromium@1.5.286: resolution: {integrity: 
sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==} + enhanced-resolve@5.20.1: + resolution: {integrity: sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==} + engines: {node: '>=10.13.0'} + error-ex@1.3.4: resolution: {integrity: sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==} @@ -1006,9 +1198,16 @@ packages: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} + eventemitter3@4.0.7: + resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} + fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + fast-equals@5.4.0: + resolution: {integrity: sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==} + engines: {node: '>=6.0.0'} + fast-json-stable-stringify@2.1.0: resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} @@ -1042,6 +1241,11 @@ packages: flatted@3.3.3: resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + fsevents@2.3.3: resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} @@ -1066,6 +1270,9 @@ packages: resolution: {integrity: sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==} engines: {node: '>=18'} + graceful-fs@4.2.11: + resolution: 
{integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} @@ -1093,6 +1300,10 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} + internmap@2.0.3: + resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} + engines: {node: '>=12'} + invariant@2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} @@ -1154,74 +1365,74 @@ packages: resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} - lightningcss-android-arm64@1.30.2: - resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==} + lightningcss-android-arm64@1.32.0: + resolution: {integrity: sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [android] - lightningcss-darwin-arm64@1.30.2: - resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==} + lightningcss-darwin-arm64@1.32.0: + resolution: {integrity: sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [darwin] - lightningcss-darwin-x64@1.30.2: - resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==} + lightningcss-darwin-x64@1.32.0: + resolution: {integrity: 
sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [darwin] - lightningcss-freebsd-x64@1.30.2: - resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==} + lightningcss-freebsd-x64@1.32.0: + resolution: {integrity: sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [freebsd] - lightningcss-linux-arm-gnueabihf@1.30.2: - resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==} + lightningcss-linux-arm-gnueabihf@1.32.0: + resolution: {integrity: sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==} engines: {node: '>= 12.0.0'} cpu: [arm] os: [linux] - lightningcss-linux-arm64-gnu@1.30.2: - resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==} + lightningcss-linux-arm64-gnu@1.32.0: + resolution: {integrity: sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-arm64-musl@1.30.2: - resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==} + lightningcss-linux-arm64-musl@1.32.0: + resolution: {integrity: sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-x64-gnu@1.30.2: - resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==} + lightningcss-linux-x64-gnu@1.32.0: + resolution: {integrity: sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==} engines: {node: 
'>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-linux-x64-musl@1.30.2: - resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==} + lightningcss-linux-x64-musl@1.32.0: + resolution: {integrity: sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-win32-arm64-msvc@1.30.2: - resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==} + lightningcss-win32-arm64-msvc@1.32.0: + resolution: {integrity: sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [win32] - lightningcss-win32-x64-msvc@1.30.2: - resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==} + lightningcss-win32-x64-msvc@1.32.0: + resolution: {integrity: sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [win32] - lightningcss@1.30.2: - resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==} + lightningcss@1.32.0: + resolution: {integrity: sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==} engines: {node: '>= 12.0.0'} lines-and-columns@1.2.4: @@ -1234,6 +1445,9 @@ packages: lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + lodash@4.17.23: + resolution: {integrity: sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==} + loose-envify@1.4.0: resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} hasBin: true @@ -1241,6 +1455,9 
@@ packages: lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + memoize-one@6.0.0: resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} @@ -1311,6 +1528,16 @@ packages: resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} + playwright-core@1.58.2: + resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} + engines: {node: '>=18'} + hasBin: true + + playwright@1.58.2: + resolution: {integrity: sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==} + engines: {node: '>=18'} + hasBin: true + postcss@8.5.6: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} @@ -1349,6 +1576,9 @@ packages: react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + react-is@18.3.1: + resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + react-lifecycles-compat@3.0.4: resolution: {integrity: sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==} @@ -1379,6 +1609,12 @@ packages: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-smooth@4.0.4: + resolution: {integrity: sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: 
^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-transition-group@4.4.5: resolution: {integrity: sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==} peerDependencies: @@ -1389,6 +1625,16 @@ packages: resolution: {integrity: sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==} engines: {node: '>=0.10.0'} + recharts-scale@0.4.5: + resolution: {integrity: sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==} + + recharts@2.15.4: + resolution: {integrity: sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==} + engines: {node: '>=14'} + peerDependencies: + react: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} @@ -1449,6 +1695,16 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} + tailwindcss@4.2.2: + resolution: {integrity: sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==} + + tapable@2.3.2: + resolution: {integrity: sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==} + engines: {node: '>=6'} + + tiny-invariant@1.3.3: + resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} @@ -1509,6 +1765,9 @@ packages: '@types/react': optional: true + victory-vendor@36.9.2: + resolution: {integrity: 
sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==} + vite@7.3.1: resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} engines: {node: ^20.19.0 || >=22.12.0} @@ -1917,6 +2176,10 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 + '@playwright/test@1.58.2': + dependencies: + playwright: 1.58.2 + '@popperjs/core@2.11.8': {} '@react-aria/ssr@3.9.10(react@19.2.4)': @@ -2029,6 +2292,74 @@ snapshots: dependencies: tslib: 2.8.1 + '@tailwindcss/node@4.2.2': + dependencies: + '@jridgewell/remapping': 2.3.5 + enhanced-resolve: 5.20.1 + jiti: 2.6.1 + lightningcss: 1.32.0 + magic-string: 0.30.21 + source-map-js: 1.2.1 + tailwindcss: 4.2.2 + + '@tailwindcss/oxide-android-arm64@4.2.2': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.2.2': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.2.2': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.2.2': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.2.2': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.2.2': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.2.2': + optional: true + + '@tailwindcss/oxide-wasm32-wasi@4.2.2': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.2.2': + optional: true + + '@tailwindcss/oxide@4.2.2': + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.2.2 + '@tailwindcss/oxide-darwin-arm64': 4.2.2 + '@tailwindcss/oxide-darwin-x64': 4.2.2 + '@tailwindcss/oxide-freebsd-x64': 4.2.2 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.2.2 + '@tailwindcss/oxide-linux-arm64-gnu': 4.2.2 + '@tailwindcss/oxide-linux-arm64-musl': 4.2.2 + '@tailwindcss/oxide-linux-x64-gnu': 4.2.2 + '@tailwindcss/oxide-linux-x64-musl': 
4.2.2 + '@tailwindcss/oxide-wasm32-wasi': 4.2.2 + '@tailwindcss/oxide-win32-arm64-msvc': 4.2.2 + '@tailwindcss/oxide-win32-x64-msvc': 4.2.2 + + '@tailwindcss/vite@4.2.2(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0))': + dependencies: + '@tailwindcss/node': 4.2.2 + '@tailwindcss/oxide': 4.2.2 + tailwindcss: 4.2.2 + vite: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0) + '@types/babel__core@7.20.5': dependencies: '@babel/parser': 7.29.0 @@ -2050,6 +2381,30 @@ snapshots: dependencies: '@babel/types': 7.29.0 + '@types/d3-array@3.2.2': {} + + '@types/d3-color@3.1.3': {} + + '@types/d3-ease@3.0.2': {} + + '@types/d3-interpolate@3.0.4': + dependencies: + '@types/d3-color': 3.1.3 + + '@types/d3-path@3.1.1': {} + + '@types/d3-scale@4.0.9': + dependencies: + '@types/d3-time': 3.0.4 + + '@types/d3-shape@3.1.8': + dependencies: + '@types/d3-path': 3.1.1 + + '@types/d3-time@3.0.4': {} + + '@types/d3-timer@3.0.2': {} + '@types/estree@1.0.8': {} '@types/json-schema@7.0.15': {} @@ -2167,7 +2522,7 @@ snapshots: '@typescript-eslint/types': 8.55.0 eslint-visitor-keys: 4.2.1 - '@vitejs/plugin-react@5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2))': + '@vitejs/plugin-react@5.1.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0))': dependencies: '@babel/core': 7.29.0 '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.29.0) @@ -2175,7 +2530,7 @@ snapshots: '@rolldown/pluginutils': 1.0.0-rc.3 '@types/babel__core': 7.20.5 react-refresh: 0.18.0 - vite: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2) + vite: 7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0) transitivePeerDependencies: - supports-color @@ -2242,6 +2597,8 @@ snapshots: classnames@2.5.1: {} + clsx@2.1.1: {} + color-convert@2.0.1: dependencies: color-name: 1.1.4 @@ -2272,16 +2629,55 @@ snapshots: csstype@3.2.3: {} + d3-array@3.2.4: + dependencies: + internmap: 2.0.3 + + d3-color@3.1.0: {} + + d3-ease@3.0.1: {} + + d3-format@3.1.2: {} 
+ + d3-interpolate@3.0.1: + dependencies: + d3-color: 3.1.0 + + d3-path@3.1.0: {} + + d3-scale@4.0.2: + dependencies: + d3-array: 3.2.4 + d3-format: 3.1.2 + d3-interpolate: 3.0.1 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + + d3-shape@3.2.0: + dependencies: + d3-path: 3.1.0 + + d3-time-format@4.1.0: + dependencies: + d3-time: 3.1.0 + + d3-time@3.1.0: + dependencies: + d3-array: 3.2.4 + + d3-timer@3.0.1: {} + debug@4.4.3: dependencies: ms: 2.1.3 + decimal.js-light@2.5.1: {} + deep-is@0.1.4: {} dequal@2.0.3: {} - detect-libc@2.1.2: - optional: true + detect-libc@2.1.2: {} dom-helpers@5.2.1: dependencies: @@ -2290,6 +2686,11 @@ snapshots: electron-to-chromium@1.5.286: {} + enhanced-resolve@5.20.1: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.3.2 + error-ex@1.3.4: dependencies: is-arrayish: 0.2.1 @@ -2403,8 +2804,12 @@ snapshots: esutils@2.0.3: {} + eventemitter3@4.0.7: {} + fast-deep-equal@3.1.3: {} + fast-equals@5.4.0: {} + fast-json-stable-stringify@2.1.0: {} fast-levenshtein@2.0.6: {} @@ -2431,6 +2836,9 @@ snapshots: flatted@3.3.3: {} + fsevents@2.3.2: + optional: true + fsevents@2.3.3: optional: true @@ -2446,6 +2854,8 @@ snapshots: globals@16.5.0: {} + graceful-fs@4.2.11: {} + has-flag@4.0.0: {} hasown@2.0.2: @@ -2467,6 +2877,8 @@ snapshots: imurmurhash@0.1.4: {} + internmap@2.0.3: {} + invariant@2.2.4: dependencies: loose-envify: 1.4.0 @@ -2485,8 +2897,7 @@ snapshots: isexe@2.0.0: {} - jiti@2.6.1: - optional: true + jiti@2.6.1: {} js-tokens@4.0.0: {} @@ -2515,55 +2926,54 @@ snapshots: prelude-ls: 1.2.1 type-check: 0.4.0 - lightningcss-android-arm64@1.30.2: + lightningcss-android-arm64@1.32.0: optional: true - lightningcss-darwin-arm64@1.30.2: + lightningcss-darwin-arm64@1.32.0: optional: true - lightningcss-darwin-x64@1.30.2: + lightningcss-darwin-x64@1.32.0: optional: true - lightningcss-freebsd-x64@1.30.2: + lightningcss-freebsd-x64@1.32.0: optional: true - lightningcss-linux-arm-gnueabihf@1.30.2: + lightningcss-linux-arm-gnueabihf@1.32.0: optional: true - 
lightningcss-linux-arm64-gnu@1.30.2: + lightningcss-linux-arm64-gnu@1.32.0: optional: true - lightningcss-linux-arm64-musl@1.30.2: + lightningcss-linux-arm64-musl@1.32.0: optional: true - lightningcss-linux-x64-gnu@1.30.2: + lightningcss-linux-x64-gnu@1.32.0: optional: true - lightningcss-linux-x64-musl@1.30.2: + lightningcss-linux-x64-musl@1.32.0: optional: true - lightningcss-win32-arm64-msvc@1.30.2: + lightningcss-win32-arm64-msvc@1.32.0: optional: true - lightningcss-win32-x64-msvc@1.30.2: + lightningcss-win32-x64-msvc@1.32.0: optional: true - lightningcss@1.30.2: + lightningcss@1.32.0: dependencies: detect-libc: 2.1.2 optionalDependencies: - lightningcss-android-arm64: 1.30.2 - lightningcss-darwin-arm64: 1.30.2 - lightningcss-darwin-x64: 1.30.2 - lightningcss-freebsd-x64: 1.30.2 - lightningcss-linux-arm-gnueabihf: 1.30.2 - lightningcss-linux-arm64-gnu: 1.30.2 - lightningcss-linux-arm64-musl: 1.30.2 - lightningcss-linux-x64-gnu: 1.30.2 - lightningcss-linux-x64-musl: 1.30.2 - lightningcss-win32-arm64-msvc: 1.30.2 - lightningcss-win32-x64-msvc: 1.30.2 - optional: true + lightningcss-android-arm64: 1.32.0 + lightningcss-darwin-arm64: 1.32.0 + lightningcss-darwin-x64: 1.32.0 + lightningcss-freebsd-x64: 1.32.0 + lightningcss-linux-arm-gnueabihf: 1.32.0 + lightningcss-linux-arm64-gnu: 1.32.0 + lightningcss-linux-arm64-musl: 1.32.0 + lightningcss-linux-x64-gnu: 1.32.0 + lightningcss-linux-x64-musl: 1.32.0 + lightningcss-win32-arm64-msvc: 1.32.0 + lightningcss-win32-x64-msvc: 1.32.0 lines-and-columns@1.2.4: {} @@ -2573,6 +2983,8 @@ snapshots: lodash.merge@4.6.2: {} + lodash@4.17.23: {} + loose-envify@1.4.0: dependencies: js-tokens: 4.0.0 @@ -2581,6 +2993,10 @@ snapshots: dependencies: yallist: 3.1.1 + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + memoize-one@6.0.0: {} minimatch@3.1.2: @@ -2641,6 +3057,14 @@ snapshots: picomatch@4.0.3: {} + playwright-core@1.58.2: {} + + playwright@1.58.2: + dependencies: + playwright-core: 1.58.2 + 
optionalDependencies: + fsevents: 2.3.2 + postcss@8.5.6: dependencies: nanoid: 3.3.11 @@ -2690,6 +3114,8 @@ snapshots: react-is@16.13.1: {} + react-is@18.3.1: {} + react-lifecycles-compat@3.0.4: {} react-refresh@0.18.0: {} @@ -2725,6 +3151,14 @@ snapshots: - '@types/react' - supports-color + react-smooth@4.0.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + dependencies: + fast-equals: 5.4.0 + prop-types: 15.8.1 + react: 19.2.4 + react-dom: 19.2.4(react@19.2.4) + react-transition-group: 4.4.5(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + react-transition-group@4.4.5(react-dom@19.2.4(react@19.2.4))(react@19.2.4): dependencies: '@babel/runtime': 7.28.6 @@ -2736,6 +3170,23 @@ snapshots: react@19.2.4: {} + recharts-scale@0.4.5: + dependencies: + decimal.js-light: 2.5.1 + + recharts@2.15.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4): + dependencies: + clsx: 2.1.1 + eventemitter3: 4.0.7 + lodash: 4.17.23 + react: 19.2.4 + react-dom: 19.2.4(react@19.2.4) + react-is: 18.3.1 + react-smooth: 4.0.4(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + recharts-scale: 0.4.5 + tiny-invariant: 1.3.3 + victory-vendor: 36.9.2 + resolve-from@4.0.0: {} resolve@1.22.11: @@ -2803,6 +3254,12 @@ snapshots: supports-preserve-symlinks-flag@1.0.0: {} + tailwindcss@4.2.2: {} + + tapable@2.3.2: {} + + tiny-invariant@1.3.3: {} + tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) @@ -2861,7 +3318,24 @@ snapshots: optionalDependencies: '@types/react': 19.2.14 - vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2): + victory-vendor@36.9.2: + dependencies: + '@types/d3-array': 3.2.2 + '@types/d3-ease': 3.0.2 + '@types/d3-interpolate': 3.0.4 + '@types/d3-scale': 4.0.9 + '@types/d3-shape': 3.1.8 + '@types/d3-time': 3.0.4 + '@types/d3-timer': 3.0.2 + d3-array: 3.2.4 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-scale: 4.0.2 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-timer: 3.0.1 + + vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.32.0): dependencies: esbuild: 0.27.3 fdir: 
6.5.0(picomatch@4.0.3) @@ -2873,7 +3347,7 @@ snapshots: '@types/node': 25.2.3 fsevents: 2.3.3 jiti: 2.6.1 - lightningcss: 1.30.2 + lightningcss: 1.32.0 warning@4.0.3: dependencies: diff --git a/runtime/hub/frontend/pnpm-workspace.yaml b/runtime/hub/frontend/pnpm-workspace.yaml index f87f6a2..2b3a791 100644 --- a/runtime/hub/frontend/pnpm-workspace.yaml +++ b/runtime/hub/frontend/pnpm-workspace.yaml @@ -27,3 +27,8 @@ catalog: # Build tools vite: ^7.3.1 "@vitejs/plugin-react": ^5.1.4 + + # Charts & Dashboard + recharts: ^2.15.0 + tailwindcss: ^4.1.0 + "@tailwindcss/vite": ^4.1.0 diff --git a/runtime/hub/frontend/templates/spawn.html b/runtime/hub/frontend/templates/spawn.html index c1dbd45..102ec37 100644 --- a/runtime/hub/frontend/templates/spawn.html +++ b/runtime/hub/frontend/templates/spawn.html @@ -37,15 +37,13 @@
+ {{ spawner_options_form | safe }}
diff --git a/runtime/hub/tests/test_groups.py b/runtime/hub/tests/test_groups.py new file mode 100644 index 0000000..0325d2f --- /dev/null +++ b/runtime/hub/tests/test_groups.py @@ -0,0 +1,354 @@ +# Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +"""Unit tests for core.groups module.""" + +from __future__ import annotations + +import sys +from types import ModuleType +from unittest.mock import MagicMock + +# --------------------------------------------------------------------------- +# Stub out jupyterhub.orm so we can import core.groups without a real hub +# --------------------------------------------------------------------------- + + +class FakeORMGroup: + """Lightweight stand-in for jupyterhub.orm.Group.""" + + def __init__(self, name: str = "", properties: dict | None = None): + self.name = name + self.properties = properties or {} + self.users: list = [] + + def __repr__(self): + return f"" + + +class FakeQuery: + """Minimal query mock that supports filter_by().first().""" + + def __init__(self, groups: list[FakeORMGroup]): + self._groups = groups + + def filter_by(self, **kwargs): + name = kwargs.get("name") + self._filtered = [g for g in self._groups if g.name == name] + return self + + def first(self): + return self._filtered[0] if self._filtered else None + + +class FakeDB: + """Minimal DB mock with query(), add(), commit(), delete().""" + + def __init__(self, groups: list[FakeORMGroup] | None = None): + self.groups = groups or [] + self._added: list = [] + self._deleted: list = [] + self._committed = 0 + + def query(self, model): + return FakeQuery(self.groups) + + def add(self, obj): + self._added.append(obj) + self.groups.append(obj) + + def delete(self, obj): + self._deleted.append(obj) + self.groups.remove(obj) + + def commit(self): + self._committed += 1 + + +class FakeORMUser: + """Stand-in for jupyterhub.orm.User.""" + + def __init__(self, name: str = "", groups: list[FakeORMGroup] | None = None): + self.name = name + self.groups = groups or [] + + +class FakeUser: + """Stand-in for jupyterhub.user.User (wrapper around orm_user).""" + + def __init__(self, name: str = "", groups: list[FakeORMGroup] | None = None): + self.name = name + self.orm_user = FakeORMUser(name=name, groups=groups 
or []) + self.db = FakeDB(groups or []) + + +import importlib.util # noqa: E402 +from pathlib import Path # noqa: E402 + +# Install stubs before importing core.groups +_orm_mod = ModuleType("jupyterhub.orm") +_orm_mod.Group = FakeORMGroup # type: ignore[attr-defined] +sys.modules.setdefault("jupyterhub", ModuleType("jupyterhub")) +sys.modules["jupyterhub.orm"] = _orm_mod + +# Also stub aiohttp so the module-level import doesn't fail +sys.modules.setdefault("aiohttp", MagicMock()) + +_groups_path = Path(__file__).resolve().parent.parent / "core" / "groups.py" +_spec = importlib.util.spec_from_file_location("core.groups", _groups_path) +_groups_mod = importlib.util.module_from_spec(_spec) +sys.modules["core.groups"] = _groups_mod +_spec.loader.exec_module(_groups_mod) # type: ignore[union-attr] + +GITHUB_TEAM_SOURCE = _groups_mod.GITHUB_TEAM_SOURCE +SYSTEM_SOURCE = _groups_mod.SYSTEM_SOURCE +assign_user_to_group = _groups_mod.assign_user_to_group +get_resources_for_user = _groups_mod.get_resources_for_user +is_readonly_group = _groups_mod.is_readonly_group +is_undeletable_group = _groups_mod.is_undeletable_group +sync_user_github_teams = _groups_mod.sync_user_github_teams + + +# ========================================================================= +# is_readonly_group / is_undeletable_group +# ========================================================================= + + +class TestGroupProtection: + def test_github_team_is_not_readonly(self): + # GitHub-team groups allow manual member additions + g = FakeORMGroup("gpu", {"source": GITHUB_TEAM_SOURCE}) + assert is_readonly_group(g) is False + + def test_system_is_readonly(self): + g = FakeORMGroup("native-users", {"source": SYSTEM_SOURCE}) + assert is_readonly_group(g) is True + + def test_admin_is_not_readonly(self): + g = FakeORMGroup("custom", {"source": "admin"}) + assert is_readonly_group(g) is False + + def test_no_source_is_not_readonly(self): + g = FakeORMGroup("old-group", {}) + assert 
is_readonly_group(g) is False + + def test_github_team_is_undeletable(self): + g = FakeORMGroup("gpu", {"source": GITHUB_TEAM_SOURCE}) + assert is_undeletable_group(g) is True + + def test_system_is_undeletable(self): + g = FakeORMGroup("native-users", {"source": SYSTEM_SOURCE}) + assert is_undeletable_group(g) is True + + def test_admin_is_deletable(self): + g = FakeORMGroup("custom", {"source": "admin"}) + assert is_undeletable_group(g) is False + + +# ========================================================================= +# sync_user_github_teams +# ========================================================================= + + +class TestSyncUserGitHubTeams: + def test_creates_new_group_and_adds_user(self): + db = FakeDB() + user = FakeUser("alice") + user.db = db + + sync_user_github_teams(user, ["gpu"], {"gpu", "cpu"}, db) + + assert len(db.groups) == 1 + assert db.groups[0].name == "gpu" + assert db.groups[0].properties["source"] == GITHUB_TEAM_SOURCE + assert db.groups[0] in user.orm_user.groups + + def test_ignores_teams_not_in_mapping(self): + db = FakeDB() + user = FakeUser("alice") + user.db = db + + sync_user_github_teams(user, ["unknown-team"], {"gpu", "cpu"}, db) + + assert len(db.groups) == 0 + + def test_adds_user_to_existing_group(self): + existing = FakeORMGroup("gpu", {"source": GITHUB_TEAM_SOURCE}) + db = FakeDB([existing]) + user = FakeUser("alice") + user.db = db + + sync_user_github_teams(user, ["gpu"], {"gpu"}, db) + + assert existing in user.orm_user.groups + + def test_removes_user_from_old_github_team(self): + old_group = FakeORMGroup("cpu", {"source": GITHUB_TEAM_SOURCE}) + db = FakeDB([old_group]) + user = FakeUser("alice", groups=[old_group]) + user.db = db + + # User is no longer in "cpu" team, only in "gpu" + sync_user_github_teams(user, ["gpu"], {"gpu", "cpu"}, db) + + assert old_group not in user.orm_user.groups + + def test_does_not_remove_user_from_non_github_group(self): + admin_group = FakeORMGroup("custom", {"source": 
"admin"}) + db = FakeDB([admin_group]) + user = FakeUser("alice", groups=[admin_group]) + user.db = db + + sync_user_github_teams(user, [], {"gpu"}, db) + + # Admin group should not be touched + assert admin_group in user.orm_user.groups + + def test_promotes_admin_group_to_github_team(self): + admin_group = FakeORMGroup("gpu", {"source": "admin"}) + db = FakeDB([admin_group]) + user = FakeUser("alice") + user.db = db + + sync_user_github_teams(user, ["gpu"], {"gpu"}, db) + + assert admin_group.properties["source"] == GITHUB_TEAM_SOURCE + + def test_backfills_source_on_group_without_source(self): + no_source = FakeORMGroup("gpu", {}) + db = FakeDB([no_source]) + user = FakeUser("alice") + user.db = db + + sync_user_github_teams(user, ["gpu"], {"gpu"}, db) + + assert no_source.properties["source"] == GITHUB_TEAM_SOURCE + + +# ========================================================================= +# assign_user_to_group +# ========================================================================= + + +class TestAssignUserToGroup: + def test_creates_group_if_not_exists(self): + db = FakeDB() + user = FakeUser("bob") + user.db = db + + assign_user_to_group(user, "native-users", db) + + assert len(db.groups) == 1 + assert db.groups[0].name == "native-users" + assert db.groups[0].properties["source"] == SYSTEM_SOURCE + assert db.groups[0] in user.orm_user.groups + + def test_adds_user_to_existing_group(self): + existing = FakeORMGroup("native-users", {"source": SYSTEM_SOURCE}) + db = FakeDB([existing]) + user = FakeUser("bob") + user.db = db + + assign_user_to_group(user, "native-users", db) + + assert existing in user.orm_user.groups + + def test_does_not_duplicate_membership(self): + existing = FakeORMGroup("native-users", {"source": SYSTEM_SOURCE}) + db = FakeDB([existing]) + user = FakeUser("bob", groups=[existing]) + user.db = db + + assign_user_to_group(user, "native-users", db) + + # No extra commit for membership since user is already a member + assert 
user.orm_user.groups.count(existing) == 1 + + def test_backfills_source_on_existing_group_without_source(self): + no_source = FakeORMGroup("native-users", {}) + db = FakeDB([no_source]) + user = FakeUser("bob") + user.db = db + + assign_user_to_group(user, "native-users", db) + + assert no_source.properties["source"] == SYSTEM_SOURCE + + +# ========================================================================= +# get_resources_for_user +# ========================================================================= + + +class TestGetResourcesForUser: + def _make_user_with_groups(self, group_names: list[str]) -> FakeUser: + groups = [FakeORMGroup(name) for name in group_names] + return FakeUser("alice", groups=groups) + + def test_returns_resources_for_matching_groups(self): + user = self._make_user_with_groups(["gpu"]) + mapping = {"gpu": ["res-a", "res-b"], "cpu": ["res-c"]} + + result = get_resources_for_user(user, mapping) + + assert result == ["res-a", "res-b"] + + def test_official_shortcircuits(self): + user = self._make_user_with_groups(["official", "gpu"]) + mapping = { + "official": ["res-a", "res-b", "res-c"], + "gpu": ["res-a"], + } + + result = get_resources_for_user(user, mapping) + + assert result == ["res-a", "res-b", "res-c"] + + def test_merges_multiple_groups(self): + user = self._make_user_with_groups(["gpu", "cpu"]) + mapping = {"gpu": ["res-a"], "cpu": ["res-b", "res-c"]} + + result = get_resources_for_user(user, mapping) + + assert set(result) == {"res-a", "res-b", "res-c"} + + def test_deduplicates_resources(self): + user = self._make_user_with_groups(["gpu", "cpu"]) + mapping = {"gpu": ["res-a", "res-b"], "cpu": ["res-b", "res-c"]} + + result = get_resources_for_user(user, mapping) + + assert set(result) == {"res-a", "res-b", "res-c"} + assert len(result) == 3 # no duplicates + + def test_returns_empty_for_no_matching_groups(self): + user = self._make_user_with_groups(["unknown"]) + mapping = {"gpu": ["res-a"]} + + result = 
get_resources_for_user(user, mapping) + + assert result == [] + + def test_returns_empty_for_user_with_no_groups(self): + user = self._make_user_with_groups([]) + mapping = {"gpu": ["res-a"]} + + result = get_resources_for_user(user, mapping) + + assert result == [] diff --git a/runtime/values.yaml b/runtime/values.yaml index a349af5..8cb99fc 100644 --- a/runtime/values.yaml +++ b/runtime/values.yaml @@ -54,6 +54,20 @@ custom: adminUser: enabled: false + # ============================================================================ + # Allowed Origins + # ============================================================================ + # hub.allowedOrigins — sets Access-Control-Allow-Origin on Hub HTTP responses + # notebook.allowedOrigins — injected into each notebook server's startup args + # (--ServerApp.allow_origin_pat); handles kernel WebSocket + # + # Use ["*"] to allow all, or list specific domains. + # Example: ["https://mylab.example.com", "https://alt.example.org"] + hub: + allowedOrigins: [] + notebook: + allowedOrigins: [] + # ============================================================================ # Git Repository Cloning # ============================================================================ @@ -289,6 +303,9 @@ custom: - Course-CV - Course-DL - Course-LLM + - Course-PhySim + - cpu + - gpu # ============================================================================ # Quota Management