diff --git a/src/Dockerfile b/src/Dockerfile index a886c489..63a5ff10 100644 --- a/src/Dockerfile +++ b/src/Dockerfile @@ -258,16 +258,27 @@ RUN mkdir -p "${OUTPUTDIR}/usr/local/bin"; # copy pki-authority service files ADD rootfs/files/scripts/install_lxc_deps.sh /buildroot/files/scripts/ -ADD rootfs/files/configs/pki-service/pki-authority.service "${OUTPUTDIR}/etc/systemd/system" -RUN ln -s /etc/systemd/system/pki-authority.service "${OUTPUTDIR}/etc/systemd/system/multi-user.target.wants/pki-authority.service" -ADD rootfs/files/configs/pki-service/create-and-configure-pki.sh "${OUTPUTDIR}/usr/local/bin" -RUN mkdir -p "${OUTPUTDIR}/root/containers" -COPY --from=ghcr.io/super-protocol/tee-pki-authority-service-lxc:build-18725490828 /pki-authority.tar "${OUTPUTDIR}/root/containers/pki-authority.tar" -ADD rootfs/files/configs/pki-service/lxc-template.yaml "${OUTPUTDIR}/root/containers/lxc-template.yaml" -ADD rootfs/files/configs/pki-service/dnsmasq.conf "${OUTPUTDIR}/etc/lxc/dnsmasq.conf" -ADD rootfs/files/configs/pki-service/lxc-net "${OUTPUTDIR}/etc/default/lxc-net" RUN --security=insecure /buildroot/files/scripts/install_lxc_deps.sh +ARG PKI_AUTHORITY_SERVICE_LXC_TAG=build-21875562504 +RUN mkdir -p "${OUTPUTDIR}/etc/super/containers/pki-authority" +COPY --from=ghcr.io/super-protocol/tee-pki-authority-service-lxc:${PKI_AUTHORITY_SERVICE_LXC_TAG} /pki-authority.tar "${OUTPUTDIR}/etc/super/containers/pki-authority/pki-authority.tar" +ADD rootfs/files/configs/pki-service/conf/lxc-swarm-template.yaml "${OUTPUTDIR}/etc/super/containers/pki-authority/lxc-swarm-template.yaml" +ADD rootfs/files/configs/pki-service/conf/dnsmasq.conf "${OUTPUTDIR}/etc/lxc/dnsmasq.conf" +ADD rootfs/files/configs/pki-service/conf/lxc-net "${OUTPUTDIR}/etc/default/lxc-net" +RUN mkdir -p "${OUTPUTDIR}/usr/local/bin/pki-authority" +ADD rootfs/files/configs/pki-service/scripts/*.py "${OUTPUTDIR}/usr/local/bin/pki-authority/" +ADD rootfs/files/configs/pki-service/scripts/*.sh 
"${OUTPUTDIR}/usr/local/bin/pki-authority/" +ADD rootfs/files/configs/pki-service/systemd/pki-authority-init.service "${OUTPUTDIR}/etc/systemd/system" +ADD rootfs/files/configs/pki-service/systemd/pki-authority.service "${OUTPUTDIR}/etc/systemd/system" +ADD rootfs/files/configs/pki-service/systemd/pki-authority-sync.service "${OUTPUTDIR}/etc/systemd/system" +RUN mkdir -p "${OUTPUTDIR}/etc/super/pki-authority-sync" +ADD rootfs/files/configs/pki-service/conf/secrets-config.yaml "${OUTPUTDIR}/etc/super/pki-authority-sync/secrets-config.yaml" +RUN chmod +x "${OUTPUTDIR}"/usr/local/bin/pki-authority/*.py "${OUTPUTDIR}"/usr/local/bin/pki-authority/*.sh +RUN ln -s /etc/systemd/system/pki-authority-init.service "${OUTPUTDIR}/etc/systemd/system/multi-user.target.wants/pki-authority-init.service" +RUN ln -s /etc/systemd/system/pki-authority.service "${OUTPUTDIR}/etc/systemd/system/multi-user.target.wants/pki-authority.service" +RUN ln -s /etc/systemd/system/pki-authority-sync.service "${OUTPUTDIR}/etc/systemd/system/multi-user.target.wants/pki-authority-sync.service" + ADD rootfs/files/configs/etc/multipath.conf.append /buildroot/files/configs/etc/multipath.conf.append ADD rootfs/files/configs/etc/sysctl.conf.append /buildroot/files/configs/etc/sysctl.conf.append @@ -283,6 +294,8 @@ RUN ln -sf /etc/systemd/system/hardening-vm.service "${OUTPUTDIR}/etc/systemd/sy # swarm services ADD rootfs/files/configs/etc/systemd/system/swarm-db.service ${OUTPUTDIR}/etc/systemd/system/swarm-db.service RUN ln -sf /etc/systemd/system/swarm-db.service "${OUTPUTDIR}/etc/systemd/system/multi-user.target.wants/swarm-db.service" +ADD rootfs/files/scripts/prepare_swarm_db_config.py ${OUTPUTDIR}/usr/local/bin/prepare_swarm_db_config.py +RUN chmod +x ${OUTPUTDIR}/usr/local/bin/prepare_swarm_db_config.py ADD rootfs/files/configs/usr/local/bin/swarm-cloud-api.sh ${OUTPUTDIR}/usr/local/bin/swarm-cloud-api.sh ADD rootfs/files/configs/etc/systemd/system/swarm-node.service ${OUTPUTDIR}/etc/systemd/system/swarm-node.service ADD 
rootfs/files/configs/usr/local/bin/swarm-node.sh ${OUTPUTDIR}/usr/local/bin/swarm-node.sh @@ -368,6 +381,13 @@ RUN chmod +x ${OUTPUTDIR}/etc/swarm-cloud/services/*/main.py ADD rootfs/files/scripts/setup_runtime_tools.sh /buildroot/files/scripts/ RUN chmod +x /buildroot/files/scripts/setup_runtime_tools.sh RUN --security=insecure /buildroot/files/scripts/setup_runtime_tools.sh + +# install pki-sync-client npm package globally (requires python3-venv from setup_runtime_tools) +ARG PKI_SYNC_CLIENT_VERSION=2.0.6 +ADD rootfs/files/scripts/install_sync_client.sh /buildroot/files/scripts/ +RUN chmod +x /buildroot/files/scripts/install_sync_client.sh +RUN --security=insecure /buildroot/files/scripts/install_sync_client.sh "${PKI_SYNC_CLIENT_VERSION}" + # MongoDB (install official mongodb-org 7.0 via Jammy repository inside VM rootfs) ADD rootfs/files/scripts/install_mongodb.sh /buildroot/files/scripts/ RUN --security=insecure bash /buildroot/files/scripts/install_mongodb.sh diff --git a/src/repos/swarm-db b/src/repos/swarm-db index bc8d0afb..f9899571 160000 --- a/src/repos/swarm-db +++ b/src/repos/swarm-db @@ -1 +1 @@ -Subproject commit bc8d0afbb78ac2153443677e7fdf4969ae29f119 +Subproject commit f9899571f670505a85486288cb1e255c2d16a870 diff --git a/src/rootfs/files/configs/etc/systemd/system/swarm-db.service b/src/rootfs/files/configs/etc/systemd/system/swarm-db.service index 5a621707..36f7c7fb 100644 --- a/src/rootfs/files/configs/etc/systemd/system/swarm-db.service +++ b/src/rootfs/files/configs/etc/systemd/system/swarm-db.service @@ -1,7 +1,7 @@ [Unit] Description=Swarm DB service -After=network-online.target local-fs.target -Wants=network-online.target +After=network-online.target local-fs.target pki-authority.service +Wants=network-online.target pki-authority.service RequiresMountsFor=/var /var/lib /var/lib/swarm-db ConditionPathExists=/usr/local/bin/swarm-db-linux-amd64 ConditionPathExists=/sp/swarm/node-db.yaml @@ -10,7 +10,11 @@ 
ConditionPathExists=/sp/swarm/node-db.yaml Type=simple WorkingDirectory=/ ExecStartPre=mkdir -p /var/lib/swarm-db/data -ExecStart=/usr/local/bin/swarm-db-linux-amd64 -config /sp/swarm/node-db.yaml +ExecStartPre=/usr/local/bin/prepare_swarm_db_config.py \ + --base-config /sp/swarm/node-db.yaml \ + --key-file /etc/swarm/swarm.key \ + --output-config /etc/swarm/swarm-db-config.yaml +ExecStart=/usr/local/bin/swarm-db-linux-amd64 -config /etc/swarm/swarm-db-config.yaml StandardOutput=append:/var/log/swarm-db.log StandardError=append:/var/log/swarm-db-err.log Restart=always diff --git a/src/rootfs/files/configs/pki-service/dnsmasq.conf b/src/rootfs/files/configs/pki-service/conf/dnsmasq.conf similarity index 100% rename from src/rootfs/files/configs/pki-service/dnsmasq.conf rename to src/rootfs/files/configs/pki-service/conf/dnsmasq.conf diff --git a/src/rootfs/files/configs/pki-service/lxc-net b/src/rootfs/files/configs/pki-service/conf/lxc-net similarity index 100% rename from src/rootfs/files/configs/pki-service/lxc-net rename to src/rootfs/files/configs/pki-service/conf/lxc-net diff --git a/src/rootfs/files/configs/pki-service/conf/lxc-swarm-template.yaml b/src/rootfs/files/configs/pki-service/conf/lxc-swarm-template.yaml new file mode 100644 index 00000000..5f43bf65 --- /dev/null +++ b/src/rootfs/files/configs/pki-service/conf/lxc-swarm-template.yaml @@ -0,0 +1,46 @@ +api: + httpsPort: 443 + httpPort: 80 + enabledApis: + - secrets + - pki +pki: + allowedChallenges: + - token + - tdx + - sev-snp + validateParamRules: + - type: tdx + signatureVerification: github + - type: sev-snp + signatureVerification: github + tokenStorage: + storageType: file + storageFolder: /app/swarm-storage + ownDomain: ca-subroot.super-protocol.svc.cluster.local + ownChallenge: + type: tdx + certParams: + ocspUrl: '' + keyStorage: + type: trusted + storage: + type: super + keysPath: /app/keys + mode: + role: swarm + swarmMode: init + storage: + storageType: file + storageFolder: 
/app/swarm-storage + networkSettings: + networkType: trusted +secretsStorage: + static: + swarmKey: dummy-swarm-key + storage: + storageType: file + storageFolder: /app/swarm-storage + validationCaBundle: + type: pki + diff --git a/src/rootfs/files/configs/pki-service/conf/secrets-config.yaml b/src/rootfs/files/configs/pki-service/conf/secrets-config.yaml new file mode 100644 index 00000000..f0e811e4 --- /dev/null +++ b/src/rootfs/files/configs/pki-service/conf/secrets-config.yaml @@ -0,0 +1,12 @@ +# PKI Sync Client - Secrets Configuration +secrets: + - secretName: basic_certificate + saveTo: /var/lib/lxc/pki-authority/rootfs/app/swarm-storage/basic_certificate + - secretName: basic_privateKey + saveTo: /var/lib/lxc/pki-authority/rootfs/app/swarm-storage/basic_privateKey + - secretName: lite_certificate + saveTo: /var/lib/lxc/pki-authority/rootfs/app/swarm-storage/lite_certificate + - secretName: lite_privateKey + saveTo: /var/lib/lxc/pki-authority/rootfs/app/swarm-storage/lite_privateKey + - secretName: swarmKey + saveTo: /etc/swarm/swarm.key diff --git a/src/rootfs/files/configs/pki-service/create-and-configure-pki.sh b/src/rootfs/files/configs/pki-service/create-and-configure-pki.sh deleted file mode 100755 index 7d2df1ed..00000000 --- a/src/rootfs/files/configs/pki-service/create-and-configure-pki.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -set -euo pipefail - -CONTAINER_NAME="pki-authority" - -if lxc-info -n "${CONTAINER_NAME}" &>/dev/null; then - echo "Container '${CONTAINER_NAME}' already exists." -else - echo "Container '${CONTAINER_NAME}' not found. Creating..." - lxc-create -n "${CONTAINER_NAME}" -t oci -- --url docker-archive://root/containers/pki-authority.tar - echo "Container '${CONTAINER_NAME}' created." 
-fi - -CPU_TYPE="untrusted" -if [[ -c "/dev/tdx_guest" ]] ; then - CPU_TYPE="tdx"; -elif [[ -c "/dev/sev-guest" ]]; then - CPU_TYPE="sev-snp"; -fi - -export CPU_TYPE="${CPU_TYPE}" - -SRC_YAML="/root/containers/lxc-template.yaml" -DST_YAML="/var/lib/lxc/pki-authority/rootfs/app/conf/lxc.yaml" - -if [[ -f "${SRC_YAML}" ]]; then - if command -v yq-go >/dev/null 2>&1; then - yq-go e '.pki.ownChallenge.type = strenv(CPU_TYPE)' "${SRC_YAML}" > "${DST_YAML}" - echo "Patched ${DST_YAML} with type: ${CPU_TYPE} using yq." - else - echo "Error: yq-go is not installed. Please install yq-go for YAML editing." - exit 1 - fi -else - echo "Error: ${SRC_YAML} not found." - exit 1 -fi - -# --- Trusted subroot env handling --- -TRUSTED_VARS=( - AS__pki__baseDomain - AS__pki__ownDomain - AS__pki__certParams__ocspUrl - AS__pki__mode__attestationServiceSource__baseUrl - AS__pki__mode__attestationServiceSource__caBundle -) - -SRC_SUBROOT_ENV="/sp/subroot.env" -DST_SUBROOT_ENV="/var/lib/lxc/pki-authority/rootfs/app/subroot.env" - -# If source exists, (re)create destination with only trusted variables -if [[ -f "${SRC_SUBROOT_ENV}" ]]; then - # Remove destination first to ensure a clean recreate - rm -f "${DST_SUBROOT_ENV}" - - # Header explaining autogenerated file - echo "# Autogenerated from ${SRC_SUBROOT_ENV}. Contains only trusted variables." > "${DST_SUBROOT_ENV}" - - for var in "${TRUSTED_VARS[@]}"; do - # capture first matching line in form VAR="value" - line="$(grep -m1 -E "^${var}=\".*\"" "${SRC_SUBROOT_ENV}" 2>/dev/null || true)" - if [[ -n "${line}" ]]; then - echo "${line}" >> "${DST_SUBROOT_ENV}" - fi - done - - chmod 0644 "${DST_SUBROOT_ENV}" || true - echo "Created ${DST_SUBROOT_ENV} with trusted variables." 
-else - echo "Info: ${SRC_SUBROOT_ENV} not found; skipping creation of ${DST_SUBROOT_ENV}" -fi - -CONFIG_FILE="/var/lib/lxc/pki-authority/config" -CONFIG_BAK="${CONFIG_FILE}.bak" - -# Always restore config from backup if backup exists -if [[ -f "${CONFIG_BAK}" ]]; then - cp "${CONFIG_BAK}" "${CONFIG_FILE}" -else - # Create backup before first patch - if [[ -f "${CONFIG_FILE}" ]]; then - cp "${CONFIG_FILE}" "${CONFIG_BAK}" - fi -fi - -# This MAC address is used to get a static IP address from DHCP, see /etc/lxc/dnsmasq.conf -echo "lxc.net.0.hwaddr = 4e:fc:0a:d5:2d:ff" >> "${CONFIG_FILE}" - -if [[ "${CPU_TYPE}" = "sev-snp" ]]; then - DEV_ID="$(stat -c '%t:%T' /dev/sev-guest | awk -F: '{printf "%d:%d\n", "0x"$1, "0x"$2}')" - echo "lxc.cgroup2.devices.allow = c ${DEV_ID} rwm" >> "${CONFIG_FILE}" - echo "lxc.mount.entry = /dev/sev-guest dev/sev-guest none bind,optional,create=file" >> "${CONFIG_FILE}" -elif [[ "${CPU_TYPE}" = "tdx" ]]; then - DEV_ID="$(stat -c '%t:%T' /dev/tdx_guest | awk -F: '{printf "%d:%d\n", "0x"$1, "0x"$2}')" - echo "lxc.cgroup2.devices.allow = c ${DEV_ID} rwm" >> "${CONFIG_FILE}" - echo "lxc.mount.entry = /dev/tdx_guest dev/tdx_guest none bind,optional,create=file" >> "${CONFIG_FILE}" - if [[ -f "/etc/tdx-attest.conf" ]]; then - echo "lxc.mount.entry = /etc/tdx-attest.conf etc/tdx-attest.conf none bind,ro,create=file" >> "${CONFIG_FILE}" - fi -fi diff --git a/src/rootfs/files/configs/pki-service/lxc-template.yaml b/src/rootfs/files/configs/pki-service/lxc-template.yaml deleted file mode 100644 index c336ee59..00000000 --- a/src/rootfs/files/configs/pki-service/lxc-template.yaml +++ /dev/null @@ -1,144 +0,0 @@ -api: - port: 443 -pki: - allowedChallenges: - - token - tokenChallengeFilePath: /app/access-token - ownDomain: ca-subroot.super-protocol.svc.cluster.local - ownChallenge: - type: untrusted - certParams: - ocspUrl: https://ocsp.certs.superprotocol.com/v1/ocsp - keyStorage: - type: trusted - storage: - type: super - keysPath: /app/keys - mode: 
- attestationServiceSource: - baseUrl: https://ca-subroot.certs.superprotocol.com/api/v1/pki - caBundle: | - -----BEGIN CERTIFICATE----- - MIIWzDCCFbSgAwIBAgIQRvHafieXotvccTAPGbFOlTANBgkqhkiG9w0BAQsFADB2 - MSIwIAYDVQQDExlTdXBlclByb3RvY29sIFRFRSBSb290IENBMQswCQYDVQQGEwJV - UzELMAkGA1UECBMCTlkxETAPBgNVBAcTCE5ldyBZb3JrMRYwFAYDVQQKEw1TdXBl - clByb3RvY29sMQswCQYDVQQLEwJJVDAeFw0yNTA3MjExNDU2MjNaFw0zNTA4MDEw - MDAwMDBaMHYxIjAgBgNVBAMTGVN1cGVyUHJvdG9jb2wgVEVFIFJvb3QgQ0ExCzAJ - BgNVBAYTAlVTMQswCQYDVQQIEwJOWTERMA8GA1UEBxMITmV3IFlvcmsxFjAUBgNV - BAoTDVN1cGVyUHJvdG9jb2wxCzAJBgNVBAsTAklUMIIBIjANBgkqhkiG9w0BAQEF - AAOCAQ8AMIIBCgKCAQEAuscjQuMdTyM+COTzmj1SFcCwBQtMxkK8uqk2dy7okwU0 - U2beMso47+AZ7hROOpRSCT2Z9lf5sJEI+Jw3ptdjxAK0ALvvokNF5/9Wg6IipLsO - sRaCgpsPwTNFWN374vnDdWQsi4hOlcKLBoUifSQrl7/dSLjz6qxUOCLifqYtdd4/ - Sln8G+9GHzmBtfq0CWB3xtjYFxXPCmuy5lUTJPg+QFL5EYY/e3/UrefmLxEXjskn - QbmpA9pMXexwdEzCXDVocPQJOh+SxR7K4ZGXpWxpMcP71jBJfPLKb46ijxyK2YZW - xzfWXYgkctKhPLx5v3M7vByqNLcn8Lm/VkwZy3hXhwIDAQABo4ITVDCCE1AwDwYD - VR0TAQH/BAUwAwEB/zAlBgNVHREEHjAcghpjYS5jZXJ0cy5zdXBlcnByb3RvY29s - LmNvbTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDgYDVR0PAQH/BAQD - AgKkMB8GA1UdIwQYMBaAFCVxTWT14hJHoY4lE0vbxcpala/DMB0GA1UdDgQWBBQl - cU1k9eISR6GOJRNL28XKWpWvwzAUBggrBgEDxTgBAQQIc2d4LWRjYXAwghKPBgsG - CSqGSIb4TYo5BgSCEn4DAAIAAAAAAAsAEACTmnIz95xMqZQKDbOVfwYHwuAeEM7b - IyMk8VaVVsIMaAAAAAALEA8O//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAFAAAAAAAAAOcAAAAAAAAAg090avZ3FKIIEzhH7K4PGXPW - 5VTSomOCmLCnduwrcNUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADTq - fKiANP6A8gnpfND4RC97piHrKl9gOTqwWHGua5edAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALq+ - EkZ4saPu4J+/2S4ClapSnxhp06LLcUkuKbsv9ZIhAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAADKEAAAaM6Hlshrc9tC24YIJocfnTlZYBKHTDTj0+mv/7Ov - 
lJ2x9T5sIaTNGfStH46LE1+5hVH3bPwJvDI0Ws2E/BBGDrdAjZv2T83/USoIQTTL - TZ+kcZqLMigsqNeJHh5uBLsF5uqzPNwBDeJF1nIi/MaV+qcGve37Eu8XoSCGSOCj - b2gLEA8O//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAVAAAAAAAAAOcAAAAAAAAAeP6M/QEJWg8Qiv9cQGJLk2EtbCi3PhqNKBecnd8O - BoYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIxPV3XXllA+lhN/d8aK - gpoAVqyN7XAUCwgbCUSQxXv/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAQALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANWq0gbaZ0iNLGHsRl9v - WJK1s51EREhaa5246jLZcL3NAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAArkN01FJ5vTX8xsD4zCyuu9jkUA276SlrfE9PttOJ2vf2ljC3d1oyCUyIPv+8d - fR9Jbk3FLW/LY+BFJaQKnnGIIAAAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRob - HB0eHwUAYg4AAC0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlFOHpDQ0JK - aWdBd0lCQWdJVVFaT1NWSy8vQlZkVkJXeUIwaHM4dURoTkc2Z3dDZ1lJS29aSXpq - MEVBd0l3CmNERWlNQ0FHQTFVRUF3d1pTVzUwWld3Z1UwZFlJRkJEU3lCUWJHRjBa - bTl5YlNCRFFURWFNQmdHQTFVRUNnd1IKU1c1MFpXd2dRMjl5Y0c5eVlYUnBiMjR4 - RkRBU0JnTlZCQWNNQzFOaGJuUmhJRU5zWVhKaE1Rc3dDUVlEVlFRSQpEQUpEUVRF - TE1Ba0dBMVVFQmhNQ1ZWTXdIaGNOTWpVd056QTVNVFV3T1RBd1doY05Nekl3TnpB - NU1UVXdPVEF3CldqQndNU0l3SUFZRFZRUUREQmxKYm5SbGJDQlRSMWdnVUVOTElF - TmxjblJwWm1sallYUmxNUm93R0FZRFZRUUsKREJGSmJuUmxiQ0JEYjNKd2IzSmhk - R2x2YmpFVU1CSUdBMVVFQnd3TFUyRnVkR0VnUTJ4aGNtRXhDekFKQmdOVgpCQWdN - QWtOQk1Rc3dDUVlEVlFRR0V3SlZVekJaTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5 - QXdFSEEwSUFCRFFBCkZNa2lVR1Z4S2luMnpIODB2MkMvbXpGQ0N1MnV0VHBOdE1V - TzhjMUJ6U1F1ZlZ0YjYyT2JFMUhOUVNvZmE4Q3IKdXVLeWJ0VHBRT3BaakZXQkFh - S2pnZ01PTUlJRENqQWZCZ05WSFNNRUdEQVdnQlNWYjEzTnZSdmg2VUJKeWRUMApN - ODRCVnd2ZVZEQnJCZ05WSFI4RVpEQmlNR0NnWHFCY2hscG9kSFJ3Y3pvdkwyRndh - UzUwY25WemRHVmtjMlZ5CmRtbGpaWE11YVc1MFpXd3VZMjl0TDNObmVDOWpaWEow - YVdacFkyRjBhVzl1TDNZMEwzQmphMk55YkQ5allUMXcKYkdGMFptOXliU1psYm1O - dlpHbHVaejFrWlhJd0hRWURWUjBPQkJZRUZPUHdhb25yYnpPZks0My8vZ3Y5M0Fm - 
QgpiMGNZTUE0R0ExVWREd0VCL3dRRUF3SUd3REFNQmdOVkhSTUJBZjhFQWpBQU1J - SUNPd1lKS29aSWh2aE5BUTBCCkJJSUNMRENDQWlnd0hnWUtLb1pJaHZoTkFRMEJB - UVFRM3cvN014Q2Vwbkt2V2Q2WXhZb0VJVENDQVdVR0NpcUcKU0liNFRRRU5BUUl3 - Z2dGVk1CQUdDeXFHU0liNFRRRU5BUUlCQWdFTE1CQUdDeXFHU0liNFRRRU5BUUlD - QWdFTApNQkFHQ3lxR1NJYjRUUUVOQVFJREFnRURNQkFHQ3lxR1NJYjRUUUVOQVFJ - RUFnRURNQkVHQ3lxR1NJYjRUUUVOCkFRSUZBZ0lBL3pBUkJnc3Foa2lHK0UwQkRR - RUNCZ0lDQVA4d0VBWUxLb1pJaHZoTkFRMEJBZ2NDQVFBd0VBWUwKS29aSWh2aE5B - UTBCQWdnQ0FRQXdFQVlMS29aSWh2aE5BUTBCQWdrQ0FRQXdFQVlMS29aSWh2aE5B - UTBCQWdvQwpBUUF3RUFZTEtvWklodmhOQVEwQkFnc0NBUUF3RUFZTEtvWklodmhO - QVEwQkFnd0NBUUF3RUFZTEtvWklodmhOCkFRMEJBZzBDQVFBd0VBWUxLb1pJaHZo - TkFRMEJBZzRDQVFBd0VBWUxLb1pJaHZoTkFRMEJBZzhDQVFBd0VBWUwKS29aSWh2 - aE5BUTBCQWhBQ0FRQXdFQVlMS29aSWh2aE5BUTBCQWhFQ0FRMHdId1lMS29aSWh2 - aE5BUTBCQWhJRQpFQXNMQXdQLy93QUFBQUFBQUFBQUFBQXdFQVlLS29aSWh2aE5B - UTBCQXdRQ0FBQXdGQVlLS29aSWh2aE5BUTBCCkJBUUdNR0JxQUFBQU1BOEdDaXFH - U0liNFRRRU5BUVVLQVFFd0hnWUtLb1pJaHZoTkFRMEJCZ1FRcDZtY096M0EKdDRB - SVRmdk84R1Y3cHpCRUJnb3Foa2lHK0UwQkRRRUhNRFl3RUFZTEtvWklodmhOQVEw - QkJ3RUJBZjh3RUFZTApLb1pJaHZoTkFRMEJCd0lCQWY4d0VBWUxLb1pJaHZoTkFR - MEJCd01CQWY4d0NnWUlLb1pJemowRUF3SURTUUF3ClJnSWhBTEVtR1U2VldkdG5n - YTZiTG5yaksrWEdWczlMVERSRkZURTRpcy9qU05tQkFpRUExY21KNUpkV0VIYlEK - L2dyWVJ2L3c4MytTQmpidTJKQWcxR0dJNTJmSjFUUT0KLS0tLS1FTkQgQ0VSVElG - SUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQ2xqQ0NB - ajJnQXdJQkFnSVZBSlZ2WGMyOUcrSHBRRW5KMVBRenpnRlhDOTVVTUFvR0NDcUdT - TTQ5QkFNQwpNR2d4R2pBWUJnTlZCQU1NRVVsdWRHVnNJRk5IV0NCU2IyOTBJRU5C - TVJvd0dBWURWUVFLREJGSmJuUmxiQ0JECmIzSndiM0poZEdsdmJqRVVNQklHQTFV - RUJ3d0xVMkZ1ZEdFZ1EyeGhjbUV4Q3pBSkJnTlZCQWdNQWtOQk1Rc3cKQ1FZRFZR - UUdFd0pWVXpBZUZ3MHhPREExTWpFeE1EVXdNVEJhRncwek16QTFNakV4TURVd01U - QmFNSEF4SWpBZwpCZ05WQkFNTUdVbHVkR1ZzSUZOSFdDQlFRMHNnVUd4aGRHWnZj - bTBnUTBFeEdqQVlCZ05WQkFvTUVVbHVkR1ZzCklFTnZjbkJ2Y21GMGFXOXVNUlF3 - RWdZRFZRUUhEQXRUWVc1MFlTQkRiR0Z5WVRFTE1Ba0dBMVVFQ0F3Q1EwRXgKQ3pB - 
SkJnTlZCQVlUQWxWVE1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdB - RU5TQi83dDIxbFhTTwoyQ3V6cHh3NzRlSkI3MkV5REdnVzVyWEN0eDJ0VlRMcTZo - S2s2eitVaVJaQ25xUjdwc092Z3FGZVN4bG1UbEpsCmVUbWkyV1l6M3FPQnV6Q0J1 - REFmQmdOVkhTTUVHREFXZ0JRaVpReldXcDAwaWZPRHRKVlN2MUFiT1NjR3JEQlMK - QmdOVkhSOEVTekJKTUVlZ1JhQkRoa0ZvZEhSd2N6b3ZMMk5sY25ScFptbGpZWFJs - Y3k1MGNuVnpkR1ZrYzJWeQpkbWxqWlhNdWFXNTBaV3d1WTI5dEwwbHVkR1ZzVTBk - WVVtOXZkRU5CTG1SbGNqQWRCZ05WSFE0RUZnUVVsVzlkCnpiMGI0ZWxBU2NuVTlE - UE9BVmNMM2xRd0RnWURWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1B - WUIKQWY4Q0FRQXdDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdYc1ZraTB3K2k2VllH - VzNVRi8yMnVhWGUwWUpEajFVZQpuQStUakQxYWk1Y0NJQ1liMVNBbUQ1eGtmVFZw - dm80VW95aVNZeHJEV0xtVVI0Q0k5Tkt5ZlBOKwotLS0tLUVORCBDRVJUSUZJQ0FU - RS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlDanpDQ0FqU2dB - d0lCQWdJVUltVU0xbHFkTkluemc3U1ZVcjlRR3prbkJxd3dDZ1lJS29aSXpqMEVB - d0l3CmFERWFNQmdHQTFVRUF3d1JTVzUwWld3Z1UwZFlJRkp2YjNRZ1EwRXhHakFZ - QmdOVkJBb01FVWx1ZEdWc0lFTnYKY25CdmNtRjBhVzl1TVJRd0VnWURWUVFIREF0 - VFlXNTBZU0JEYkdGeVlURUxNQWtHQTFVRUNBd0NRMEV4Q3pBSgpCZ05WQkFZVEFs - VlRNQjRYRFRFNE1EVXlNVEV3TkRVeE1Gb1hEVFE1TVRJek1USXpOVGsxT1Zvd2FE - RWFNQmdHCkExVUVBd3dSU1c1MFpXd2dVMGRZSUZKdmIzUWdRMEV4R2pBWUJnTlZC - QW9NRVVsdWRHVnNJRU52Y25CdmNtRjAKYVc5dU1SUXdFZ1lEVlFRSERBdFRZVzUw - WVNCRGJHRnlZVEVMTUFrR0ExVUVDQXdDUTBFeEN6QUpCZ05WQkFZVApBbFZUTUZr - d0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFQzZuRXdNRElZWk9qL2lQ - V3NDemFFS2k3CjFPaU9TTFJGaFdHamJuQlZKZlZua1k0dTNJamtEWVlMME14TzRt - cXN5WWpsQmFsVFZZeEZQMnNKQks1emxLT0IKdXpDQnVEQWZCZ05WSFNNRUdEQVdn - QlFpWlF6V1dwMDBpZk9EdEpWU3YxQWJPU2NHckRCU0JnTlZIUjhFU3pCSgpNRWVn - UmFCRGhrRm9kSFJ3Y3pvdkwyTmxjblJwWm1sallYUmxjeTUwY25WemRHVmtjMlZ5 - ZG1salpYTXVhVzUwClpXd3VZMjl0TDBsdWRHVnNVMGRZVW05dmRFTkJMbVJsY2pB - ZEJnTlZIUTRFRmdRVUltVU0xbHFkTkluemc3U1YKVXI5UUd6a25CcXd3RGdZRFZS - MFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUV3Q2dZSQpL - b1pJemowRUF3SURTUUF3UmdJaEFPVy81UWtSK1M5Q2lTRGNOb293THVQUkxzV0dm - 
L1lpN0dTWDk0Qmd3VHdnCkFpRUE0SjBsckhvTXMrWG81by9zWDZPOVFXeEhSQXZa - VUdPZFJRN2N2cVJYYXFJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgAwDQYJ - KoZIhvcNAQELBQADggEBADk76T9z80TGctAh/LvBX8LMCLZVb/F2nw7mQy9Q7N1w - 6fo7TRV0XV6/yvctv3eB0oly0ybcuBZmARkokKAy47Sl6EhO16t2/mpp3CYx7+5A - +Rx5Mupwtziwa6IXfPa3Ml+e99HMXv8CBSbEP6NiMvxNJPXbncjdWI6T+EWYnB9O - n7HcKweXpk3sPBPvm7Tyq0n3Q7+3rM5JaPr6O4+ksVG/TepqHoYF+KG9AdJyzv6v - 69HeBsvyBfQ1BhX8vqAKJDxbML0eYzDWZ6tDqMuJ/CdqTGJ97d0YISTJ2eRTmXiP - jm1g0H4p0FUGuDItBX7q+mkfWkTBgl1fR0ovP0YaWvk= - -----END CERTIFICATE----- diff --git a/src/rootfs/files/configs/pki-service/scripts/pki_configure.py b/src/rootfs/files/configs/pki-service/scripts/pki_configure.py new file mode 100755 index 00000000..692280f0 --- /dev/null +++ b/src/rootfs/files/configs/pki-service/scripts/pki_configure.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +""" +PKI Authority LXC container configuration. +Configures the container with network, device access, and runtime settings. +""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent)) +from pki_helpers import ( + log, LogLevel, detect_cpu_type, detect_vm_mode, detect_network_type, + patch_yaml_config, patch_lxc_config, mount_vm_certs, get_pki_authority_param, + setup_iptables, update_pccs_url, generate_swarm_key, load_swarm_key, + read_network_type_from_certificate, + PKI_SERVICE_NAME, VMMode, NetworkType, STORAGE_PATH +) + + +def main(): + """Main configuration logic.""" + log(LogLevel.INFO, "Starting PKI Authority configuration") + + # Check if container exists + if not Path(f"/var/lib/lxc/{PKI_SERVICE_NAME}").exists(): + log(LogLevel.ERROR, f"Container '{PKI_SERVICE_NAME}' does not exist") + sys.exit(1) + + # Detect environment + cpu_type = detect_cpu_type() + vm_mode = detect_vm_mode() + + log(LogLevel.INFO, f"CPU type: {cpu_type}") + log(LogLevel.INFO, f"VM mode: {vm_mode.value}") + + # Network type detection based on VM mode + if vm_mode == VMMode.SWARM_INIT: + # In swarm-init mode: read 
from kernel cmdline + network_type = detect_network_type() + log(LogLevel.INFO, f"Network type (from cmdline): {network_type.value}") + else: + # In swarm-normal mode: verify required files exist in swarm-storage + # These files should be synced by pki-authority-sync.service before this script runs + required_files = [ + "basic_certificate", + "basic_privateKey", + "lite_certificate", + "lite_privateKey" + ] + + missing_files = [f for f in required_files if not (STORAGE_PATH / f).exists()] + if missing_files: + error_msg = ( + f"Required files missing in {STORAGE_PATH}: {', '.join(missing_files)}. " + "These files should be synced by pki-authority-sync.service before this script runs." + ) + log(LogLevel.ERROR, error_msg) + sys.exit(1) + + log(LogLevel.INFO, "All required swarm-storage files are present") + + # Read network type from certificate OID + network_type = read_network_type_from_certificate() + log(LogLevel.INFO, f"Network type (from certificate): {network_type.value}") + + try: + try: + pki_domain = get_pki_authority_param("domain") + except (FileNotFoundError, ValueError) as e: + log(LogLevel.WARN, f"Failed to read domain from config: {e}") + pki_domain = "localhost" + log(LogLevel.INFO, f"Using default domain: {pki_domain}") + + network_id = get_pki_authority_param("networkID") + + # Get or generate swarm key based on VM mode + if vm_mode == VMMode.SWARM_INIT: + # In swarm-init mode: try to load existing key, generate if doesn't exist + try: + swarm_key = load_swarm_key() + except FileNotFoundError: + swarm_key = generate_swarm_key() + else: + # In swarm-normal mode: key must exist + swarm_key = load_swarm_key() + + patch_yaml_config( + cpu_type=cpu_type, + vm_mode=vm_mode, + network_type=network_type, + pki_domain=pki_domain, + network_id=network_id, + swarm_key=swarm_key + ) + log(LogLevel.INFO, "YAML config patched successfully") + + patch_lxc_config(cpu_type) + log(LogLevel.INFO, "LXC config patched successfully") + + if vm_mode == 
VMMode.SWARM_NORMAL: + mount_vm_certs() + + # Setup iptables rules + setup_iptables() + log(LogLevel.INFO, "iptables rules configured successfully") + + # Update PCCS URL in container + update_pccs_url() + log(LogLevel.INFO, "PCCS URL updated successfully") + + except Exception as e: + log(LogLevel.ERROR, f"Configuration failed: {e}") + sys.exit(1) + + log(LogLevel.INFO, "PKI Authority configuration completed successfully") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/rootfs/files/configs/pki-service/scripts/pki_healthcheck.py b/src/rootfs/files/configs/pki-service/scripts/pki_healthcheck.py new file mode 100755 index 00000000..33c80b1d --- /dev/null +++ b/src/rootfs/files/configs/pki-service/scripts/pki_healthcheck.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +""" +PKI Authority health check. +Checks if the PKI Authority service inside the container is healthy. +""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent)) +from pki_helpers import LXCContainer, log, LogLevel, PKI_SERVICE_NAME + + +def main(): + """Main health check logic.""" + log(LogLevel.INFO, "Starting PKI Authority health check") + + # Create container manager + container = LXCContainer(PKI_SERVICE_NAME) + + # Check if container is running + if not container.is_running(): + log(LogLevel.ERROR, f"Container '{PKI_SERVICE_NAME}' is not running") + sys.exit(1) + + # Check if service inside container is healthy + if not container.is_service_healthy(): + log(LogLevel.ERROR, "PKI Authority service is not healthy") + sys.exit(1) + + log(LogLevel.INFO, "PKI Authority service is healthy") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/rootfs/files/configs/pki-service/scripts/pki_helpers.py b/src/rootfs/files/configs/pki-service/scripts/pki_helpers.py new file mode 100755 index 00000000..f6f91a78 --- /dev/null +++ b/src/rootfs/files/configs/pki-service/scripts/pki_helpers.py @@ -0,0 +1,943 @@ +#!/usr/bin/env python3 +""" 
+PKI Authority LXC container management helpers. +""" + +import os +import re +import secrets +import shutil +import subprocess +import sys +import urllib.request +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import List, Optional + +import yaml +from cryptography import x509 +from cryptography.x509.oid import ObjectIdentifier + +PKI_SERVICE_NAME = "pki-authority" +SERVICE_INSIDE_CONTAINER = "tee-pki" +BRIDGE_NAME = "lxcbr0" +PCCS_PORT = "8081" +PKI_SERVICE_EXTERNAL_PORT = "8443" +CONTAINER_IP = "10.0.3.100" +WIREGUARD_INTERFACE = "wg0" +EXTERNAL_INTERFACE = "enp0s1" # Default external network interface +CONTAINER_ROOTFS = f"/var/lib/lxc/{PKI_SERVICE_NAME}/rootfs" +STORAGE_PATH = Path(f"{CONTAINER_ROOTFS}/app/swarm-storage") +IPTABLES_RULE_COMMENT = f"{PKI_SERVICE_NAME}-rule" +SWARM_ENV_YAML = "/sp/swarm/swarm-env.yaml" +VM_CERTS_HOST_DIR = "/etc/super/certs/vm" +VM_CERTS_CONTAINER_DIR = "app/vm-certs" # Relative path for lxc.mount.entry +VM_CERT_FILE_NAME = "vm_cert.pem" +VM_CERT_CONTAINER_FILE = f"/{VM_CERTS_CONTAINER_DIR}/{VM_CERT_FILE_NAME}" +SWARM_KEY_FILE = "/etc/swarm/swarm.key" +OID_CUSTOM_EXTENSION_NETWORK_TYPE = "1.3.6.1.3.8888.4" + + +class LogLevel(Enum): + """Log levels for structured logging.""" + INFO = "INFO" + WARN = "WARN" + ERROR = "ERROR" + DEBUG = "DEBUG" + + +def log(level: LogLevel, message: str): + """Log message with timestamp, service name and level.""" + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"[{timestamp}] [{PKI_SERVICE_NAME}] [{level.value}] {message}", file=sys.stderr) + + +class VMMode(Enum): + """VM mode types.""" + SWARM_INIT = "swarm-init" + SWARM_NORMAL = "swarm-normal" + + +class NetworkType(Enum): + """Network type types.""" + TRUSTED = "trusted" + UNTRUSTED = "untrusted" + + +class LXCContainer: + """Manager for LXC container operations.""" + + def __init__(self, container_name: str = PKI_SERVICE_NAME): + self.container_name = container_name + + def 
start(self, timeout: int = 30) -> int: + """Start LXC container. Returns exit code.""" + log(LogLevel.INFO, f"Starting LXC container {self.container_name}") + result = subprocess.run( + ["lxc-start", "-n", self.container_name], + capture_output=True, + text=True, + timeout=timeout, + check=False + ) + return result.returncode + + def stop(self, graceful_timeout: int = 30, command_timeout: int = 60) -> int: + """Stop LXC container gracefully. Returns exit code.""" + log(LogLevel.INFO, f"Stopping LXC container {self.container_name} gracefully") + result = subprocess.run( + ["lxc-stop", "-n", self.container_name, "-t", str(graceful_timeout)], + capture_output=True, + text=True, + timeout=command_timeout, + check=False + ) + return result.returncode + + def destroy(self) -> int: + """Destroy LXC container. Returns exit code.""" + log(LogLevel.INFO, f"Destroying LXC container {self.container_name}") + result = subprocess.run( + ["lxc-destroy", "-n", self.container_name, "-f"], + capture_output=True, + text=True, + timeout=60, + check=False + ) + + if result.returncode != 0: + log(LogLevel.ERROR, f"Failed to destroy container: {result.stderr}") + + return result.returncode + + def is_running(self) -> bool: + """Check if LXC container is running.""" + try: + result = subprocess.run( + ["lxc-ls", "--running"], + capture_output=True, + text=True, + check=False + ) + if self.container_name not in result.stdout: + log(LogLevel.INFO, f"LXC container {self.container_name} is not running") + return False + return True + except Exception as error: # pylint: disable=broad-exception-caught + log(LogLevel.ERROR, f"Failed to check LXC container status: {error}") + return False + + def get_ip(self) -> Optional[str]: + """Get container IP address.""" + try: + result = subprocess.run( + ["lxc-info", "-n", self.container_name, "-iH"], + capture_output=True, + text=True, + check=False + ) + container_ip = result.stdout.strip() if result.stdout.strip() else None + return container_ip + 
except Exception as error: # pylint: disable=broad-exception-caught + log(LogLevel.ERROR, f"Failed to get container IP: {error}") + return None + + def create( + self, + archive_path: str = "/etc/super/containers/pki-authority/pki-authority.tar" + ) -> bool: + """Create LXC container if it doesn't exist. + + Returns True if created or already exists. + """ + # Check if container already exists + result = subprocess.run( + ["lxc-info", "-n", self.container_name], + capture_output=True, + text=True, + check=False + ) + + if result.returncode == 0: + log(LogLevel.INFO, f"Container '{self.container_name}' already exists.") + return True + + log(LogLevel.INFO, f"Container '{self.container_name}' not found. Creating...") + try: + subprocess.run( + [ + "lxc-create", + "-n", self.container_name, + "-t", "oci", + "--", + "--url", f"docker-archive:{archive_path}" + ], + check=True + ) + log(LogLevel.INFO, f"Container '{self.container_name}' created.") + return True + except subprocess.CalledProcessError as error: + log(LogLevel.ERROR, f"Failed to create container: {error}") + return False + + def is_service_healthy(self, healthcheck_url: str = "/healthcheck") -> bool: + """Check if service inside container is running and healthy.""" + try: + # Check service status inside container + result = subprocess.run( + [ + "lxc-attach", "-n", self.container_name, "--", + "systemctl", "is-active", SERVICE_INSIDE_CONTAINER + ], + capture_output=True, + text=True, + check=False + ) + status = result.stdout.strip() + + if status != "active": + log(LogLevel.INFO, f"Service {SERVICE_INSIDE_CONTAINER} status: {status}") + return False + + # Service is active, check healthcheck endpoint + container_ip = self.get_ip() + if not container_ip: + log(LogLevel.INFO, "Could not get container IP") + return False + + # Perform HTTP healthcheck + try: + req = urllib.request.Request(f"http://{container_ip}{healthcheck_url}") + with urllib.request.urlopen(req, timeout=5) as response: + if response.status 
== 200: + return True + + log( + LogLevel.INFO, + f"Healthcheck returned status: {response.status}" + ) + return False + except Exception as error: # pylint: disable=broad-exception-caught + log(LogLevel.INFO, f"Healthcheck failed: {error}") + return False + + except Exception as error: # pylint: disable=broad-exception-caught + log(LogLevel.ERROR, f"Failed to check service health: {error}") + return False + + +def detect_cpu_type() -> str: + """Detect CPU type based on available devices.""" + if Path("/dev/tdx_guest").is_char_device(): + return "tdx" + if Path("/dev/sev-guest").is_char_device(): + return "sev-snp" + return "untrusted" + + +def detect_vm_mode() -> VMMode: + """Detect VM mode from kernel command line.""" + try: + with open("/proc/cmdline", "r", encoding="utf-8") as file: + cmdline = file.read() + + if "vm_mode=swarm-init" in cmdline: + return VMMode.SWARM_INIT + return VMMode.SWARM_NORMAL + except FileNotFoundError: + return VMMode.SWARM_NORMAL + + +def detect_network_type() -> NetworkType: + """Detect network type from kernel command line. + + Returns: + NetworkType.UNTRUSTED if allow_untrusted=true is present in cmdline, + otherwise NetworkType.TRUSTED. + """ + try: + with open("/proc/cmdline", "r", encoding="utf-8") as file: + cmdline = file.read() + + if "allow_untrusted=true" in cmdline: + return NetworkType.UNTRUSTED + return NetworkType.TRUSTED + except FileNotFoundError: + return NetworkType.TRUSTED + + +def read_network_type_from_certificate(cert_path: Path = STORAGE_PATH / "basic_certificate") -> NetworkType: + """Read network type from certificate's custom OID extension. + + Args: + cert_path: Path to PEM certificate file. + + Returns: + NetworkType.TRUSTED or NetworkType.UNTRUSTED based on the value of the + custom extension identified by OID_CUSTOM_EXTENSION_NETWORK_TYPE. + Defaults to NetworkType.TRUSTED if the extension is not present or has + another value. 
+ """ + try: + if not cert_path.exists(): + error_msg = f"Certificate not found at {cert_path}" + log(LogLevel.ERROR, error_msg) + raise FileNotFoundError(error_msg) + + with open(cert_path, "rb") as f: + cert = x509.load_pem_x509_certificate(f.read()) + + # Custom OID for network type + network_type_oid = ObjectIdentifier(OID_CUSTOM_EXTENSION_NETWORK_TYPE) + + try: + # Try to get the extension by OID + extension = cert.extensions.get_extension_for_oid(network_type_oid) + # Extension value is typically ASN.1 encoded, get raw value + value = extension.value.value.decode('utf-8').strip() + + if value == NetworkType.TRUSTED.value: + log(LogLevel.INFO, f"Network type from certificate OID: {value}") + return NetworkType.TRUSTED + elif value == NetworkType.UNTRUSTED.value: + log(LogLevel.INFO, f"Network type from certificate OID: {value}") + return NetworkType.UNTRUSTED + else: + log(LogLevel.WARN, f"Unknown network type value '{value}' in OID, defaulting to trusted") + return NetworkType.TRUSTED + + except x509.ExtensionNotFound: + log(LogLevel.INFO, f"OID {OID_CUSTOM_EXTENSION_NETWORK_TYPE} not found in certificate, defaulting to trusted") + return NetworkType.TRUSTED + + except Exception as e: + log(LogLevel.ERROR, f"Error reading certificate: {e}, defaulting to trusted") + return NetworkType.TRUSTED + + +def read_yaml_config_param(param_path: str) -> Optional[str]: + """Read parameter from container's yaml configuration. + + Args: + param_path: Dot-separated path to parameter (e.g., 'pki.ownDomain'). + + Returns: + Parameter value as string, or None if not found or error. 
+ """ + yaml_config_path = Path(f"{CONTAINER_ROOTFS}/app/conf/lxc.yaml") + + if not yaml_config_path.exists(): + log(LogLevel.DEBUG, f"YAML config not found: {yaml_config_path}") + return None + + try: + with open(yaml_config_path, "r", encoding="utf-8") as file: + config = yaml.safe_load(file) + + if not config: + log(LogLevel.DEBUG, f"Empty YAML config: {yaml_config_path}") + return None + + # Navigate through nested dictionary using dot-separated path + value = config + for key in param_path.split('.'): + if isinstance(value, dict): + value = value.get(key) + if value is None: + return None + else: + return None + + return str(value) if value is not None else None + + except Exception as error: # pylint: disable=broad-exception-caught + log(LogLevel.DEBUG, f"Failed to read {param_path} from YAML config: {error}") + return None + + +def get_pki_authority_param(param_name: str) -> str: + """Read PKI authority parameter from swarm-env.yaml. + + Args: + param_name: Name of the parameter under pki-authority section. + + Returns: + Parameter value as string. + + Raises: + FileNotFoundError: If swarm-env.yaml does not exist. + ValueError: If configuration is empty or parameter is not found. + Exception: For other errors during reading. 
+ """ + swarm_env_path = Path(SWARM_ENV_YAML) + + if not swarm_env_path.exists(): + error_msg = f"Swarm environment config not found: {SWARM_ENV_YAML}" + log(LogLevel.ERROR, error_msg) + raise FileNotFoundError(error_msg) + + try: + with open(swarm_env_path, "r", encoding="utf-8") as file: + config = yaml.safe_load(file) + + if not config: + error_msg = f"Empty configuration in {SWARM_ENV_YAML}" + log(LogLevel.ERROR, error_msg) + raise ValueError(error_msg) + + param_value = config.get("pki-authority", {}).get(param_name) + if not param_value: + error_msg = ( + f"No {param_name} found in {SWARM_ENV_YAML} " + f"under pki-authority.{param_name}" + ) + log(LogLevel.ERROR, error_msg) + raise ValueError(error_msg) + + log(LogLevel.INFO, f"Read {param_name} from config: {param_value}") + return param_value + + except (FileNotFoundError, ValueError): + raise + except Exception as error: # pylint: disable=broad-exception-caught + error_msg = f"Failed to read {param_name} from {SWARM_ENV_YAML}: {error}" + log(LogLevel.ERROR, error_msg) + raise Exception(error_msg) from error + + +def generate_swarm_key() -> str: + """Generate new 32-byte swarm key and save to file. + + Returns: + Swarm key as hex string (64 characters). + + Raises: + Exception: If failed to save key to file. 
+ """ + swarm_key_path = Path(SWARM_KEY_FILE) + + log(LogLevel.INFO, "Generating new 32-byte swarm key") + swarm_key = secrets.token_hex(32) # 32 bytes = 64 hex characters + + try: + # Ensure directory exists + if not swarm_key_path.parent.exists(): + swarm_key_path.parent.mkdir(parents=True, exist_ok=True) + log(LogLevel.INFO, f"Created directory {swarm_key_path.parent}") + + with open(swarm_key_path, "w", encoding="utf-8") as file: + file.write(swarm_key) + + # Set restrictive permissions (600) + swarm_key_path.chmod(0o600) + + log(LogLevel.INFO, f"Swarm key generated and saved to {SWARM_KEY_FILE}") + return swarm_key + except Exception as error: + error_msg = f"Failed to save swarm key: {error}" + log(LogLevel.ERROR, error_msg) + raise Exception(error_msg) from error + + +def load_swarm_key() -> str: + """Load existing swarm key from file. + + Returns: + Swarm key as hex string (64 characters). + + Raises: + FileNotFoundError: If swarm key file doesn't exist. + ValueError: If swarm key format is invalid. + Exception: For other errors during reading. + """ + swarm_key_path = Path(SWARM_KEY_FILE) + + if not swarm_key_path.exists(): + error_msg = f"Swarm key file {SWARM_KEY_FILE} not found" + log(LogLevel.ERROR, error_msg) + raise FileNotFoundError(error_msg) + + log(LogLevel.INFO, f"Reading swarm key from {SWARM_KEY_FILE}") + + try: + with open(swarm_key_path, "r", encoding="utf-8") as file: + swarm_key = file.read().strip() + + # Validate key format (should be 64 hex characters) + if not re.match(r'^[0-9a-fA-F]{64}$', swarm_key): + error_msg = f"Invalid swarm key format in {SWARM_KEY_FILE}. Expected 64 hex characters." 
def load_swarm_key() -> str:
    """Load an existing swarm key from SWARM_KEY_FILE.

    Returns:
        The swarm key as a 64-character hex string.

    Raises:
        FileNotFoundError: If the key file does not exist.
        ValueError: If the key is not exactly 64 hex characters.
        Exception: For any other read error.
    """
    key_path = Path(SWARM_KEY_FILE)

    if not key_path.exists():
        error_msg = f"Swarm key file {SWARM_KEY_FILE} not found"
        log(LogLevel.ERROR, error_msg)
        raise FileNotFoundError(error_msg)

    log(LogLevel.INFO, f"Reading swarm key from {SWARM_KEY_FILE}")

    try:
        with open(key_path, "r", encoding="utf-8") as file:
            swarm_key = file.read().strip()

        # The key must be exactly 64 hex characters (32 bytes).
        if not re.match(r'^[0-9a-fA-F]{64}$', swarm_key):
            error_msg = f"Invalid swarm key format in {SWARM_KEY_FILE}. Expected 64 hex characters."
            log(LogLevel.ERROR, error_msg)
            raise ValueError(error_msg)

        log(LogLevel.INFO, "Swarm key loaded successfully")
        return swarm_key
    except (FileNotFoundError, ValueError):
        raise
    except Exception as error:  # pylint: disable=broad-exception-caught
        error_msg = f"Failed to read swarm key: {error}"
        log(LogLevel.ERROR, error_msg)
        raise Exception(error_msg) from error


def patch_yaml_config(
    cpu_type: str,
    vm_mode: VMMode,
    pki_domain: str,
    network_type: NetworkType,
    network_id: str,
    swarm_key: str
):
    """Render the LXC swarm template into the container's lxc.yaml.

    Sets the own-challenge type, swarm mode, domain, network settings and
    swarm key, then writes the result into the container rootfs.

    Raises:
        ValueError: When an untrusted CPU type is combined with a trusted
            network.
    Exits the process (status 1) when the template file is missing.
    """
    template_name = "lxc-swarm-template.yaml"
    log(LogLevel.INFO, f"Detected {vm_mode.value} mode, using swarm template")

    src_yaml = Path(f"/etc/super/containers/pki-authority/{template_name}")
    dst_yaml = Path(f"{CONTAINER_ROOTFS}/app/conf/lxc.yaml")

    if not src_yaml.exists():
        log(LogLevel.ERROR, f"Error: {src_yaml} not found.")
        sys.exit(1)

    with open(src_yaml, "r", encoding="utf-8") as file:
        config = yaml.safe_load(file)

    # Own challenge type: tdx / sev-snp / untrusted.
    pki = config.setdefault("pki", {})
    pki.setdefault("ownChallenge", {})["type"] = cpu_type

    if cpu_type == "untrusted":
        # Refuse to run an untrusted machine inside a trusted network.
        if network_type != NetworkType.UNTRUSTED:
            error_msg = (
                "Cannot run untrusted machine in trusted network. "
                f"CPU type: {cpu_type}, Network type: {network_type.value}"
            )
            log(LogLevel.ERROR, error_msg)
            raise ValueError(error_msg)

        # Untrusted machines identify via a random 32-byte device id.
        device_id_hex = secrets.token_hex(32)
        pki["ownChallenge"]["deviceIdHex"] = device_id_hex
        log(LogLevel.INFO, f"Generated deviceIdHex for untrusted type: {device_id_hex}")

    # Accept untrusted peers when the whole network is untrusted.
    if network_type == NetworkType.UNTRUSTED:
        allowed = pki.setdefault("allowedChallenges", [])
        if "untrusted" not in allowed:
            allowed.append("untrusted")
            log(LogLevel.INFO, "Added 'untrusted' to allowedChallenges")

    if pki_domain:
        pki["ownDomain"] = pki_domain
        log(LogLevel.INFO, f"Set ownDomain to: {pki_domain}")

    mode = pki.setdefault("mode", {})
    mode_value = "init" if vm_mode == VMMode.SWARM_INIT else "normal"
    mode["swarmMode"] = mode_value
    log(LogLevel.INFO, f"Set swarmMode to: {mode_value}")

    if network_type or network_id:
        settings = mode.setdefault("networkSettings", {})
        if network_type:
            settings["networkType"] = network_type.value
            log(LogLevel.INFO, f"Set networkSettings.networkType: {network_type.value}")
        if network_id:
            settings["networkID"] = network_id
            log(LogLevel.INFO, f"Set networkSettings.networkID: {network_id}")

    if swarm_key:
        config.setdefault("secretsStorage", {}).setdefault("static", {})["swarmKey"] = swarm_key
        log(LogLevel.INFO, "Set swarmKey in secretsStorage.static")

    dst_yaml.parent.mkdir(parents=True, exist_ok=True)
    with open(dst_yaml, "w", encoding="utf-8") as file:
        yaml.dump(config, file, default_flow_style=False)
def patch_lxc_config(cpu_type: str):
    """Patch the LXC container config for the detected CPU type.

    Restores the config from its .bak backup (creating the backup on the
    first run) so repeated calls are idempotent, pins the container MAC
    address, and bind-mounts the TEE guest device (/dev/sev-guest or
    /dev/tdx_guest) into the container.

    Args:
        cpu_type: "sev-snp", "tdx", or "untrusted" (no device pass-through).
    """
    config_file = Path(f"/var/lib/lxc/{PKI_SERVICE_NAME}/config")
    config_bak = Path(f"{config_file}.bak")

    # Idempotency: always start from the pristine backup.
    if config_bak.exists():
        shutil.copy(config_bak, config_file)
    elif config_file.exists():
        shutil.copy(config_file, config_bak)

    # Fixed MAC so dnsmasq can hand out a stable address.
    with open(config_file, "a", encoding="utf-8") as file:
        file.write("lxc.net.0.hwaddr = 4e:fc:0a:d5:2d:ff\n")

    # TEE guest device pass-through; "untrusted" has no device.
    guest_devices = {"sev-snp": "/dev/sev-guest", "tdx": "/dev/tdx_guest"}
    dev = guest_devices.get(cpu_type)
    if dev is None:
        return

    stat_info = Path(dev).stat()
    dev_id = f"{os.major(stat_info.st_rdev)}:{os.minor(stat_info.st_rdev)}"
    dev_name = dev[len("/dev/"):]

    with open(config_file, "a", encoding="utf-8") as file:
        file.write(f"lxc.cgroup2.devices.allow = c {dev_id} rwm\n")
        file.write(
            f"lxc.mount.entry = {dev} dev/{dev_name} "
            "none bind,optional,create=file\n"
        )
        # TDX attestation config, when present on the host.
        # FIX: this write previously appeared after the device-write block,
        # where `file` could already be closed (ValueError: I/O on closed
        # file); it is now kept inside the open-file context.
        if cpu_type == "tdx" and Path("/etc/tdx-attest.conf").exists():
            file.write(
                "lxc.mount.entry = /etc/tdx-attest.conf etc/tdx-attest.conf "
                "none bind,ro,create=file\n"
            )
def mount_vm_certs():
    """Bind-mount the host VM certs directory into the container and point
    the container's YAML config at the mounted certificate.

    Exits the process (status 1) when the source directory, the LXC config,
    or the container YAML config is missing or empty.
    """
    src_dir = Path(VM_CERTS_HOST_DIR)
    if not src_dir.exists():
        log(LogLevel.ERROR, f"Error: {src_dir} not found")
        sys.exit(1)

    config_file = Path(f"/var/lib/lxc/{PKI_SERVICE_NAME}/config")
    mount_entry = f"lxc.mount.entry = {VM_CERTS_HOST_DIR} {VM_CERTS_CONTAINER_DIR} none bind,ro,create=dir\n"

    if not config_file.exists():
        log(LogLevel.ERROR, f"Error: LXC config file {config_file} not found")
        sys.exit(1)

    with open(config_file, "r", encoding="utf-8") as file:
        content = file.read()

    # Append the bind-mount entry only once.
    if mount_entry.strip() not in content:
        with open(config_file, "a", encoding="utf-8") as file:
            file.write(mount_entry)
        log(LogLevel.INFO, f"Added mount entry for {VM_CERTS_HOST_DIR}")
    else:
        log(LogLevel.INFO, f"Mount entry for {VM_CERTS_HOST_DIR} already exists")

    # Record the in-container certificate path in the container YAML config.
    dst_yaml = Path(f"{CONTAINER_ROOTFS}/app/conf/lxc.yaml")
    if not dst_yaml.exists():
        log(LogLevel.ERROR, f"Error: {dst_yaml} not found")
        sys.exit(1)

    with open(dst_yaml, "r", encoding="utf-8") as file:
        config = yaml.safe_load(file)

    if not config:
        log(LogLevel.ERROR, f"Empty YAML config: {dst_yaml}")
        sys.exit(1)

    config.setdefault("pki", {}).setdefault("mode", {})["vmCertificatePath"] = VM_CERT_CONTAINER_FILE
    log(LogLevel.INFO, f"Set vmCertificatePath to: {VM_CERT_CONTAINER_FILE}")

    with open(dst_yaml, "w", encoding="utf-8") as file:
        yaml.dump(config, file, default_flow_style=False)


def get_bridge_ip(bridge_name: str) -> str:
    """Return the host's IPv4 address on the given LXC bridge.

    Exits the process (status 1) when the bridge has no parseable address.
    """
    result = subprocess.run(
        ["ip", "-4", "addr", "show", bridge_name],
        capture_output=True,
        text=True,
        check=False,
    )

    match = None
    if result.returncode == 0:
        match = re.search(r'inet\s+(\d+\.\d+\.\d+\.\d+)', result.stdout)

    if not match:
        log(
            LogLevel.ERROR,
            f"Error: Could not determine IP address for bridge {bridge_name}"
        )
        sys.exit(1)

    return match.group(1)
def enable_route_localnet(bridge_name: str):
    """Enable net.ipv4 route_localnet for the bridge (idempotent)."""
    sysctl_key = f"net.ipv4.conf.{bridge_name}.route_localnet"

    current = subprocess.run(
        ["sysctl", "-n", sysctl_key],
        capture_output=True,
        text=True,
        check=False,
    )

    if current.returncode == 0 and current.stdout.strip() == "1":
        log(LogLevel.INFO, f"route_localnet already enabled for {bridge_name}")
        return

    subprocess.run(["sysctl", "-w", f"{sysctl_key}=1"], check=True)
    log(LogLevel.INFO, f"Enabled route_localnet for {bridge_name}")


def get_external_interface() -> str:
    """Detect the external network interface from the default route.

    Returns:
        The interface name carrying the default route, or the
        EXTERNAL_INTERFACE constant when detection fails.
    """
    try:
        route = subprocess.run(
            ["ip", "route", "show", "default"],
            capture_output=True,
            text=True,
            check=False,
        )

        if route.returncode == 0 and route.stdout:
            # e.g. "default via 192.168.1.1 dev enp0s1 proto dhcp metric 100"
            found = re.search(r'dev\s+(\S+)', route.stdout)
            if found:
                interface = found.group(1)
                log(LogLevel.INFO, f"Detected external interface from default route: {interface}")
                return interface

        log(LogLevel.WARN, f"Could not detect external interface, using default: {EXTERNAL_INTERFACE}")
        return EXTERNAL_INTERFACE
    except Exception as error:  # pylint: disable=broad-exception-caught
        log(LogLevel.WARN, f"Failed to detect external interface: {error}, using default: {EXTERNAL_INTERFACE}")
        return EXTERNAL_INTERFACE
def delete_iptables_rules():
    """Delete all PKI-container iptables rules (NAT chains and filter INPUT).

    Rules are identified by the IPTABLES_RULE_COMMENT marker in the
    `iptables -S` listing; each matching "-A ..." rule is re-issued as
    "-D ..." to remove it.

    FIX: the original built the delete command from
    `delete_rule.split()[1:]`, which dropped the leading "-D" action and
    produced an invalid invocation (e.g. `iptables -t nat PREROUTING ...`)
    that failed under check=True. The action token is now preserved.

    NOTE(review): plain str.split() would mis-handle a comment containing
    spaces; assumed IPTABLES_RULE_COMMENT has none — confirm.
    """

    def _delete_marked(list_cmd, delete_prefix, label):
        # List the chain and delete every rule carrying our comment marker.
        listing = subprocess.run(list_cmd, capture_output=True, text=True, check=True)
        for rule in listing.stdout.splitlines():
            if IPTABLES_RULE_COMMENT not in rule:
                continue
            delete_rule = rule.replace("-A", "-D", 1)
            # Keep the full "-D CHAIN ..." argument list intact.
            subprocess.run(delete_prefix + delete_rule.split(), check=True)
            log(LogLevel.INFO, f"Deleted iptables {label} rule: {delete_rule}")

    # NAT table chains.
    for chain in ["PREROUTING", "OUTPUT", "POSTROUTING"]:
        _delete_marked(
            ["iptables", "-t", "nat", "-S", chain],
            ["iptables", "-t", "nat"],
            "NAT",
        )

    # Filter table INPUT chain.
    _delete_marked(["iptables", "-S", "INPUT"], ["iptables"], "INPUT")


def ensure_iptables_rule(check_args: List[str], add_args: List[str], description: str):
    """Append an iptables rule unless it already exists.

    Args:
        check_args: Full `iptables -C ...` invocation probing for the rule.
        add_args: Full `iptables -A ...` invocation appending the rule.
        description: Human-readable rule description for logging.
    """
    log(LogLevel.INFO, f"Checking iptables rule: {description}")

    # `iptables -C` exits 0 when the rule is already present.
    present = subprocess.run(check_args, capture_output=True, check=False).returncode == 0
    if present:
        log(LogLevel.INFO, "Rule already exists")
    else:
        subprocess.run(add_args, check=True)
        log(LogLevel.INFO, "Rule added")
"-j", "DNAT", + "--to-destination", f"127.0.0.1:{PCCS_PORT}" + ], + description=f"PCCS DNAT {host_ip}:{PCCS_PORT} -> 127.0.0.1:{PCCS_PORT}" + ) + + # Rule 2: MASQUERADE + ensure_iptables_rule( + check_args=[ + "iptables", "-t", "nat", "-C", "POSTROUTING", + "-s", f"{CONTAINER_IP}/32", + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "MASQUERADE" + ], + add_args=[ + "iptables", "-t", "nat", "-A", "POSTROUTING", + "-s", f"{CONTAINER_IP}/32", + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "MASQUERADE" + ], + description=f"POSTROUTING MASQUERADE for {CONTAINER_IP}/32" + ) + + # Rule 3: Allow port 8081 on lxcbr0 + ensure_iptables_rule( + check_args=[ + "iptables", "-C", "INPUT", + "-i", "lxcbr0", + "-p", "tcp", + "--dport", "8081", + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "ACCEPT" + ], + add_args=[ + "iptables", "-A", "INPUT", + "-i", "lxcbr0", + "-p", "tcp", + "--dport", "8081", + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "ACCEPT" + ], + description="Allow TCP port 8081 on lxcbr0" + ) + + # Rule 4: DNAT external port 8443 to container port 443 + ensure_iptables_rule( + check_args=[ + "iptables", "-t", "nat", "-C", "PREROUTING", + "-i", external_interface, + "-p", "tcp", + "--dport", PKI_SERVICE_EXTERNAL_PORT, + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "DNAT", + "--to-destination", f"{CONTAINER_IP}:443" + ], + add_args=[ + "iptables", "-t", "nat", "-A", "PREROUTING", + "-i", external_interface, + "-p", "tcp", + "--dport", PKI_SERVICE_EXTERNAL_PORT, + "-m", "comment", "--comment", IPTABLES_RULE_COMMENT, + "-j", "DNAT", + "--to-destination", f"{CONTAINER_IP}:443" + ], + description=f"PKI external access: {external_interface}:{PKI_SERVICE_EXTERNAL_PORT} -> {CONTAINER_IP}:443" + ) + + +def update_pccs_url(): + """Update PCCS URL in QCNL configuration.""" + qcnl_conf = Path(f"{CONTAINER_ROOTFS}/etc/sgx_default_qcnl.conf") + qcnl_conf_bak = Path(f"{qcnl_conf}.bak") + + host_ip = 
def update_pccs_url():
    """Point the container's QCNL config at the host-bridge PCCS URL.

    Keeps a pristine .bak copy of sgx_default_qcnl.conf and re-renders the
    pccs_url field from that snapshot on every call (idempotent).
    Exits the process (status 1) when the config file is missing.
    """
    qcnl_conf = Path(f"{CONTAINER_ROOTFS}/etc/sgx_default_qcnl.conf")
    qcnl_conf_bak = Path(f"{qcnl_conf}.bak")

    host_ip = get_bridge_ip(BRIDGE_NAME)
    pccs_url = f"https://{host_ip}:{PCCS_PORT}/sgx/certification/v4/"

    if not qcnl_conf.exists():
        log(LogLevel.ERROR, f"Error: {qcnl_conf} not found")
        sys.exit(1)

    # First run: snapshot the original; afterwards always patch the snapshot.
    if not qcnl_conf_bak.exists():
        shutil.copy(qcnl_conf, qcnl_conf_bak)
    shutil.copy(qcnl_conf_bak, qcnl_conf)

    with open(qcnl_conf, "r", encoding="utf-8") as file:
        content = file.read()

    content = re.sub(
        r'"pccs_url":\s*"[^"]*"',
        f'"pccs_url": "{pccs_url}"',
        content
    )

    with open(qcnl_conf, "w", encoding="utf-8") as file:
        file.write(content)


def save_property_into_fs(file_name: str, content: bytes):
    """Persist property bytes to STORAGE_PATH/file_name, creating the dir."""
    STORAGE_PATH.mkdir(parents=True, exist_ok=True)
    (STORAGE_PATH / file_name).write_bytes(content)


def read_property_from_fs(file_name: str) -> tuple[bool, bytes]:
    """Read property bytes from STORAGE_PATH/file_name.

    Returns:
        (True, data) when the file exists and is non-empty,
        (False, b"") otherwise.
    """
    file_path = STORAGE_PATH / file_name
    if file_path.exists():
        data = file_path.read_bytes()
        if data:
            return (True, data)
    return (False, b"")
+""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent)) +from pki_helpers import LXCContainer, log, LogLevel + + +def main(): + """Main initialization logic.""" + log(LogLevel.INFO, "Starting PKI Authority initialization") + + # Create container using LXCContainer class + container = LXCContainer() + if not container.create(): + log(LogLevel.ERROR, "Container creation failed") + sys.exit(1) + + log(LogLevel.INFO, "PKI Authority initialization completed successfully") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/rootfs/files/configs/pki-service/systemd/pki-authority-init.service b/src/rootfs/files/configs/pki-service/systemd/pki-authority-init.service new file mode 100644 index 00000000..b1848a41 --- /dev/null +++ b/src/rootfs/files/configs/pki-service/systemd/pki-authority-init.service @@ -0,0 +1,12 @@ +[Unit] +Description=PKI authority initialization +After=lxc.service lxc-net.service lxc-monitord.service +Requires=lxc.service lxc-net.service lxc-monitord.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/python3 /usr/local/bin/pki-authority/pki_init.py + +[Install] +WantedBy=multi-user.target diff --git a/src/rootfs/files/configs/pki-service/systemd/pki-authority-sync.service b/src/rootfs/files/configs/pki-service/systemd/pki-authority-sync.service new file mode 100644 index 00000000..8707e39b --- /dev/null +++ b/src/rootfs/files/configs/pki-service/systemd/pki-authority-sync.service @@ -0,0 +1,21 @@ +[Unit] +Description=PKI authority sync client service +ConditionKernelCommandLine=!vm_mode=swarm-init +After=network-online.target pki-authority-init.service nvidia-persistenced.service +Wants=network-online.target nvidia-persistenced.service + +[Service] +Type=oneshot +Environment=NODE_ENV=production +ExecStart=/usr/bin/node /usr/bin/pki-sync-client sync \ + --config /etc/super/pki-authority-sync/secrets-config.yaml \ + --swarm-env /sp/swarm/swarm-env.yaml \ + 
--save-certs-to-dir /etc/super/certs/vm \ + --save-certs-prefix vm \ + --disable-server-identity-check \ + -v +Restart=on-failure +RestartSec=10s + +[Install] +WantedBy=multi-user.target diff --git a/src/rootfs/files/configs/pki-service/pki-authority.service b/src/rootfs/files/configs/pki-service/systemd/pki-authority.service similarity index 54% rename from src/rootfs/files/configs/pki-service/pki-authority.service rename to src/rootfs/files/configs/pki-service/systemd/pki-authority.service index 612289f5..80144634 100644 --- a/src/rootfs/files/configs/pki-service/pki-authority.service +++ b/src/rootfs/files/configs/pki-service/systemd/pki-authority.service @@ -1,8 +1,8 @@ [Unit] Description=PKI authority lxc-container -After=lxc.service lxc-net.service lxc-monitord.service nvidia-persistenced.service -Requires=lxc.service lxc-net.service lxc-monitord.service -Wants=nvidia-persistenced.service +After=pki-authority-init.service pki-authority-sync.service pccs.service +Requires=pki-authority-init.service pccs.service +Wants=pki-authority-sync.service [Service] Type=simple @@ -10,7 +10,7 @@ Restart=always RestartSec=5 TimeoutStartSec=3min -ExecStartPre=/usr/local/bin/create-and-configure-pki.sh +ExecStartPre=/usr/bin/python3 /usr/local/bin/pki-authority/pki_configure.py ExecStart=/usr/bin/lxc-start -n pki-authority -F diff --git a/src/rootfs/files/scripts/install_pccs.sh b/src/rootfs/files/scripts/install_pccs.sh index e14642d3..6400a9c3 100755 --- a/src/rootfs/files/scripts/install_pccs.sh +++ b/src/rootfs/files/scripts/install_pccs.sh @@ -67,10 +67,10 @@ function move_pccs_to_custom_location() { function create_pccs_config() { log_info "creating PCCS configuration directory"; - mkdir -p "${OUTPUTDIR}${PCCS_INSTALL_DIR}/config/"; + mkdir -p "${OUTPUTDIR}${PCCS_ORIGINAL_LOCATION}/${PCCS_DIRNAME}/config/"; log_info "creating PCCS configuration file"; - cat > "${OUTPUTDIR}${PCCS_INSTALL_DIR}/config/default.json" << EOL + cat > 
"${OUTPUTDIR}${PCCS_ORIGINAL_LOCATION}/${PCCS_DIRNAME}/config/default.json" << EOL { "HTTPS_PORT" : ${PCCS_PORT}, "hosts" : "127.0.0.1", @@ -80,7 +80,7 @@ function create_pccs_config() { "RefreshSchedule": "0 0 1 * *", "UserTokenHash" : "${USER_TOKEN}", "AdminTokenHash" : "${USER_TOKEN}", - "CachingFillMode" : "REQ", + "CachingFillMode" : "LAZY", "LogLevel" : "debug", "DB_CONFIG" : "sqlite", "sqlite" : { @@ -107,6 +107,16 @@ function create_pccs_config() { EOL } +function generate_ssl_keys() { + log_info "generating SSL keys for PCCS"; + mkdir -p "${OUTPUTDIR}${PCCS_ORIGINAL_LOCATION}/${PCCS_DIRNAME}/ssl_key"; + + chroot "${OUTPUTDIR}" /bin/bash -c "cd ${PCCS_ORIGINAL_LOCATION}/${PCCS_DIRNAME} && \ + openssl genrsa -out ssl_key/private.pem 2048 && \ + openssl req -new -key ssl_key/private.pem -out ssl_key/csr.pem -subj '/CN=localhost' && \ + openssl x509 -req -days 365 -in ssl_key/csr.pem -signkey ssl_key/private.pem -out ssl_key/file.crt"; +} + function set_pccs_permissions() { log_info "setting PCCS permissions"; chroot "${OUTPUTDIR}" /bin/bash -c "chown -R pccs:pccs ${PCCS_INSTALL_DIR}/${PCCS_DIRNAME} && chmod -R 750 ${PCCS_INSTALL_DIR}/${PCCS_DIRNAME}"; @@ -141,10 +151,11 @@ function enable_pccs_service() { chroot_init; add_intel_sgx_repository; install_pccs_package; -move_pccs_to_custom_location; create_pccs_config; +generate_ssl_keys; update_pccs_service; enable_pccs_service; +move_pccs_to_custom_location; set_pccs_permissions; chroot_deinit; diff --git a/src/rootfs/files/scripts/install_sync_client.sh b/src/rootfs/files/scripts/install_sync_client.sh new file mode 100644 index 00000000..f30a53e5 --- /dev/null +++ b/src/rootfs/files/scripts/install_sync_client.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# bash unofficial strict mode; +set -euo pipefail; + +# public, required +# OUTPUTDIR + +# public, optional +# $1 - PKI_SYNC_CLIENT_VERSION - version to install, if not set - installs latest + +# private +BUILDROOT="/buildroot"; + +# init loggggging; +source 
"${BUILDROOT}/files/scripts/log.sh"; + +# chroot functions +source "${BUILDROOT}/files/scripts/chroot.sh"; + +function install_sync_client() { + local PKI_SYNC_CLIENT_VERSION="${1:-}"; + local PACKAGE_NAME="@super-protocol/pki-sync-client"; + local PACKAGE_SPEC="${PACKAGE_NAME}"; + + if [ -n "${PKI_SYNC_CLIENT_VERSION}" ]; then + PACKAGE_SPEC="${PACKAGE_NAME}@${PKI_SYNC_CLIENT_VERSION}"; + log_info "installing ${PACKAGE_SPEC} npm package globally"; + else + PACKAGE_SPEC="${PACKAGE_NAME}@latest"; + log_info "installing ${PACKAGE_SPEC} npm package globally"; + fi + + chroot "${OUTPUTDIR}" /bin/bash -c "npm install -g ${PACKAGE_SPEC}"; + log_info "${PACKAGE_SPEC} installed successfully"; +} + +chroot_init; +install_sync_client "${1:-}"; +chroot_deinit; diff --git a/src/rootfs/files/scripts/prepare_swarm_db_config.py b/src/rootfs/files/scripts/prepare_swarm_db_config.py new file mode 100644 index 00000000..bb3c5b54 --- /dev/null +++ b/src/rootfs/files/scripts/prepare_swarm_db_config.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +""" +Prepare swarm-db configuration by adding encryption key from swarm.key +""" + +import sys +import re +import yaml +import argparse +from pathlib import Path + + +def prepare_swarm_db_config(base_config_path: str, key_path: str, output_path: str) -> None: + """ + Read base config, add encryption section with key from key_path, save to output_path + + Args: + base_config_path: Path to base node-db.yaml template + key_path: Path to swarm.key file (must contain 64-char hex string) + output_path: Path to save final config + """ + # Check if key file exists + if not Path(key_path).exists(): + raise FileNotFoundError(f"Encryption key file not found: {key_path}") + + # Read base configuration + with open(base_config_path, 'r') as f: + config = yaml.safe_load(f) + + # Read and validate encryption key + with open(key_path, 'r') as f: + encryption_key = f.read().strip() + + # Validate key format: must be 64 hex characters + if not 
def prepare_swarm_db_config(base_config_path: str, key_path: str, output_path: str) -> None:
    """
    Read the base config, add a memberlist encryption section with the key
    from key_path, and save the result to output_path.

    Args:
        base_config_path: Path to the base node-db.yaml template.
        key_path: Path to swarm.key (must contain a 64-char hex string).
        output_path: Path to save the final config.

    Raises:
        FileNotFoundError: If the key file does not exist.
        ValueError: If the key is not exactly 64 hex characters.
    """
    if not Path(key_path).exists():
        raise FileNotFoundError(f"Encryption key file not found: {key_path}")

    with open(base_config_path, 'r') as f:
        # FIX: an empty template makes safe_load return None, which previously
        # crashed with an opaque TypeError on item assignment; normalize to {}.
        config = yaml.safe_load(f) or {}

    with open(key_path, 'r') as f:
        encryption_key = f.read().strip()

    # The key must be exactly 64 hex characters (32 bytes).
    if not re.match(r'^[0-9a-fA-F]{64}$', encryption_key):
        raise ValueError(
            f"Invalid key format: must be 64 hex characters (0-9, a-f, A-F), "
            f"got {len(encryption_key)} characters"
        )

    config.setdefault('memberlist', {})['encryption'] = {
        'mode': 'static',
        'static_value': encryption_key,
    }

    with open(output_path, 'w') as f:
        yaml.dump(config, f, default_flow_style=False, sort_keys=False)

    print(f"Swarm DB config prepared successfully: {output_path}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Prepare swarm-db configuration by adding encryption key'
    )
    parser.add_argument(
        '--base-config',
        required=True,
        help='Path to base node-db.yaml template'
    )
    parser.add_argument(
        '--key-file',
        required=True,
        help='Path to swarm.key file (64-char hex string)'
    )
    parser.add_argument(
        '--output-config',
        required=True,
        help='Path to save final configuration'
    )

    args = parser.parse_args()

    try:
        prepare_swarm_db_config(args.base_config, args.key_file, args.output_config)
    except Exception as e:
        print(f"Error preparing swarm-db config: {e}", file=sys.stderr)
        sys.exit(1)
redis-tools openssl netcat-openbsd dnsutils chroot "${OUTPUTDIR}" /usr/bin/apt clean log_info "installing Python runtime dependencies"