diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 9a8c118..e0c0c84 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -49,11 +49,11 @@ jobs: printf '%s' "$OCI_API_KEY_PEM" > ~/.oci/oci_api_key.pem chmod 600 ~/.oci/oci_api_key.pem cat > ~/.oci/config < tofu/oci/backend-config.tfvars env: TF_BACKEND_CONFIG: ${{ secrets.TF_BACKEND_CONFIG }} + - name: Install OCI CLI + run: pip install oci-cli --quiet + + - name: Check free tier capacity + env: + OCI_COMPARTMENT_OCID: ${{ secrets.OCI_COMPARTMENT_OCID }} + run: | + echo "Querying live OCI state for compartment ${OCI_COMPARTMENT_OCID}" + + INSTANCES=$(oci compute instance list \ + --compartment-id "$OCI_COMPARTMENT_OCID" \ + --all --output json 2>/dev/null || echo '{"data":[]}') + + LIVE_STATES='.["lifecycle-state"] != "TERMINATING" and .["lifecycle-state"] != "TERMINATED"' + A1_FILTER="select(.shape==\"VM.Standard.A1.Flex\") | select($LIVE_STATES)" + MICRO_FILTER="select(.shape==\"VM.Standard.E2.1.Micro\") | select($LIVE_STATES)" + + CURRENT_OCPUS=$(echo "$INSTANCES" | \ + jq "[.data[] | $A1_FILTER | (.\"shape-config\".ocpus // 0)] | add // 0") + CURRENT_RAM=$(echo "$INSTANCES" | \ + jq "[.data[] | $A1_FILTER | (.\"shape-config\".\"memory-in-gbs\" // 0)] | add // 0") + CURRENT_MICRO=$(echo "$INSTANCES" | \ + jq "[.data[] | $MICRO_FILTER] | length") + + REQUESTED_OCPUS=$(grep -oE 'ocpus\s*=\s*[0-9]+' tofu/oci/terraform.tfvars \ + | awk -F'=' '{s+=int($2)} END {print s+0}') + REQUESTED_RAM=$(grep -oE 'memory_gb\s*=\s*[0-9]+' tofu/oci/terraform.tfvars \ + | awk -F'=' '{s+=int($2)} END {print s+0}') + REQUESTED_MICRO=$(grep -c 'micro_nodes' tofu/oci/terraform.tfvars || true) + + MAX_AMPERE_OCPUS=4 + MAX_AMPERE_RAM_GB=24 + MAX_MICRO_INSTANCES=1 + + echo "A1 live: ${CURRENT_OCPUS}/${MAX_AMPERE_OCPUS} OCPU, ${CURRENT_RAM}/${MAX_AMPERE_RAM_GB} GB" + echo "A1 tfvars: ${REQUESTED_OCPUS} OCPU, ${REQUESTED_RAM} GB" + echo "Micro: live=${CURRENT_MICRO}, 
tfvars=${REQUESTED_MICRO}, limit=${MAX_MICRO_INSTANCES}" + + FAIL=0 + if [ "$(echo "$REQUESTED_OCPUS > $MAX_AMPERE_OCPUS" | bc)" = "1" ]; then + echo "ERROR: tfvars requests ${REQUESTED_OCPUS} A1 OCPU but limit is ${MAX_AMPERE_OCPUS}" + FAIL=1 + fi + if [ "$(echo "$CURRENT_OCPUS > $MAX_AMPERE_OCPUS" | bc)" = "1" ]; then + echo "ERROR: live A1 OCPU=${CURRENT_OCPUS} already exceeds limit=${MAX_AMPERE_OCPUS} — drift detected" + FAIL=1 + fi + if [ "$(echo "$REQUESTED_RAM > $MAX_AMPERE_RAM_GB" | bc)" = "1" ]; then + echo "ERROR: tfvars requests ${REQUESTED_RAM} GB A1 RAM but limit is ${MAX_AMPERE_RAM_GB} GB" + FAIL=1 + fi + if [ "$(echo "$CURRENT_RAM > $MAX_AMPERE_RAM_GB" | bc)" = "1" ]; then + echo "ERROR: live A1 RAM=${CURRENT_RAM} GB already exceeds limit=${MAX_AMPERE_RAM_GB} GB — drift detected" + FAIL=1 + fi + if [ "$REQUESTED_MICRO" -gt "$MAX_MICRO_INSTANCES" ]; then + echo "ERROR: tfvars requests ${REQUESTED_MICRO} Micro but limit is ${MAX_MICRO_INSTANCES}" + FAIL=1 + fi + if [ "$CURRENT_MICRO" -gt "$MAX_MICRO_INSTANCES" ]; then + echo "ERROR: live Micro=${CURRENT_MICRO} exceeds limit=${MAX_MICRO_INSTANCES} — drift" + FAIL=1 + fi + exit $FAIL + - name: Setup OpenTofu uses: opentofu/setup-opentofu@v2.0.0 with: @@ -86,13 +152,16 @@ jobs: -var="compartment_ocid=$OCI_COMPARTMENT_OCID" \ -var="talos_image_ocid=${{ steps.talos_image.outputs.ocid }}" \ -var="omni_join_token=$OMNI_JOIN_TOKEN" \ + -var="omni_endpoint=$OMNI_ENDPOINT" \ -var="tailscale_auth_key=$TAILSCALE_AUTH_KEY" \ + -var="oci_config_profile=DEFAULT" \ -var-file=terraform.tfvars \ -out=tfplan env: OCI_TENANCY_OCID: ${{ secrets.OCI_TENANCY_OCID }} OCI_COMPARTMENT_OCID: ${{ secrets.OCI_COMPARTMENT_OCID }} OMNI_JOIN_TOKEN: ${{ secrets.OMNI_JOIN_TOKEN }} + OMNI_ENDPOINT: ${{ secrets.OMNI_ENDPOINT }} TAILSCALE_AUTH_KEY: ${{ secrets.TAILSCALE_AUTH_KEY }} TF_LOG: WARN @@ -120,11 +189,11 @@ jobs: printf '%s' "$OCI_API_KEY_PEM" > ~/.oci/oci_api_key.pem chmod 600 ~/.oci/oci_api_key.pem cat > ~/.oci/config < 
tofu/oci/backend-config.tfvars diff --git a/.gitignore b/.gitignore index 1f179f4..0d1c610 100644 --- a/.gitignore +++ b/.gitignore @@ -89,6 +89,11 @@ flake.lock .dagger/ dagger/__pycache__/ +# AI tooling +.specstory/ +docs/plans/ +.claude/ + # Artifacts artifacts/ *.qcow2 diff --git a/FREE_TIER_RESOURCES.md b/FREE_TIER_RESOURCES.md new file mode 100644 index 0000000..6c38ba5 --- /dev/null +++ b/FREE_TIER_RESOURCES.md @@ -0,0 +1,169 @@ +# OCI Always Free Resources — Complete Reference + +> Last verified: March 2026 against OCI service limits documentation and API. +> Covers two OCI account types with different Always Free behaviours. + +--- + +## Two Types of OCI Free Tier + +OCI has two distinct account types, both offering "Always Free" resources, but with +differences in how some limits are applied: + +| Feature | Always Free Account | PAYG Account (with Always Free) | +|---------|--------------------|---------------------------------| +| Account type | Dedicated free-tier account | Pay-as-you-go with free allowances | +| E2.1.Micro (x86 bastion) | ✅ Available (up to 2) | ✅ Available (up to 2) | +| A1.Flex OCPU granularity | Integer (1, 2, 3…) | Integer (1, 2, 3…) | +| A1.Flex total allowance | 4 OCPUs / 24 GB RAM | 4 OCPUs / 24 GB RAM | +| Load Balancer (10 Mbps) | ✅ 1 × Always Free | ✅ 1 × Always Free | +| Network Load Balancer | ✅ 1 × Always Free | ✅ 1 × Always Free | +| Block Storage | 200 GB total | 200 GB total | +| Object Storage | 20 GB | 20 GB | + +> **How to identify free resources in the API**: OCI's service limit API marks Always +> Free resources with descriptions containing "Always Free" (e.g. `lb-10mbps-micro-count` +> = "10Mbps **Always Free** Load Balancer Count"). Some Always Free resources (e.g. the +> Network Load Balancer) do not carry this marker in the API response but are still free +> — cross-reference with the [official Always Free documentation](#references). 
+ +--- + +## Compute + +### Ampere A1.Flex (ARM64) + +- **Shape**: `VM.Standard.A1.Flex` +- **Billing type**: `LIMITED_FREE` — reported by OCI shape API +- **Total allowance**: **4 OCPUs and 24 GB RAM** across all instances in the tenancy +- **OCPU granularity**: Integer values only (1, 2, 3, 4) +- **Architecture**: ARM64 (Ampere Altra) + +**Always Free configurations**: + +| Instances | OCPUs each | RAM each | Total OCPUs | Total RAM | +|-----------|-----------|---------|------------|----------| +| 4 | 1 | 6 GB | 4 | 24 GB ✅ | +| 3 | 1 | 8 GB | 3 | 24 GB (1 OCPU unused) | +| 2 | 2 | 12 GB | 4 | 24 GB ✅ | +| 1 | 4 | 24 GB | 4 | 24 GB ✅ | + +> To maximise both OCPUs and RAM: **4 × (1 OCPU / 6 GB)** + +### VM.Standard.E2.1.Micro (x86, AMD) + +- **Shape**: `VM.Standard.E2.1.Micro` (fixed shape, not Flex) +- **Count**: Up to **2 instances** +- **CPU**: 1/8 OCPU per instance +- **RAM**: 1 GB per instance +- **Architecture**: x86_64 (AMD EPYC) +- **Available in both Always Free and PAYG accounts** + +--- + +## Storage + +### Block Volume Storage + +| Limit | Value | Source | +|-------|-------|--------| +| Total free storage | **200 GB** total per tenancy | `total-free-storage-gb` = "Free Volume Size (GB)" | +| Free backups | **5** | `free-backup-count` = "Free Backup Counts" | +| Minimum boot volume | 47 GB per instance | OCI minimum | + +**Storage planning** (boot volumes count toward the 200 GB total): + +| Config | Boot volumes | Remaining for data | +|--------|-------------|-------------------| +| 4 × A1.Flex + 2 × Micro | 4×47 + 2×47 = 282 GB | ❌ exceeds 200 GB | +| 4 × A1.Flex only | 4×47 = 188 GB | 12 GB data | +| 3 × A1.Flex only | 3×50 = 150 GB | 50 GB data | +| 2 × A1.Flex + 1 × Micro | 2×47 + 47 = 141 GB | 59 GB data | + +> Boot volumes are included in the 200 GB total. Plan storage allocations carefully. 
+ +### Object Storage + +- **Capacity**: 20 GB standard storage +- **API Requests**: 50,000/month (10,000 PUT, 50,000 GET) +- **No free archive tier** beyond standard limits + +--- + +## Networking + +### Virtual Cloud Networks (VCN) + +- 2 VCNs, unlimited subnets per VCN +- Internet Gateway, NAT Gateway (1 per VCN), Service Gateway: free + +### Load Balancer + +| Type | Free? | API identifier | Notes | +|------|-------|----------------|-------| +| **Flexible LB (10 Mbps)** | ✅ **Always Free** | `lb-10mbps-micro-count` | 1 instance, L4+L7 | +| Flexible LB (>10 Mbps) | ❌ Paid | `lb-flexible-count` | Pay by bandwidth | +| **Network LB** | ✅ **Always Free** | `max-nlb-flexible-count` | 1 instance, L4 only | + +> **Kubernetes (OCI CCM)**: To provision the free 10 Mbps LB instead of a paid +> flexible LB, annotate your Service with +> `service.beta.kubernetes.io/oci-load-balancer-shape: "10Mbps"`. +> Without this annotation the CCM defaults to a paid flexible shape. + +### Public IP Addresses + +- **Reserved IPs**: 2 reserved public IPv4 addresses (free) +- **Ephemeral IPs**: Assigned to instances at no cost + +### Data Transfer + +- **Outbound**: 10 TB/month free +- **Inbound**: Always free + +--- + +## Database + +### Autonomous Database + +- 2 databases, 1 OCPU each, 20 GB storage each +- Types: ATP (OLTP), ADW (analytics), JSON + +### NoSQL Database + +- 3 tables, 25 GB per table, 133M reads + 133M writes/month + +--- + +## Service Limits Quick Reference + +| Resource | Always Free Amount | +|----------|-------------------| +| A1.Flex OCPUs | **4 total** | +| A1.Flex RAM | **24 GB total** | +| E2.1.Micro instances | **2** (both account types) | +| Block Storage | **200 GB total** | +| Block Storage Backups | **5** | +| Object Storage | **20 GB** | +| Load Balancer (10 Mbps) | **1** | +| Network Load Balancer | **1** | +| Reserved Public IPs | **2** | +| VCNs | **2** | +| Outbound Transfer | **10 TB/month** | +| Autonomous Databases | **2** (1 OCPU, 20 GB each) | +| 
NoSQL Tables | **3** (25 GB each) | +| Functions | **2M invocations/month** | +| API Gateway | **1M requests/month** | +| Monitoring | **500M datapoints/month** | + +--- + +## References + +- [OCI Always Free Documentation](https://docs.oracle.com/en-us/iaas/Content/FreeTier/freetier_topic-Always_Free_Resources.htm) +- [OCI Service Limits](https://docs.oracle.com/en-us/iaas/Content/General/Concepts/servicelimits.htm) +- [OCI Pricing](https://www.oracle.com/cloud/price-list.html) + +--- + +**Last verified:** March 2026 — OCI service limits documentation and API diff --git a/docs/OCI_RESERVED_IPS_AND_LB.md b/docs/OCI_RESERVED_IPS_AND_LB.md new file mode 100644 index 0000000..dab20fc --- /dev/null +++ b/docs/OCI_RESERVED_IPS_AND_LB.md @@ -0,0 +1,186 @@ +# OCI Reserved IPs and Load Balancer — Usage Patterns + +## Reserved IP Pricing + +**Reserved public IPs are free on OCI — assigned or unassigned, any quantity.** + +This differs from AWS (charges for idle Elastic IPs) and Azure (charges for static IPs). +Verified empirically: a reserved IP was created, attached to a running instance, detached, and deleted with zero cost impact. + +--- + +## Current Reserved IPs (managed by Terraform) + +| Name | IP | Purpose | State | +|------|----|---------|-------| +| `k8s-ingress-ip` | `84.8.144.240` | K8s ingress / OCI CCM LB | Unassigned (pre-provisioned) | +| `bastion-ip` | `145.241.217.226` | Micro instance fixed address | Assigned to micro-instance-1 | +| `ampere-instance-1-ip` | `144.21.60.245` | Ampere node 1 | Assigned (replaced lost ephemeral) | + +> **Warning:** If you delete an ephemeral public IP from a VNIC, OCI will not auto-reassign one. +> You must create a reserved IP and assign it manually. Avoid deleting ephemeral IPs outside Terraform. + +--- + +## Option A — OCI Cloud Controller Manager owns the Load Balancer + +Recommended for Talos/K8s workloads. Terraform holds only the reserved IP; the LB lifecycle is owned by K8s. 
+ +### What stays in Terraform + +```hcl +# terraform.tfvars +load_balancer = null # CCM manages the LB, not Terraform +``` + +The `k8s-ingress-ip` reserved IP remains in Terraform so it survives cluster rebuilds. + +### K8s Service annotation + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + annotations: + # Free 10 Mbps tier — omit this and OCI creates a paid flexible LB + service.beta.kubernetes.io/oci-load-balancer-shape: "10Mbps" + + # Pin to the pre-provisioned reserved IP from Terraform output ingress_reserved_ip + oci.oraclecloud.com/oci-load-balancer-public-ip: "84.8.144.240" +spec: + type: LoadBalancer +``` + +### OCI CCM requirements + +- Install the OCI Cloud Controller Manager in the cluster +- CCM needs OCI credentials: either **instance principal** (preferred — no key management) or a kubeconfig secret +- For instance principal: add IAM policy `Allow dynamic-group k8s-nodes to manage load-balancers in compartment homelab` + +### IP lifecycle + +```text +Terraform apply → reserved IP created (84.8.144.240), unassigned +K8s Service created → CCM creates OCI LB, claims 84.8.144.240 +K8s Service deleted → CCM deletes OCI LB, IP reverts to unassigned (still in TF state) +terraform destroy → reserved IP deleted +``` + +The IP address never changes across cluster rebuilds as long as the Terraform reserved IP resource exists. + +--- + +## Option B — Terraform owns the Load Balancer, wire backends manually + +Keep `load_balancer = {}` in Terraform and add backend sets pointing at your Ampere nodes' NodePorts. +Best when you have no cloud provider in K8s (pure Talos, no CCM). 
+ +```hcl +resource "oci_load_balancer_backend_set" "ingress" { + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + name = "ingress-backend-set" + policy = "ROUND_ROBIN" + health_checker { + protocol = "TCP" + port = 30080 + } +} + +resource "oci_load_balancer_backend" "ampere" { + for_each = toset(oci_core_instance.ampere_instance[*].private_ip) + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + backendset_name = oci_load_balancer_backend_set.ingress.name + ip_address = each.value + port = 30080 # ingress controller NodePort — keep in sync with K8s +} + +resource "oci_load_balancer_listener" "http" { + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + name = "http" + default_backend_set_name = oci_load_balancer_backend_set.ingress.name + port = 80 + protocol = "HTTP" +} +``` + +**Downside:** NodePort is hardcoded in Terraform and must be kept in sync manually. + +--- + +## Option C — Proxmox HA Floating IP + +OCI reserved IPs can be moved between VNICs via API. This enables IP failover for Proxmox HA: +when Proxmox migrates a VM to a surviving node, a hook script reassigns the reserved IP to the new node. + +### What Terraform provides + +```hcl +# A REGIONAL reserved IP, not assigned to any instance at creation time. +# Proxmox scripts claim it on VM start. +resource "oci_core_public_ip" "proxmox_vip" { + compartment_id = var.compartment_ocid + lifetime = "RESERVED" + display_name = "proxmox-vip" + # private_ip_id intentionally omitted — Proxmox assigns it +} +``` + +### Proxmox HA hook script + +Place at `/etc/pve/ha/hooks/` (called by pve-ha-manager on VM start/migrate): + +```bash +#!/bin/bash +# /etc/pve/ha/hooks/assign-oci-vip +# Called with: +# Reassigns the OCI reserved IP to the node where the VM just started. + +EVENT=$1 +VMID=$2 +RESERVED_IP_ID="ocid1.publicip.oc1...." # from terraform output proxmox_vip_id +SUBNET_ID="..." 
# from terraform output subnet_id +OCI_PROFILE="DEFAULT" + +if [[ "$EVENT" != "started" && "$EVENT" != "relocated" ]]; then + exit 0 +fi + +# Find this node's private IP +NODE_IP=$(ip -4 addr show | grep "10.0.1." | awk '{print $2}' | cut -d/ -f1) + +# Get private IP OCID +PRIVATE_IP_ID=$(oci network private-ip list \ + --subnet-id "$SUBNET_ID" \ + --ip-address "$NODE_IP" \ + --profile "$OCI_PROFILE" \ + | python3 -c "import sys,json; print(json.load(sys.stdin)['data'][0]['id'])") + +# Reassign reserved IP to this node +oci network public-ip update \ + --public-ip-id "$RESERVED_IP_ID" \ + --private-ip-id "$PRIVATE_IP_ID" \ + --profile "$OCI_PROFILE" +``` + +### There is no Proxmox Cloud Controller for OCI + +Unlike K8s (which has the OCI CCM), Proxmox has no native OCI integration. IP failover is always a +custom script. The pattern above is the standard approach. + +--- + +## Recommended stack layout + +```text +Terraform manages: + ├── ingress_reserved_ip → pre-provisioned, claimed by OCI CCM at K8s service creation + ├── bastion_reserved_ip → permanently assigned to micro-instance-1 (bastion) + └── proxmox_vip_ip → floating IP, moved by Proxmox HA hook on failover + +OCI CCM manages: + └── OCI Load Balancer → created/deleted with K8s Service, uses ingress_reserved_ip + +Proxmox HA manages: + └── VM migration + hook → reassigns proxmox_vip_ip to active node on failover +``` diff --git a/mise.toml b/mise.toml index 13c0930..6d97386 100644 --- a/mise.toml +++ b/mise.toml @@ -6,9 +6,9 @@ opentofu = "1.11.5" TERRAFORM_BINARY_PATH = "tofu" [tasks.state-setup] -description = "Bootstrap state backend (single profile, default: syscode-homelab/homelab)" -run = "task state:setup:homelab TF_PROFILE=${TF_PROFILE:-syscode-homelab} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab}" +description = "Bootstrap state backend (single profile, default: DEFAULT/homelab)" +run = "task state:setup:homelab TF_PROFILE=${TF_PROFILE:-DEFAULT} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab}" 
[tasks.adopt-state] -description = "Adopt existing resources (single profile, default: syscode-homelab/homelab)" -run = "task adopt:state:homelab TF_PROFILE=${TF_PROFILE:-syscode-homelab} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab} STATE_COMPARTMENT_NAME=${STATE_COMPARTMENT_NAME:-homelab} ADOPT_EXISTING=${ADOPT_EXISTING:-true}" +description = "Adopt existing resources (single profile, default: DEFAULT/homelab)" +run = "task adopt:state:homelab TF_PROFILE=${TF_PROFILE:-DEFAULT} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab} STATE_COMPARTMENT_NAME=${STATE_COMPARTMENT_NAME:-homelab} ADOPT_EXISTING=${ADOPT_EXISTING:-true}" diff --git a/scripts/cleanup-oci-storage-safe.sh b/scripts/cleanup-oci-storage-safe.sh index 7fe1d78..296fbda 100755 --- a/scripts/cleanup-oci-storage-safe.sh +++ b/scripts/cleanup-oci-storage-safe.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -: "${OCI_PROFILE:=syscode-homelab}" +: "${OCI_PROFILE:=DEFAULT}" : "${OCI_COMPARTMENT:?OCI_COMPARTMENT is required}" : "${OCI_NAMESPACE:?OCI_NAMESPACE is required}" : "${OCI_BUCKET:?OCI_BUCKET is required}" diff --git a/tofu/oci/terraform.tfvars b/tofu/oci/terraform.tfvars index efef77b..059d4a3 100644 --- a/tofu/oci/terraform.tfvars +++ b/tofu/oci/terraform.tfvars @@ -1,6 +1,4 @@ -omni_ready = true -omni_endpoint = "omni.wind-bearded.ts.net:8090" -oci_config_profile = "syscode-homelab" +omni_ready = true # 4x Ampere nodes: 3 control-plane + 1 worker (1 OCPU / 6 GB each = 4 OCPU / 24 GB total) ampere_nodes = [ @@ -10,8 +8,12 @@ ampere_nodes = [ { name = "oci-talos-worker-1", ocpus = 1, memory_gb = 6, boot_vol_gb = 50 }, ] -# talos_image_ocid — fetched from oci-talos-gitops-apps/omni/talos-image.yaml in CI -# omni_join_token — passed via CI secret OMNI_JOIN_TOKEN -# tailscale_auth_key — passed via CI secret TAILSCALE_AUTH_KEY -# tenancy_ocid — passed via CI secret OCI_TENANCY_OCID -# compartment_ocid — passed via CI secret OCI_COMPARTMENT_OCID +# The following must be set via TF_VAR_ environment 
variables or -var flags: +# +# TF_VAR_oci_config_profile — OCI CLI profile name (local runs); not needed in CI +# TF_VAR_omni_endpoint — Omni SideroLink endpoint, e.g. "omni.example.com:8090" +# TF_VAR_talos_image_ocid — fetched from oci-talos-gitops-apps/omni/talos-image.yaml in CI +# TF_VAR_omni_join_token — CI secret OMNI_JOIN_TOKEN +# TF_VAR_tailscale_auth_key — CI secret TAILSCALE_AUTH_KEY +# TF_VAR_tenancy_ocid — CI secret OCI_TENANCY_OCID +# TF_VAR_compartment_ocid — CI secret OCI_COMPARTMENT_OCID diff --git a/tofu/oci/terraform.tfvars.example b/tofu/oci/terraform.tfvars.example index 3cceab6..34dbd83 100644 --- a/tofu/oci/terraform.tfvars.example +++ b/tofu/oci/terraform.tfvars.example @@ -1,6 +1,6 @@ # OCI Authentication # Uses a named profile from ~/.oci/config. Run `oci setup config` to configure. -oci_config_profile = "syscode-homelab" +oci_config_profile = "DEFAULT" # Compartment (required — cannot be auto-detected) compartment_ocid = "ocid1.compartment.oc1..aaaaaaaxxxxx" @@ -57,7 +57,7 @@ budget_alert_email = "your-email@example.com" # --------------------------------------------------------------------------- omni_ready = true talos_image_ocid = "ocid1.image.oc1.uk-london-1.aaaaaaaa..." -omni_endpoint = "omni.wind-bearded.ts.net:8090" +omni_endpoint = "omni.example.com:8090" # omni_join_token and tailscale_auth_key are passed via CI secrets or -var flags: # tofu apply -var="omni_join_token=$OMNI_JOIN_TOKEN" -var="tailscale_auth_key=$TAILSCALE_AUTH_KEY" diff --git a/tofu/oci/validation.tf b/tofu/oci/validation.tf index 00fba59..188fd50 100644 --- a/tofu/oci/validation.tf +++ b/tofu/oci/validation.tf @@ -87,7 +87,7 @@ check "omni_ready_requires_talos_image" { check "omni_ready_requires_endpoint" { assert { condition = !var.omni_ready || var.omni_endpoint != null - error_message = "omni_ready = true requires omni_endpoint (e.g. omni.wind-bearded.ts.net:8090)." + error_message = "omni_ready = true requires omni_endpoint (e.g. omni.example.com:8090)." 
} } diff --git a/tofu/oci/variables.tf b/tofu/oci/variables.tf index 2c6c180..fe9414b 100644 --- a/tofu/oci/variables.tf +++ b/tofu/oci/variables.tf @@ -90,7 +90,7 @@ variable "talos_image_ocid" { # When omni_ready = true, Ampere instances boot Talos and auto-enroll into # your Omni instance via SideroLink over Tailscale. Requires: # talos_image_ocid — Talos+Tailscale image OCID (import once, store in GitHub vars) -# omni_endpoint — Omni gRPC host:port (e.g. omni.wind-bearded.ts.net:8090) +# omni_endpoint — Omni gRPC host:port (e.g. omni.example.com:8090) # omni_join_token — Static join token from: omnictl get connections -o yaml # tailscale_auth_key — Reusable/ephemeral auth key from Tailscale admin with tag:oci # @@ -103,7 +103,7 @@ variable "omni_ready" { } variable "omni_endpoint" { - description = "Omni gRPC endpoint for SideroLink, e.g. omni.wind-bearded.ts.net:8090. Required when omni_ready = true." + description = "Omni gRPC endpoint for SideroLink, e.g. omni.example.com:8090. Required when omni_ready = true." type = string default = null }