diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 9a8c118..e0c0c84 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -49,11 +49,11 @@ jobs: printf '%s' "$OCI_API_KEY_PEM" > ~/.oci/oci_api_key.pem chmod 600 ~/.oci/oci_api_key.pem cat > ~/.oci/config < tofu/oci/backend-config.tfvars env: TF_BACKEND_CONFIG: ${{ secrets.TF_BACKEND_CONFIG }} + - name: Install OCI CLI + run: pip install oci-cli --quiet + + - name: Check free tier capacity + env: + OCI_COMPARTMENT_OCID: ${{ secrets.OCI_COMPARTMENT_OCID }} + run: | + echo "Querying live OCI state for compartment ${OCI_COMPARTMENT_OCID}" + + INSTANCES=$(oci compute instance list \ + --compartment-id "$OCI_COMPARTMENT_OCID" \ + --all --output json 2>/dev/null || echo '{"data":[]}') + + LIVE_STATES='.["lifecycle-state"] != "TERMINATING" and .["lifecycle-state"] != "TERMINATED"' + A1_FILTER="select(.shape==\"VM.Standard.A1.Flex\") | select($LIVE_STATES)" + MICRO_FILTER="select(.shape==\"VM.Standard.E2.1.Micro\") | select($LIVE_STATES)" + + CURRENT_OCPUS=$(echo "$INSTANCES" | \ + jq "[.data[] | $A1_FILTER | (.\"shape-config\".ocpus // 0)] | add // 0") + CURRENT_RAM=$(echo "$INSTANCES" | \ + jq "[.data[] | $A1_FILTER | (.\"shape-config\".\"memory-in-gbs\" // 0)] | add // 0") + CURRENT_MICRO=$(echo "$INSTANCES" | \ + jq "[.data[] | $MICRO_FILTER] | length") + + REQUESTED_OCPUS=$(grep -oE 'ocpus\s*=\s*[0-9]+' tofu/oci/terraform.tfvars \ + | awk -F'=' '{s+=int($2)} END {print s+0}') + REQUESTED_RAM=$(grep -oE 'memory_gb\s*=\s*[0-9]+' tofu/oci/terraform.tfvars \ + | awk -F'=' '{s+=int($2)} END {print s+0}') + REQUESTED_MICRO=$(grep -c 'micro_nodes' tofu/oci/terraform.tfvars || echo 0) + + MAX_AMPERE_OCPUS=4 + MAX_AMPERE_RAM_GB=24 + MAX_MICRO_INSTANCES=1 + + echo "A1 live: ${CURRENT_OCPUS}/${MAX_AMPERE_OCPUS} OCPU, ${CURRENT_RAM}/${MAX_AMPERE_RAM_GB} GB" + echo "A1 tfvars: ${REQUESTED_OCPUS} OCPU, ${REQUESTED_RAM} GB" + echo "Micro: live=${CURRENT_MICRO}, 
tfvars=${REQUESTED_MICRO}, limit=${MAX_MICRO_INSTANCES}" + + FAIL=0 + if [ "$(echo "$REQUESTED_OCPUS > $MAX_AMPERE_OCPUS" | bc)" = "1" ]; then + echo "ERROR: tfvars requests ${REQUESTED_OCPUS} A1 OCPU but limit is ${MAX_AMPERE_OCPUS}" + FAIL=1 + fi + if [ "$(echo "$CURRENT_OCPUS > $MAX_AMPERE_OCPUS" | bc)" = "1" ]; then + echo "ERROR: live A1 OCPU=${CURRENT_OCPUS} already exceeds limit=${MAX_AMPERE_OCPUS} — drift detected" + FAIL=1 + fi + if [ "$(echo "$REQUESTED_RAM > $MAX_AMPERE_RAM_GB" | bc)" = "1" ]; then + echo "ERROR: tfvars requests ${REQUESTED_RAM} GB A1 RAM but limit is ${MAX_AMPERE_RAM_GB} GB" + FAIL=1 + fi + if [ "$(echo "$CURRENT_RAM > $MAX_AMPERE_RAM_GB" | bc)" = "1" ]; then + echo "ERROR: live A1 RAM=${CURRENT_RAM} GB already exceeds limit=${MAX_AMPERE_RAM_GB} GB — drift detected" + FAIL=1 + fi + if [ "$REQUESTED_MICRO" -gt "$MAX_MICRO_INSTANCES" ]; then + echo "ERROR: tfvars requests ${REQUESTED_MICRO} Micro but limit is ${MAX_MICRO_INSTANCES}" + FAIL=1 + fi + if [ "$CURRENT_MICRO" -gt "$MAX_MICRO_INSTANCES" ]; then + echo "ERROR: live Micro=${CURRENT_MICRO} exceeds limit=${MAX_MICRO_INSTANCES} — drift" + FAIL=1 + fi + exit $FAIL + - name: Setup OpenTofu uses: opentofu/setup-opentofu@v2.0.0 with: @@ -86,13 +152,16 @@ jobs: -var="compartment_ocid=$OCI_COMPARTMENT_OCID" \ -var="talos_image_ocid=${{ steps.talos_image.outputs.ocid }}" \ -var="omni_join_token=$OMNI_JOIN_TOKEN" \ + -var="omni_endpoint=$OMNI_ENDPOINT" \ -var="tailscale_auth_key=$TAILSCALE_AUTH_KEY" \ + -var="oci_config_profile=DEFAULT" \ -var-file=terraform.tfvars \ -out=tfplan env: OCI_TENANCY_OCID: ${{ secrets.OCI_TENANCY_OCID }} OCI_COMPARTMENT_OCID: ${{ secrets.OCI_COMPARTMENT_OCID }} OMNI_JOIN_TOKEN: ${{ secrets.OMNI_JOIN_TOKEN }} + OMNI_ENDPOINT: ${{ secrets.OMNI_ENDPOINT }} TAILSCALE_AUTH_KEY: ${{ secrets.TAILSCALE_AUTH_KEY }} TF_LOG: WARN @@ -120,11 +189,11 @@ jobs: printf '%s' "$OCI_API_KEY_PEM" > ~/.oci/oci_api_key.pem chmod 600 ~/.oci/oci_api_key.pem cat > ~/.oci/config < 
tofu/oci/backend-config.tfvars diff --git a/.gitignore b/.gitignore index 1f179f4..0d1c610 100644 --- a/.gitignore +++ b/.gitignore @@ -89,6 +89,11 @@ flake.lock .dagger/ dagger/__pycache__/ +# AI tooling +.specstory/ +docs/plans/ +.claude/ + # Artifacts artifacts/ *.qcow2 diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 0000000..733fa13 --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1,6 @@ +# Superseded aspirational/exploratory docs with pre-existing violations +docs/nix-dagger-analysis.md +docs/nix-dagger-detailed.md +docs/OCI_AMPERE_BUILD.md +# Legacy reference plan (Proxmox+Ceph phase, abandoned) — pre-existing violations +PLAN.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5148e49..5e80339 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,7 +24,7 @@ See [DEVELOPMENT.md](DEVELOPMENT.md) for detailed setup instructions. We use [Conventional Commits](https://www.conventionalcommits.org/): -``` +```text (): @@ -33,6 +33,7 @@ We use [Conventional Commits](https://www.conventionalcommits.org/): ``` **Types:** + - `feat`: New feature - `fix`: Bug fix - `docs`: Documentation only @@ -43,7 +44,8 @@ We use [Conventional Commits](https://www.conventionalcommits.org/): - `chore`: Changes to build process or auxiliary tools **Examples:** -``` + +```text feat(tofu): add Proxmox provider configuration docs: update README with devbox instructions @@ -56,16 +58,19 @@ fix(check_availability): handle OCI API timeout errors Before committing, ensure: 1. **Code is formatted:** + ```bash devbox run fmt ``` 2. **Linters pass:** + ```bash devbox run lint ``` 3. **Pre-commit hooks pass:** + ```bash pre-commit run --all-files ``` @@ -83,7 +88,7 @@ Before committing, ensure: - Follow PEP 8 (enforced by Black + Flake8) - Add docstrings to functions - Use type hints where beneficial -- Maximum line length: 100 characters +- Maximum line length: 120 characters ### Documentation @@ -97,6 +102,7 @@ Before committing, ensure: 1. 
**Fork the repository** 2. **Create a feature branch:** + ```bash git checkout -b feat/your-feature-name ``` @@ -107,18 +113,21 @@ Before committing, ensure: - Add/update tests if applicable 4. **Test locally:** + ```bash devbox run lint devbox run check # Validate OpenTofu ``` 5. **Commit with conventional commits:** + ```bash git add . git commit -m "feat: add new feature" ``` 6. **Push to your fork:** + ```bash git push origin feat/your-feature-name ``` @@ -141,6 +150,7 @@ Before committing, ensure: ### Larger Contributions Please open an issue first to discuss: + - New features - Architecture changes - Breaking changes diff --git a/FREE_TIER_RESOURCES.md b/FREE_TIER_RESOURCES.md new file mode 100644 index 0000000..6c38ba5 --- /dev/null +++ b/FREE_TIER_RESOURCES.md @@ -0,0 +1,169 @@ +# OCI Always Free Resources — Complete Reference + +> Last verified: March 2026 against OCI service limits documentation and API. +> Covers two OCI account types with different Always Free behaviours. + +--- + +## Two Types of OCI Free Tier + +OCI has two distinct account types, both offering "Always Free" resources, but with +differences in how some limits are applied: + +| Feature | Always Free Account | PAYG Account (with Always Free) | +|---------|--------------------|---------------------------------| +| Account type | Dedicated free-tier account | Pay-as-you-go with free allowances | +| E2.1.Micro (x86 bastion) | ✅ Available (up to 2) | ✅ Available (up to 2) | +| A1.Flex OCPU granularity | Integer (1, 2, 3…) | Integer (1, 2, 3…) | +| A1.Flex total allowance | 4 OCPUs / 24 GB RAM | 4 OCPUs / 24 GB RAM | +| Load Balancer (10 Mbps) | ✅ 1 × Always Free | ✅ 1 × Always Free | +| Network Load Balancer | ✅ 1 × Always Free | ✅ 1 × Always Free | +| Block Storage | 200 GB total | 200 GB total | +| Object Storage | 20 GB | 20 GB | + +> **How to identify free resources in the API**: OCI's service limit API marks Always +> Free resources with descriptions containing "Always Free" (e.g. 
`lb-10mbps-micro-count` +> = "10Mbps **Always Free** Load Balancer Count"). Some Always Free resources (e.g. the +> Network Load Balancer) do not carry this marker in the API response but are still free +> — cross-reference with the [official Always Free documentation](#references). + +--- + +## Compute + +### Ampere A1.Flex (ARM64) + +- **Shape**: `VM.Standard.A1.Flex` +- **Billing type**: `LIMITED_FREE` — reported by OCI shape API +- **Total allowance**: **4 OCPUs and 24 GB RAM** across all instances in the tenancy +- **OCPU granularity**: Integer values only (1, 2, 3, 4) +- **Architecture**: ARM64 (Ampere Altra) + +**Always Free configurations**: + +| Instances | OCPUs each | RAM each | Total OCPUs | Total RAM | +|-----------|-----------|---------|------------|----------| +| 4 | 1 | 6 GB | 4 | 24 GB ✅ | +| 3 | 1 | 8 GB | 3 | 24 GB (1 OCPU unused) | +| 2 | 2 | 12 GB | 4 | 24 GB ✅ | +| 1 | 4 | 24 GB | 4 | 24 GB ✅ | + +> To maximise both OCPUs and RAM: **4 × (1 OCPU / 6 GB)** + +### VM.Standard.E2.1.Micro (x86, AMD) + +- **Shape**: `VM.Standard.E2.1.Micro` (fixed shape, not Flex) +- **Count**: Up to **2 instances** +- **CPU**: 1/8 OCPU per instance +- **RAM**: 1 GB per instance +- **Architecture**: x86_64 (AMD EPYC) +- **Available in both Always Free and PAYG accounts** + +--- + +## Storage + +### Block Volume Storage + +| Limit | Value | Source | +|-------|-------|--------| +| Total free storage | **200 GB** total per tenancy | `total-free-storage-gb` = "Free Volume Size (GB)" | +| Free backups | **5** | `free-backup-count` = "Free Backup Counts" | +| Minimum boot volume | 47 GB per instance | OCI minimum | + +**Storage planning** (boot volumes count toward the 200 GB total): + +| Config | Boot volumes | Remaining for data | +|--------|-------------|-------------------| +| 4 × A1.Flex + 2 × Micro | 4×47 + 2×47 = 282 GB | ❌ exceeds 200 GB | +| 4 × A1.Flex only | 4×47 = 188 GB | 12 GB data | +| 3 × A1.Flex only | 3×50 = 150 GB | 50 GB data | +| 2 × A1.Flex + 1 × 
Micro | 2×47 + 47 = 141 GB | 59 GB data | + +> Boot volumes are included in the 200 GB total. Plan storage allocations carefully. + +### Object Storage + +- **Capacity**: 20 GB standard storage +- **API Requests**: 50,000/month combined (PUT + GET) +- **No free archive tier** beyond standard limits + +--- + +## Networking + +### Virtual Cloud Networks (VCN) + +- 2 VCNs, unlimited subnets per VCN +- Internet Gateway, NAT Gateway (1 per VCN), Service Gateway: free + +### Load Balancer + +| Type | Free? | API identifier | Notes | +|------|-------|----------------|-------| +| **Flexible LB (10 Mbps)** | ✅ **Always Free** | `lb-10mbps-micro-count` | 1 instance, L4+L7 | +| Flexible LB (>10 Mbps) | ❌ Paid | `lb-flexible-count` | Pay by bandwidth | +| **Network LB** | ✅ **Always Free** | `max-nlb-flexible-count` | 1 instance, L4 only | + +> **Kubernetes (OCI CCM)**: To provision the free 10 Mbps LB instead of a paid +> flexible LB, annotate your Service with +> `service.beta.kubernetes.io/oci-load-balancer-shape: "10Mbps"`. +> Without this annotation the CCM defaults to a paid flexible shape. 
+ +### Public IP Addresses + +- **Reserved IPs**: 2 reserved public IPv4 addresses (free) +- **Ephemeral IPs**: Assigned to instances at no cost + +### Data Transfer + +- **Outbound**: 10 TB/month free +- **Inbound**: Always free + +--- + +## Database + +### Autonomous Database + +- 2 databases, 1 OCPU each, 20 GB storage each +- Types: ATP (OLTP), ADW (analytics), JSON + +### NoSQL Database + +- 3 tables, 25 GB per table, 133M reads + 133M writes/month + +--- + +## Service Limits Quick Reference + +| Resource | Always Free Amount | +|----------|-------------------| +| A1.Flex OCPUs | **4 total** | +| A1.Flex RAM | **24 GB total** | +| E2.1.Micro instances | **2** (both account types) | +| Block Storage | **200 GB total** | +| Block Storage Backups | **5** | +| Object Storage | **20 GB** | +| Load Balancer (10 Mbps) | **1** | +| Network Load Balancer | **1** | +| Reserved Public IPs | **2** | +| VCNs | **2** | +| Outbound Transfer | **10 TB/month** | +| Autonomous Databases | **2** (1 OCPU, 20 GB each) | +| NoSQL Tables | **3** (25 GB each) | +| Functions | **2M invocations/month** | +| API Gateway | **1M requests/month** | +| Monitoring | **500M datapoints/month** | + +--- + +## References + +- [OCI Always Free Documentation](https://docs.oracle.com/en-us/iaas/Content/FreeTier/freetier_topic-Always_Free_Resources.htm) +- [OCI Service Limits](https://docs.oracle.com/en-us/iaas/Content/General/Concepts/servicelimits.htm) +- [OCI Pricing](https://www.oracle.com/cloud/price-list.html) + +--- + +**Last verified:** March 2026 — OCI service limits documentation and API diff --git a/PLAN.md b/PLAN.md index e56ea4d..f000051 100644 --- a/PLAN.md +++ b/PLAN.md @@ -4,8 +4,8 @@ Complete implementation plan for deploying a Kubernetes cluster on OCI free tier ## Infrastructure Configuration -**Region**: uk-london-1 -**Account**: PAYG (Pay-As-You-Go) recommended for better Ampere availability +**Region**: uk-london-1 +**Account**: PAYG (Pay-As-You-Go) recommended for better 
Ampere availability **Budget Alert**: $0.01 threshold ### Resource Allocation @@ -97,15 +97,14 @@ Deploy instances using Terraform. ### Terraform Configuration -Update terraform.tfvars: +Update terraform.tfvars (see `tofu/oci/terraform.tfvars.example` for full reference): ```hcl -region = "uk-london-1" -ampere_instance_count = 3 -ampere_ocpus_per_instance = 1.33 -ampere_memory_per_instance = 8 -ampere_boot_volume_size = 50 -micro_instance_count = 1 -micro_boot_volume_size = 50 +omni_ready = false # or true for Talos+Omni mode +ampere_nodes = [ + { name = "app-1", ocpus = 2, memory_gb = 12, boot_vol_gb = 50 }, + { name = "app-2", ocpus = 2, memory_gb = 12, boot_vol_gb = 50 }, +] +micro_nodes = [{ name = "bastion" }] ``` Add custom images to data.tf or reference OCIDs in main.tf. @@ -137,9 +136,9 @@ resource "oci_core_public_ip" "ingress" { Deploy: ```bash cd tofu/oci -terraform init -terraform plan # Verify only free tier resources -terraform apply +tofu init -backend-config=backend-config.tfvars +tofu plan # Verify only free tier resources +tofu apply ``` ### Post-Deployment diff --git a/README.md b/README.md index 589bf43..f9814f1 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![CI](https://github.com/syscode-labs/oci-free-tier-manager/workflows/CI/badge.svg)](https://github.com/syscode-labs/oci-free-tier-manager/actions/workflows/ci.yml) -[![OpenTofu](https://img.shields.io/badge/OpenTofu-1.8-844FBA?logo=terraform)](https://opentofu.org/) +[![OpenTofu](https://img.shields.io/badge/OpenTofu-1.11-844FBA?logo=terraform)](https://opentofu.org/) [![OCI Free Tier](https://img.shields.io/badge/OCI-Always%20Free-F80000?logo=oracle)](https://www.oracle.com/cloud/free/) OpenTofu infrastructure for OCI Always Free tier — provisions 4× Ampere A1.Flex (ARM64) + 1× Micro instance. 
@@ -43,10 +43,10 @@ Key variables: omni_ready = false # Talos + Omni enrollment -omni_ready = true -talos_image_ocid = "ocid1.image.oc1..." # auto-fetched from oci-talos-gitops-apps in CI -omni_endpoint = "https://your-omni.example.com" -siderolink_join_token = "..." +omni_ready = true +talos_image_ocid = "ocid1.image.oc1..." # auto-fetched from oci-talos-gitops-apps in CI +omni_endpoint = "omni.example.com:8090" +omni_join_token = "..." # or pass via -var / TF_VAR_omni_join_token ``` ### Deploy @@ -89,10 +89,12 @@ When `omni_ready = true`: ### "Out of capacity" for Ampere -Normal — Ampere instances are highly contested: +Normal — Ampere instances are highly contested. The CI deploy workflow retries +automatically. For manual deployments: -- Run the capacity checker: `python3 scripts/provision_free_tier_retry.py` -- Try different regions or off-peak hours +- Re-run `tofu apply` — OCI eventually allocates capacity +- Try a different availability domain within the same region +- Try off-peak hours ### Storage limit exceeded diff --git a/ROADMAP.md b/ROADMAP.md index 57dbf82..702bb08 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -4,19 +4,24 @@ Complete implementation plan for OCI Free Tier Kubernetes cluster. 
## Current State -✅ **Completed:** +✅ **Implemented:** -- Layer 1: OCI infrastructure provisioning (`tofu/oci/`) — VCN, 3× Ampere, 1× Micro, reserved IPs -- Layer 2: Packer golden image — Proxmox ARM64 on OCI builder (`packer/proxmox-ampere.pkr.hcl`) -- Layer 2: Ansible cluster automation (`ansible/proxmox-cluster/`) — pvecm + Ceph + Tailscale LXC -- Availability checker script -- Development environment (devbox + pre-commit) +- OCI infrastructure provisioning (`tofu/oci/`) — VCN, up to 4× Ampere A1.Flex + optional Micro instances +- Two deployment modes via `omni_ready` toggle: + - **Ubuntu mode** (`omni_ready = false`): plain Ubuntu instances, SSH access + - **Talos+Omni mode** (`omni_ready = true`): Talos Linux nodes auto-enrolled into Omni via SideroLink over Tailscale +- GitHub Actions CI/CD with free-tier capacity pre-flight guard +- HTTP remote state backend (OCI Object Storage PAR URL) +- Development environment (mise + pre-commit) -❌ **Missing:** +> **Note:** The original Proxmox+Ceph roadmap (Packer images, Ansible automation) was +> abandoned in favour of the Talos+Omni approach. Phases 1–6 below are retained for +> historical reference but reflect the old direction and have not been implemented. -- Layer 3: Talos Kubernetes automation (`tofu/talos/`) -- Monitoring integration (Grafana Alloy) -- End-to-end deployment orchestration +❌ **Not yet implemented:** + +- Monitoring integration (Grafana Alloy / Grafana Cloud) +- End-to-end deployment orchestration script ## Implementation Phases diff --git a/docs/OCI_AMPERE_BUILD.md b/docs/OCI_AMPERE_BUILD.md index c85cc4c..132c76f 100644 --- a/docs/OCI_AMPERE_BUILD.md +++ b/docs/OCI_AMPERE_BUILD.md @@ -1,5 +1,9 @@ ## OCI Ampere image builds on free tier (ephemeral) +> **Not yet implemented** — The workflows referenced below +> (`.github/workflows/packer-oci-ampere.yml`, `.github/workflows/packer-aws-arm.yml`) +> do not exist. This documents the intended approach when Packer image building is added. 
+ Goal: build packer images on ARM64 without consuming the production free-tier A1 capacity. ### Approach diff --git a/docs/OCI_RESERVED_IPS_AND_LB.md b/docs/OCI_RESERVED_IPS_AND_LB.md new file mode 100644 index 0000000..dab20fc --- /dev/null +++ b/docs/OCI_RESERVED_IPS_AND_LB.md @@ -0,0 +1,186 @@ +# OCI Reserved IPs and Load Balancer — Usage Patterns + +## Reserved IP Pricing + +**Reserved public IPs are free on OCI — assigned or unassigned, any quantity.** + +This differs from AWS (charges for idle Elastic IPs) and Azure (charges for static IPs). +Verified empirically: a reserved IP was created, attached to a running instance, detached, and deleted with zero cost impact. + +--- + +## Current Reserved IPs (managed by Terraform) + +| Name | IP | Purpose | State | +|------|----|---------|-------| +| `k8s-ingress-ip` | `84.8.144.240` | K8s ingress / OCI CCM LB | Unassigned (pre-provisioned) | +| `bastion-ip` | `145.241.217.226` | Micro instance fixed address | Assigned to micro-instance-1 | +| `ampere-instance-1-ip` | `144.21.60.245` | Ampere node 1 | Assigned (replaced lost ephemeral) | + +> **Warning:** If you delete an ephemeral public IP from a VNIC, OCI will not auto-reassign one. +> You must create a reserved IP and assign it manually. Avoid deleting ephemeral IPs outside Terraform. + +--- + +## Option A — OCI Cloud Controller Manager owns the Load Balancer + +Recommended for Talos/K8s workloads. Terraform holds only the reserved IP; the LB lifecycle is owned by K8s. + +### What stays in Terraform + +```hcl +# terraform.tfvars +load_balancer = null # CCM manages the LB, not Terraform +``` + +The `k8s-ingress-ip` reserved IP remains in Terraform so it survives cluster rebuilds. 
+ +### K8s Service annotation + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + annotations: + # Free 10 Mbps tier — omit this and OCI creates a paid flexible LB + service.beta.kubernetes.io/oci-load-balancer-shape: "10Mbps" + + # Pin to the pre-provisioned reserved IP from Terraform output ingress_reserved_ip + oci.oraclecloud.com/oci-load-balancer-public-ip: "84.8.144.240" +spec: + type: LoadBalancer +``` + +### OCI CCM requirements + +- Install the OCI Cloud Controller Manager in the cluster +- CCM needs OCI credentials: either **instance principal** (preferred — no key management) or a kubeconfig secret +- For instance principal: add IAM policy `Allow dynamic-group k8s-nodes to manage load-balancers in compartment homelab` + +### IP lifecycle + +```text +Terraform apply → reserved IP created (84.8.144.240), unassigned +K8s Service created → CCM creates OCI LB, claims 84.8.144.240 +K8s Service deleted → CCM deletes OCI LB, IP reverts to unassigned (still in TF state) +terraform destroy → reserved IP deleted +``` + +The IP address never changes across cluster rebuilds as long as the Terraform reserved IP resource exists. + +--- + +## Option B — Terraform owns the Load Balancer, wire backends manually + +Keep `load_balancer = {}` in Terraform and add backend sets pointing at your Ampere nodes' NodePorts. +Best when you have no cloud provider in K8s (pure Talos, no CCM). 
+ +```hcl +resource "oci_load_balancer_backend_set" "ingress" { + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + name = "ingress-backend-set" + policy = "ROUND_ROBIN" + health_checker { + protocol = "TCP" + port = 30080 + } +} + +resource "oci_load_balancer_backend" "ampere" { + for_each = toset(oci_core_instance.ampere_instance[*].private_ip) + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + backendset_name = oci_load_balancer_backend_set.ingress.name + ip_address = each.value + port = 30080 # ingress controller NodePort — keep in sync with K8s +} + +resource "oci_load_balancer_listener" "http" { + load_balancer_id = oci_load_balancer_load_balancer.free_tier_lb[0].id + name = "http" + default_backend_set_name = oci_load_balancer_backend_set.ingress.name + port = 80 + protocol = "HTTP" +} +``` + +**Downside:** NodePort is hardcoded in Terraform and must be kept in sync manually. + +--- + +## Option C — Proxmox HA Floating IP + +OCI reserved IPs can be moved between VNICs via API. This enables IP failover for Proxmox HA: +when Proxmox migrates a VM to a surviving node, a hook script reassigns the reserved IP to the new node. + +### What Terraform provides + +```hcl +# A REGIONAL reserved IP, not assigned to any instance at creation time. +# Proxmox scripts claim it on VM start. +resource "oci_core_public_ip" "proxmox_vip" { + compartment_id = var.compartment_ocid + lifetime = "RESERVED" + display_name = "proxmox-vip" + # private_ip_id intentionally omitted — Proxmox assigns it +} +``` + +### Proxmox HA hook script + +Place at `/etc/pve/ha/hooks/` (called by pve-ha-manager on VM start/migrate): + +```bash +#!/bin/bash +# /etc/pve/ha/hooks/assign-oci-vip +# Called with: +# Reassigns the OCI reserved IP to the node where the VM just started. + +EVENT=$1 +VMID=$2 +RESERVED_IP_ID="ocid1.publicip.oc1...." # from terraform output proxmox_vip_id +SUBNET_ID="..." 
# from terraform output subnet_id +OCI_PROFILE="DEFAULT" + +if [[ "$EVENT" != "started" && "$EVENT" != "relocated" ]]; then + exit 0 +fi + +# Find this node's private IP +NODE_IP=$(ip -4 addr show | grep "10.0.1." | awk '{print $2}' | cut -d/ -f1) + +# Get private IP OCID +PRIVATE_IP_ID=$(oci network private-ip list \ + --subnet-id "$SUBNET_ID" \ + --ip-address "$NODE_IP" \ + --profile "$OCI_PROFILE" \ + | python3 -c "import sys,json; print(json.load(sys.stdin)['data'][0]['id'])") + +# Reassign reserved IP to this node +oci network public-ip update \ + --public-ip-id "$RESERVED_IP_ID" \ + --private-ip-id "$PRIVATE_IP_ID" \ + --profile "$OCI_PROFILE" +``` + +### There is no Proxmox Cloud Controller for OCI + +Unlike K8s (which has the OCI CCM), Proxmox has no native OCI integration. IP failover is always a +custom script. The pattern above is the standard approach. + +--- + +## Recommended stack layout + +```text +Terraform manages: + ├── ingress_reserved_ip → pre-provisioned, claimed by OCI CCM at K8s service creation + ├── bastion_reserved_ip → permanently assigned to micro-instance-1 (bastion) + └── proxmox_vip_ip → floating IP, moved by Proxmox HA hook on failover + +OCI CCM manages: + └── OCI Load Balancer → created/deleted with K8s Service, uses ingress_reserved_ip + +Proxmox HA manages: + └── VM migration + hook → reassigns proxmox_vip_ip to active node on failover +``` diff --git a/docs/nix-dagger-analysis.md b/docs/nix-dagger-analysis.md index ce3faab..c73254f 100644 --- a/docs/nix-dagger-analysis.md +++ b/docs/nix-dagger-analysis.md @@ -1,5 +1,8 @@ # Nix + Dagger Architecture Analysis +> **Superseded** — This was an exploratory analysis. The project uses mise for dev +> tooling and GitHub Actions for CI. Nix and Dagger were not adopted. + ## Pure Nix Approach ### What Nix Would Handle @@ -81,24 +84,24 @@ type = "app"; program = toString (pkgs.writeShellScript "deploy" '' set -e - + # Phase 1: Build images echo "Building images..." 
nix build .#base-image nix build .#proxmox-image - + # Phase 2: Deploy OCI echo "Deploying OCI infrastructure..." cd tofu/oci ${pkgs.opentofu}/bin/tofu init ${pkgs.opentofu}/bin/tofu apply -auto-approve - + # Phase 3: Proxmox cluster echo "Setting up Proxmox cluster..." cd ../proxmox-cluster ${pkgs.opentofu}/bin/tofu init ${pkgs.opentofu}/bin/tofu apply -auto-approve - + # Phase 4: Talos K8s echo "Deploying Talos Kubernetes..." cd ../talos @@ -214,10 +217,10 @@ class OciFreetier: .with_exec(["packer", "build", "base-hardened.pkr.hcl"]) .directory("/work/output-qemu") ) - + @function async def build_proxmox_image( - self, + self, base_image: dagger.Directory ) -> dagger.Directory: """Build Proxmox image from base""" @@ -234,19 +237,19 @@ class OciFreetier: ]) .directory("/work/output-qemu") ) - + @function async def build_all_images(self) -> str: """Build both images sequentially""" base = await self.build_base_image() proxmox = await self.build_proxmox_image(base) - + # Export to host await base.export("./artifacts/base-hardened") await proxmox.export("./artifacts/proxmox-ampere") - + return "Images built successfully" - + @function async def upload_to_oci( self, @@ -397,8 +400,8 @@ nix run .#validate ## Final Verdict -**If you're comfortable with Nix:** Go **Nix + Dagger** -**If Nix feels too heavy:** Go **Task + Dagger** +**If you're comfortable with Nix:** Go **Nix + Dagger** +**If Nix feels too heavy:** Go **Task + Dagger** **If you want simplest possible:** Go **Task + bash scripts** (no Dagger) I'd recommend **Nix + Dagger** since you're already in the Nix ecosystem (devbox), but it depends on your comfort level. diff --git a/docs/nix-dagger-detailed.md b/docs/nix-dagger-detailed.md index 288671d..dd78510 100644 --- a/docs/nix-dagger-detailed.md +++ b/docs/nix-dagger-detailed.md @@ -1,5 +1,8 @@ # Nix + Dagger Implementation - Detailed Plan +> **Superseded** — This implementation plan was not pursued. 
The project uses mise for +> dev tooling and GitHub Actions for CI. Retained for reference only. + ## Architecture Overview **Separation of Concerns:** @@ -69,58 +72,58 @@ oci-free-tier-manager/ flake-utils.lib.eachDefaultSystem (system: let pkgs = nixpkgs.legacyPackages.${system}; - + # Python environment for Dagger pythonEnv = pkgs.python312.withPackages (ps: with ps; [ dagger-io requests pyyaml ]); - + in { # Development shell (replaces/augments devbox) devShells.default = pkgs.mkShell { name = "oci-free-tier-dev"; - + buildInputs = with pkgs; [ # Infrastructure tools opentofu kubectl helm talosctl - + # Security tools sops age - + # Image building packer qemu - + # Dagger dagger pythonEnv - + # Utilities jq yq gh git curl - + # OCI CLI oci-cli - + # Linting/formatting terraform-ls tflint shellcheck yamllint - + # Pre-commit pre-commit ]; - + shellHook = '' echo "🚀 OCI Free Tier Manager Development Environment" echo "" @@ -132,7 +135,7 @@ oci-free-tier-manager/ echo " nix run .#deploy-all - Full deployment (all phases)" echo " nix run .#validate - Run validation checks" echo "" - + # Initialize pre-commit hooks if [ -f .git/hooks/pre-commit ]; then echo "✓ Pre-commit hooks installed" @@ -142,7 +145,7 @@ oci-free-tier-manager/ fi ''; }; - + # Nix apps (CLI commands) apps = { # Build images with Dagger @@ -156,7 +159,7 @@ oci-free-tier-manager/ build-all-images ''); }; - + # Deploy OCI infrastructure deploy-oci = { type = "app"; @@ -168,7 +171,7 @@ oci-free-tier-manager/ ${pkgs.opentofu}/bin/tofu apply ''); }; - + # Setup Proxmox cluster deploy-proxmox = { type = "app"; @@ -180,7 +183,7 @@ oci-free-tier-manager/ ${pkgs.opentofu}/bin/tofu apply ''); }; - + # Deploy Talos Kubernetes deploy-talos = { type = "app"; @@ -192,110 +195,110 @@ oci-free-tier-manager/ ${pkgs.opentofu}/bin/tofu apply ''); }; - + # Full deployment deploy-all = { type = "app"; program = toString (pkgs.writeShellScript "deploy-all" '' set -euo pipefail - + echo 
"╔═══════════════════════════════════════════╗" echo "║ OCI Free Tier Full Stack Deployment ║" echo "╚═══════════════════════════════════════════╝" echo "" - + # Phase 1: Build images echo "==> Phase 1: Building custom images..." nix run .#build-images echo "" - + # Phase 2: Deploy OCI echo "==> Phase 2: Deploying OCI infrastructure..." nix run .#deploy-oci echo "" - + # Phase 3: Setup Proxmox echo "==> Phase 3: Setting up Proxmox cluster..." nix run .#deploy-proxmox echo "" - + # Phase 4: Deploy Talos echo "==> Phase 4: Deploying Talos Kubernetes..." nix run .#deploy-talos echo "" - + echo "✓ Full deployment complete!" echo "" echo "Run 'nix run .#validate' to verify the deployment." ''); }; - + # Validation validate = { type = "app"; program = toString (pkgs.writeShellScript "validate" '' set -euo pipefail - + echo "Running validation checks..." - + # Validate images if [ -f scripts/validate-phase1.sh ]; then bash scripts/validate-phase1.sh fi - + # Validate OCI if [ -f scripts/validate-phase2.sh ]; then bash scripts/validate-phase2.sh fi - + # Validate Proxmox if [ -f scripts/validate-phase3.sh ]; then bash scripts/validate-phase3.sh fi - + # Validate Talos if [ -f scripts/validate-phase4.sh ]; then bash scripts/validate-phase4.sh fi - + # Validate cost if [ -f scripts/validate-cost.sh ]; then bash scripts/validate-cost.sh fi - + echo "✓ All validation checks passed!" ''); }; - + # Destroy everything (for testing) destroy-all = { type = "app"; program = toString (pkgs.writeShellScript "destroy-all" '' set -euo pipefail - + echo "⚠️ WARNING: This will destroy ALL infrastructure!" read -p "Are you sure? (type 'yes' to confirm): " confirm - + if [ "$confirm" != "yes" ]; then echo "Aborted." exit 1 fi - + # Destroy in reverse order echo "Destroying Talos cluster..." cd tofu/talos && ${pkgs.opentofu}/bin/tofu destroy -auto-approve - + echo "Destroying Proxmox cluster..." 
cd ../proxmox-cluster && ${pkgs.opentofu}/bin/tofu destroy -auto-approve - + echo "Destroying OCI infrastructure..." cd ../oci && ${pkgs.opentofu}/bin/tofu destroy -auto-approve - + echo "✓ All infrastructure destroyed" ''); }; }; - + # Packages (for nix build) packages = { # Could add packages here if needed @@ -346,7 +349,7 @@ import os @object_type class Main: """Main pipeline for OCI Free Tier infrastructure""" - + @function async def build_base_image( self, @@ -357,23 +360,23 @@ class Main: ) -> dagger.Directory: """ Build base hardened image with Packer - + Returns directory with base-hardened.qcow2 """ return await ( dag.container() .from_("hashicorp/packer:latest") - + # Install dependencies .with_exec(["apk", "add", "--no-cache", "qemu-img", "qemu-system-x86_64"]) - + # Copy Packer configs .with_directory("/work", source.directory("packer")) .with_workdir("/work") - + # Initialize Packer .with_exec(["packer", "init", "."]) - + # Build base image .with_exec([ "packer", "build", @@ -381,11 +384,11 @@ class Main: "-var", "headless=true", "base-hardened.pkr.hcl" ]) - + # Return output directory .directory("/work/output-qemu") ) - + @function async def build_proxmox_image( self, @@ -394,27 +397,27 @@ class Main: ) -> dagger.Directory: """ Build Proxmox image from base - + Returns directory with proxmox-ampere.qcow2 """ return await ( dag.container() .from_("hashicorp/packer:latest") - + # Install dependencies .with_exec(["apk", "add", "--no-cache", "qemu-img", "qemu-system-x86_64"]) - + # Copy Packer configs .with_directory("/work", source.directory("packer")) - + # Copy base image .with_directory("/work/base", base_image) - + .with_workdir("/work") - + # Initialize Packer .with_exec(["packer", "init", "."]) - + # Build Proxmox image .with_exec([ "packer", "build", @@ -423,11 +426,11 @@ class Main: "-var", "source_image=/work/base/base-hardened.qcow2", "proxmox-ampere.pkr.hcl" ]) - + # Return output directory .directory("/work/output-qemu") ) - + @function 
async def build_all_images( self, @@ -438,28 +441,28 @@ class Main: ) -> str: """ Build both images sequentially and export to host - + Returns success message with artifact locations """ # Use current directory if not provided if source is None: source = dag.host().directory(".") - + print("Building base image...") base = await self.build_base_image(source) - + print("Building Proxmox image...") proxmox = await self.build_proxmox_image(source, base) - + # Export to host print("Exporting images to ./artifacts/...") await base.export("./artifacts/base-hardened") await proxmox.export("./artifacts/proxmox-ampere") - + # Get image sizes base_files = await base.entries() proxmox_files = await proxmox.entries() - + return f""" ✓ Images built successfully! @@ -471,7 +474,7 @@ Next steps: 1. Upload to OCI: dagger call upload-to-oci 2. Deploy infrastructure: nix run .#deploy-oci """ - + @function async def upload_to_oci( self, @@ -485,22 +488,22 @@ Next steps: ) -> str: """ Upload images to OCI Object Storage and create custom images - + Requires OCI CLI authentication (via config file or env vars) """ # Read artifacts from host artifacts = dag.host().directory("./artifacts") - + container = ( dag.container() .from_("ghcr.io/oracle/oci-cli:latest") .with_directory("/artifacts", artifacts) ) - + # Configure OCI CLI if config provided if oci_config: container = container.with_secret_variable("OCI_CONFIG", oci_config) - + # Upload base image result = await ( container @@ -511,7 +514,7 @@ Next steps: "--name", "base-hardened.qcow2", "--force" ]) - + # Upload Proxmox image .with_exec([ "oci", "os", "object", "put", @@ -520,7 +523,7 @@ Next steps: "--name", "proxmox-ampere.qcow2", "--force" ]) - + # Verify total size < 20GB .with_exec([ "sh", "-c", @@ -529,7 +532,7 @@ Next steps: "jq 'add' | " "awk '{if ($1 > 21474836480) exit 1}'" ]) - + # Create custom images .with_exec([ "oci", "compute", "image", "create", @@ -539,7 +542,7 @@ Next steps: "--object-name", 
"base-hardened.qcow2", "--region", region ]) - + .with_exec([ "oci", "compute", "image", "create", "--compartment-id", compartment_id, @@ -548,12 +551,12 @@ Next steps: "--object-name", "proxmox-ampere.qcow2", "--region", region ]) - + .stdout() ) - + return f"✓ Images uploaded to OCI Object Storage and custom images created\n{result}" - + @function async def validate_images( self, @@ -561,11 +564,11 @@ Next steps: ) -> str: """ Validate that built images meet size requirements - + Returns validation report """ artifacts = dag.host().directory("./artifacts") - + result = await ( dag.container() .from_("alpine:latest") @@ -576,27 +579,27 @@ Next steps: f""" set -e echo "Validating image sizes..." - + BASE_SIZE=$(qemu-img info --output=json /artifacts/base-hardened/*.qcow2 | jq '.["virtual-size"]') PROXMOX_SIZE=$(qemu-img info --output=json /artifacts/proxmox-ampere/*.qcow2 | jq '.["virtual-size"]') TOTAL_SIZE=$((BASE_SIZE + PROXMOX_SIZE)) MAX_SIZE=$((21474836480)) # 20GB in bytes - + echo "Base image: $(($BASE_SIZE / 1024 / 1024 / 1024))GB" echo "Proxmox image: $(($PROXMOX_SIZE / 1024 / 1024 / 1024))GB" echo "Total: $(($TOTAL_SIZE / 1024 / 1024 / 1024))GB / 20GB" - + if [ $TOTAL_SIZE -gt $MAX_SIZE ]; then echo "ERROR: Total size exceeds 20GB OCI free tier limit!" 
exit 1 fi - + echo "✓ Images within size limits" """ ]) .stdout() ) - + return result ``` @@ -693,37 +696,37 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - + - uses: DeterminateSystems/nix-installer-action@v9 - uses: DeterminateSystems/magic-nix-cache-action@v2 - + - uses: dagger/dagger-for-github@v5 with: verb: call args: build-all-images - + - name: Upload artifacts uses: actions/upload-artifact@v4 with: name: images path: artifacts/ - + deploy: needs: build-images runs-on: ubuntu-latest environment: production steps: - uses: actions/checkout@v4 - + - uses: DeterminateSystems/nix-installer-action@v9 - + - name: Configure OCI CLI env: OCI_CONFIG: ${{ secrets.OCI_CONFIG }} run: | mkdir -p ~/.oci echo "$OCI_CONFIG" > ~/.oci/config - + - name: Deploy infrastructure run: nix run .#deploy-all ``` diff --git a/mise.toml b/mise.toml index 13c0930..6d97386 100644 --- a/mise.toml +++ b/mise.toml @@ -6,9 +6,9 @@ opentofu = "1.11.5" TERRAFORM_BINARY_PATH = "tofu" [tasks.state-setup] -description = "Bootstrap state backend (single profile, default: syscode-homelab/homelab)" -run = "task state:setup:homelab TF_PROFILE=${TF_PROFILE:-syscode-homelab} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab}" +description = "Bootstrap state backend (single profile, default: DEFAULT/homelab)" +run = "task state:setup:homelab TF_PROFILE=${TF_PROFILE:-DEFAULT} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab}" [tasks.adopt-state] -description = "Adopt existing resources (single profile, default: syscode-homelab/homelab)" -run = "task adopt:state:homelab TF_PROFILE=${TF_PROFILE:-syscode-homelab} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab} STATE_COMPARTMENT_NAME=${STATE_COMPARTMENT_NAME:-homelab} ADOPT_EXISTING=${ADOPT_EXISTING:-true}" +description = "Adopt existing resources (single profile, default: DEFAULT/homelab)" +run = "task adopt:state:homelab TF_PROFILE=${TF_PROFILE:-DEFAULT} COMPARTMENT_NAME=${COMPARTMENT_NAME:-homelab} 
STATE_COMPARTMENT_NAME=${STATE_COMPARTMENT_NAME:-homelab} ADOPT_EXISTING=${ADOPT_EXISTING:-true}" diff --git a/scripts/cleanup-oci-storage-safe.sh b/scripts/cleanup-oci-storage-safe.sh index 7fe1d78..296fbda 100755 --- a/scripts/cleanup-oci-storage-safe.sh +++ b/scripts/cleanup-oci-storage-safe.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -: "${OCI_PROFILE:=syscode-homelab}" +: "${OCI_PROFILE:=DEFAULT}" : "${OCI_COMPARTMENT:?OCI_COMPARTMENT is required}" : "${OCI_NAMESPACE:?OCI_NAMESPACE is required}" : "${OCI_BUCKET:?OCI_BUCKET is required}" diff --git a/tofu/oci/terraform.tfvars b/tofu/oci/terraform.tfvars index efef77b..059d4a3 100644 --- a/tofu/oci/terraform.tfvars +++ b/tofu/oci/terraform.tfvars @@ -1,6 +1,4 @@ -omni_ready = true -omni_endpoint = "omni.wind-bearded.ts.net:8090" -oci_config_profile = "syscode-homelab" +omni_ready = true # 4x Ampere nodes: 3 control-plane + 1 worker (1 OCPU / 6 GB each = 4 OCPU / 24 GB total) ampere_nodes = [ @@ -10,8 +8,12 @@ ampere_nodes = [ { name = "oci-talos-worker-1", ocpus = 1, memory_gb = 6, boot_vol_gb = 50 }, ] -# talos_image_ocid — fetched from oci-talos-gitops-apps/omni/talos-image.yaml in CI -# omni_join_token — passed via CI secret OMNI_JOIN_TOKEN -# tailscale_auth_key — passed via CI secret TAILSCALE_AUTH_KEY -# tenancy_ocid — passed via CI secret OCI_TENANCY_OCID -# compartment_ocid — passed via CI secret OCI_COMPARTMENT_OCID +# The following must be set via TF_VAR_ environment variables or -var flags: +# +# TF_VAR_oci_config_profile — OCI CLI profile name (local runs); not needed in CI +# TF_VAR_omni_endpoint — Omni SideroLink endpoint, e.g. 
"omni.example.com:8090" +# TF_VAR_talos_image_ocid — fetched from oci-talos-gitops-apps/omni/talos-image.yaml in CI +# TF_VAR_omni_join_token — CI secret OMNI_JOIN_TOKEN +# TF_VAR_tailscale_auth_key — CI secret TAILSCALE_AUTH_KEY +# TF_VAR_tenancy_ocid — CI secret OCI_TENANCY_OCID +# TF_VAR_compartment_ocid — CI secret OCI_COMPARTMENT_OCID diff --git a/tofu/oci/terraform.tfvars.example b/tofu/oci/terraform.tfvars.example index 3cceab6..34dbd83 100644 --- a/tofu/oci/terraform.tfvars.example +++ b/tofu/oci/terraform.tfvars.example @@ -1,6 +1,6 @@ # OCI Authentication # Uses a named profile from ~/.oci/config. Run `oci setup config` to configure. -oci_config_profile = "syscode-homelab" +oci_config_profile = "DEFAULT" # Compartment (required — cannot be auto-detected) compartment_ocid = "ocid1.compartment.oc1..aaaaaaaxxxxx" @@ -57,7 +57,7 @@ budget_alert_email = "your-email@example.com" # --------------------------------------------------------------------------- omni_ready = true talos_image_ocid = "ocid1.image.oc1.uk-london-1.aaaaaaaa..." -omni_endpoint = "omni.wind-bearded.ts.net:8090" +omni_endpoint = "omni.example.com:8090" # omni_join_token and tailscale_auth_key are passed via CI secrets or -var flags: # tofu apply -var="omni_join_token=$OMNI_JOIN_TOKEN" -var="tailscale_auth_key=$TAILSCALE_AUTH_KEY" diff --git a/tofu/oci/validation.tf b/tofu/oci/validation.tf index 00fba59..188fd50 100644 --- a/tofu/oci/validation.tf +++ b/tofu/oci/validation.tf @@ -87,7 +87,7 @@ check "omni_ready_requires_talos_image" { check "omni_ready_requires_endpoint" { assert { condition = !var.omni_ready || var.omni_endpoint != null - error_message = "omni_ready = true requires omni_endpoint (e.g. omni.wind-bearded.ts.net:8090)." + error_message = "omni_ready = true requires omni_endpoint (e.g. omni.example.com:8090)." 
} } diff --git a/tofu/oci/variables.tf b/tofu/oci/variables.tf index 2c6c180..fe9414b 100644 --- a/tofu/oci/variables.tf +++ b/tofu/oci/variables.tf @@ -90,7 +90,7 @@ variable "talos_image_ocid" { # When omni_ready = true, Ampere instances boot Talos and auto-enroll into # your Omni instance via SideroLink over Tailscale. Requires: # talos_image_ocid — Talos+Tailscale image OCID (import once, store in GitHub vars) -# omni_endpoint — Omni gRPC host:port (e.g. omni.wind-bearded.ts.net:8090) +# omni_endpoint — Omni gRPC host:port (e.g. omni.example.com:8090) # omni_join_token — Static join token from: omnictl get connections -o yaml # tailscale_auth_key — Reusable/ephemeral auth key from Tailscale admin with tag:oci # @@ -103,7 +103,7 @@ variable "omni_ready" { } variable "omni_endpoint" { - description = "Omni gRPC endpoint for SideroLink, e.g. omni.wind-bearded.ts.net:8090. Required when omni_ready = true." + description = "Omni gRPC endpoint for SideroLink, e.g. omni.example.com:8090. Required when omni_ready = true." type = string default = null }