Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
120 changes: 65 additions & 55 deletions docs/modules/ROOT/pages/how-tos/cloudscale/install.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
:k8s-minor-version: 1.32
:ocp-patch-version: {ocp-minor-version}.10
:provider: cloudscale
:needs_hieradata_edit: no

[abstract]
--
Expand Down Expand Up @@ -40,6 +41,7 @@ include::partial$install/prerequisites.adoc[]
* `mc` >= `RELEASE.2024-01-18T07-03-39Z` https://docs.min.io/docs/minio-client-quickstart-guide.html[Minio client] (aliased to `mc` if necessary)
* `aws` CLI https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html[Official install instructions].
You can also install the Python package with your favorite package manager (we recommend https://docs.astral.sh/uv/[`uv`]: `uv tool install awscli`).
* `python3` as `python`


[WARNING]
Expand All @@ -53,13 +55,12 @@ include::partial$install/register.adoc[]

=== Configure input

Create 2 new cloudscale API tokens with read+write permissions and name them *`<cluster_id>`* and *`<cluster_id>_floaty`* on https://control.cloudscale.ch/service/<your-project>/api-token.
Create a new cloudscale API token with read+write permissions and name it *`<cluster_id>`* on https://control.cloudscale.ch/service/<your-project>/api-token.

.Access to cloud API
[source,bash]
----
export CLOUDSCALE_API_TOKEN=<cloudscale-api-token>
export TF_VAR_lb_cloudscale_api_secret=<cloudscale-api-token-for-Floaty>
----

include::partial$install/vshn-input.adoc[]
Expand All @@ -70,6 +71,68 @@ include::partial$install/vshn-input.adoc[]
export REGION=$(curl -sH "Authorization: Bearer $(commodore fetch-token)" ${COMMODORE_API_URL}/clusters/${CLUSTER_ID} | jq -r .facts.region)
----

=== Create private network and subnet

. Create a private network via cloudscale API
+
[source,bash]
----
response=$(curl -sH"Authorization: Bearer ${CLOUDSCALE_API_TOKEN}" \
https://api.cloudscale.ch/v1/networks \
-F name="privnet_${CLUSTER_ID}" \
-F zone="${REGION}1" \
-F mtu=9000 \
-F auto_create_ipv4_subnet=false)
export NETWORK_UUID=$(echo "$response" | jq -r '.uuid')
----

. Create a subnet in the private network via cloudscale API
+
[TIP]
====
Customize `PRIVNET_CIDR` if you want to use a different CIDR for the cluster.

Use a custom value for `GATEWAY_ADDR` if you don't want to use `.1` in the configured network CIDR for the default gateway.
====
+
[source,bash]
----
PRIVNET_CIDR="172.18.200.0/24"

GATEWAY_ADDR=$(python -c \
"import ipaddress; print(next(ipaddress.ip_network(\"${PRIVNET_CIDR}\").hosts()))")

response=$(curl -sH"Authorization: Bearer ${CLOUDSCALE_API_TOKEN}" \
https://api.cloudscale.ch/v1/subnets \
-F network="${NETWORK_UUID}" \
-F cidr="${PRIVNET_CIDR}" \
-F gateway_address="${GATEWAY_ADDR}")
export SUBNET_UUID=$(echo "$response" | jq -r '.uuid')
----

. Create a floating IP to use as the NAT source IP via cloudscale API
+
[source,bash]
----
TBD if actually possible
----

. Ask cloudscale to provision a NAT gateway via chat.
Run the command and provide the output with your request.
+
[source]
----
cat <<EOF
---
Network UUID: ${NETWORK_UUID}
Subnet UUID: ${SUBNET_UUID}
NAT Gateway Name: natgw_${CLUSTER_ID}
Gateway IP: ${GATEWAY_ADDR}
NAT source IP: TBD if possible
---
EOF
----

[#_bootstrap_bucket]
=== Set up S3 buckets for the cluster

Expand Down Expand Up @@ -257,10 +320,6 @@ vault kv put clusters/kv/${TENANT_ID}/${CLUSTER_ID}/cloudscale \
s3_access_key=$(mc alias list ${CLUSTER_ID} -json | jq -r .accessKey) \
s3_secret_key=$(mc alias list ${CLUSTER_ID} -json | jq -r .secretKey)

# Put LB API key in Vault
vault kv put clusters/kv/${TENANT_ID}/${CLUSTER_ID}/floaty \
iam_secret=${TF_VAR_lb_cloudscale_api_secret}

# Generate an HTTP secret for the registry
vault kv put clusters/kv/${TENANT_ID}/${CLUSTER_ID}/registry \
httpSecret=$(LC_ALL=C tr -cd "A-Za-z0-9" </dev/urandom | head -c 128)
Expand All @@ -274,8 +333,6 @@ vault kv put clusters/kv/${TENANT_ID}/${CLUSTER_ID}/cluster-backup \
password=$(LC_ALL=C tr -cd "A-Za-z0-9" </dev/urandom | head -c 32)
----

include::partial$get-hieradata-token-from-vault.adoc[]

include::partial$install/prepare-commodore.adoc[]

[#_configure_installer]
Expand Down Expand Up @@ -309,53 +366,6 @@ include::partial$cloudscale/configure-terraform-secrets.adoc[]

include::partial$setup_terraform.adoc[]

. Create LB hieradata
+
[source,bash]
----
cat > override.tf <<EOF
module "cluster" {
bootstrap_count = 0
master_count = 0
infra_count = 0
worker_count = 0
additional_worker_groups = {}
}
EOF
terraform apply -target "module.cluster.module.lb.module.hiera"
----

. Review and merge the LB hieradata MR (listed in Terraform output `hieradata_mr`) and wait until the deploy pipeline after the merge is completed.

. Create LBs
+
[source,bash]
----
terraform apply
----

. Setup the DNS records shown in output variable `dns_entries` from the previous step in the cluster's parent zone.
If you use a custom apps domain, make the necessary changes to the DNS record for `*.apps`.

. Make LB FQDNs available for later steps
+
.Store LB FQDNs in environment
[source,bash]
----
declare -a LB_FQDNS
for id in 1 2; do
LB_FQDNS[$id]=$(terraform state show "module.cluster.module.lb.cloudscale_server.lb[$(expr $id - 1)]" | grep fqdn | awk '{print $2}' | tr -d ' "\r\n')
done
----
+
.Verify FQDNs
[source,bash]
----
for lb in "${LB_FQDNS[@]}"; do echo $lb; done
----

include::partial$install/bootstrap-lb.adoc[]

include::partial$install/bootstrap-nodes.adoc[]

include::partial$install/finalize_part1.adoc[]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,5 @@
cat <<EOF > ./terraform.env
CLOUDSCALE_API_TOKEN
TF_VAR_ignition_bootstrap
TF_VAR_lb_cloudscale_api_secret
TF_VAR_control_vshn_net_token
GIT_AUTHOR_NAME
GIT_AUTHOR_EMAIL
HIERADATA_REPO_TOKEN
EOF
----
19 changes: 7 additions & 12 deletions docs/modules/ROOT/partials/install/bootstrap-nodes.adoc
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

. Deploy bootstrap node
+
[source,bash,subs="attributes+"]
Expand All @@ -18,7 +17,10 @@ EOF
terraform apply
----

ifeval::["{provider}" != "stackit"]
ifeval::["{provider}" == "cloudscale"]
. Set up the DNS records shown in Terraform output `cluster_dns`
endif::[]
ifeval::["{provider}" == "exoscale"]
. Review and merge the LB hieradata MR (listed in Terraform output `hieradata_mr`) and run Puppet on the LBs after the deploy job has completed
+
[source,bash]
Expand All @@ -37,21 +39,18 @@ ifeval::["{provider}" == "stackit"]
--
endif::[]
ifeval::["{provider}" == "cloudscale"]
. Store the subnet UUID and ingress floating IP in the cluster configuration
. Store the ingress floating IP in the cluster configuration
+
[source,bash]
----
export SUBNET_UUID="$(terraform output -raw subnet_uuid)"
export INGRESS_FLOATING_IP="$(terraform output -raw router_vip)"

pushd ../../../inventory/classes/${TENANT_ID}

yq eval -i '.parameters.openshift.cloudscale.subnet_uuid = "'$SUBNET_UUID'"' \
${CLUSTER_ID}.yml
yq eval -i '.parameters.openshift.cloudscale.ingress_floating_ip_v4 = "'$INGRESS_FLOATING_IP'"' \
${CLUSTER_ID}.yml

git commit -am "Configure cloudscale subnet UUID and ingress floating IP for ${CLUSTER_ID}"
git commit -am "Configure cloudscale ingress floating IP for ${CLUSTER_ID}"
git push
popd
popd # yes, twice.
Expand Down Expand Up @@ -148,10 +147,6 @@ EOF
terraform apply
----

ifeval::["{provider}" == "cloudscale"]
. Add the DNS records for etcd shown in output variable `dns_entries` from the previous step to the cluster's parent zone
endif::[]

. Wait for master nodes to become ready
+
TIP: This is optional, but will make the subsequent steps less likely to run into weird timeouts.
Expand Down Expand Up @@ -260,7 +255,7 @@ terraform apply
popd
----

ifeval::["{provider}" != "stackit"]
ifeval::["{provider}" == "exoscale"]
. Review and merge the LB hieradata MR (listed in Terraform output `hieradata_mr`) and run Puppet on the LBs after the deploy job has completed
+
[source,bash]
Expand Down
5 changes: 1 addition & 4 deletions docs/modules/ROOT/partials/install/finalize_part2.adoc
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
=== Finalize installation

ifeval::["{provider}" == "cloudscale"]
include::partial$install/finalize_part2_cloudscale_exoscale.adoc[]
endif::[]
ifeval::["{provider}" == "exoscale"]
include::partial$install/finalize_part2_cloudscale_exoscale.adoc[]
include::partial$install/finalize_part2_exoscale.adoc[]
endif::[]
ifeval::["{provider}" == "vsphere"]
include::partial$install/finalize_part2_ipi.adoc[]
Expand Down
2 changes: 1 addition & 1 deletion docs/modules/ROOT/partials/install/prepare-commodore.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ ifeval::["{provider}" == "cloudscale"]
+
[source,bash,subs="attributes"]
----
yq eval -i '.parameters.openshift.cloudscale.subnet_uuid = "TO_BE_DEFINED"' ${CLUSTER_ID}.yml
yq eval -i '.parameters.openshift.cloudscale.subnet_uuid = "'"${SUBNET_UUID}"'"' ${CLUSTER_ID}.yml

yq eval -i '.parameters.openshift.cloudscale.rhcos_image_slug = "rhcos-{ocp-minor-version}"' \
${CLUSTER_ID}.yml
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
ifeval::["{provider}" != "cloudscale"]
. Set team responsible for handling Icinga alerts
+
[source,bash]
Expand All @@ -6,6 +7,7 @@
# e.g. TEAM=aldebaran
TEAM=<team-name>
----
endif::[]

. Prepare Terraform cluster config
+
Expand All @@ -32,9 +34,12 @@ yq eval -i ".parameters.openshift4_terraform.terraform_variables.ssh_keys = [\"$

yq eval -i ".parameters.openshift4_terraform.terraform_variables.allocate_router_vip_for_lb_controller = true" \
${CLUSTER_ID}.yml

yq eval -i ".parameters.openshift4_terraform.terraform_variables.subnet_uuid = \"${SUBNET_UUID}\"" \
${CLUSTER_ID}.yml
endif::[]

ifeval::["{provider}" != "stackit"]
ifeval::["{provider}" == "exoscale"]
yq eval -i ".parameters.openshift4_terraform.terraform_variables.team = \"${TEAM}\"" \
${CLUSTER_ID}.yml

Expand Down
5 changes: 3 additions & 2 deletions docs/modules/ROOT/partials/install/prepare-syn-config.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,11 @@ yq eval -i ".parameters.openshift.appsDomain = \"${APPS_DOMAIN}\"" \
By default, the cluster's update channel is derived from the cluster's reported OpenShift version.
If you want to use a custom update channel, make sure to set `parameters.openshift4_version.spec.channel` accordingly.

[source,bash]
[source,bash,subs="attributes+"]
----
# Configure the OpenShift update channel as `fast`
yq eval -i ".parameters.openshift4_version.spec.channel = \"fast-{ocp-minor-version}\"" \
yq eval -i \
".parameters.openshift_upgrade_controller.cluster_version.spec.template.spec.channel = \"fast-{ocp-minor-version}\"" \
${CLUSTER_ID}.yml
----
====
Expand Down