From cc4e144b6a09b554d1cf236c9c4007e7a519d94c Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Wed, 19 Nov 2025 13:23:04 -0600 Subject: [PATCH 01/11] docs: Relabel GCP guides as legacy Terraform guides - Moves the install, upgrade, and configuration docs into a subdirectory called Terraform Provider (Legacy) - Creates aliases for each of the moved pages - `Installation > Install on GCP `-> `Installation > Install on GCP > Terraform Provider (Legacy) > Install` - Removed terraform references from the Deployment guideline appendix and pushed them into the Configuration appendix - Changed from "Required configuration" to just "configuration" docs: Relabel AWS guides as legacy Terraform guides - Does the same work as the GCP commit docs: Relabel Azure guides as legacy Terraform guides - Does the same thing as the GCP commit - Removes terraform configuation values given they weren't rendered. e.g. enable_disk_support Update doc links Add docs for new terraform modules. --- doc/user/content/installation/_index.md | 2 +- .../installation/appendix-terraforms.md | 17 +- .../installation/install-on-aws/_index.md | 476 +------------- .../appendix-deployment-guidelines.md | 46 +- .../legacy-terraform-module/_index.md | 23 + .../appendix-configuration.md} | 18 +- .../legacy-terraform-module/install.md | 481 +++++++++++++++ .../upgrade.md} | 4 +- .../install-on-aws/terraform-module/_index.md | 148 +++++ .../installation/install-on-azure/_index.md | 518 +--------------- .../appendix-deployment-guidelines.md | 50 +- .../legacy-terraform-module/_index.md | 23 + .../appendix-configuration.md} | 18 +- .../legacy-terraform-module/install.md | 522 ++++++++++++++++ .../upgrade.md} | 4 +- .../terraform-module/_index.md | 154 +++++ .../installation/install-on-gcp/_index.md | 579 +---------------- .../appendix-deployment-guidelines.md | 57 +- .../legacy-terraform-module/_index.md | 24 + .../appendix-configuration.md} | 36 +- .../legacy-terraform-module/install.md | 583 ++++++++++++++++++ .../upgrade.md} | 8 +- .../install-on-gcp/terraform-module/_index.md | 164 +++++ .../installation/operational-guidelines.md | 22 +- doc/user/content/installation/upgrading.md | 187 ++++++ .../self_managed/legacy_terraform_list.yml | 29 + doc/user/data/self_managed/terraform_list.yml | 33 +- .../self-managed/aws-terraform-configs.html | 4 +- .../self-managed/azure-terraform-configs.html | 2 +- misc/helm-charts/operator/README.md | 6 - misc/helm-charts/operator/README.md.gotmpl | 6 - 31 files changed, 2534 insertions(+), 1710 deletions(-) create mode 100644 doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md rename doc/user/content/installation/install-on-aws/{appendix-aws-configuration.md => legacy-terraform-module/appendix-configuration.md} (73%) create mode 100644 doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md rename doc/user/content/installation/install-on-aws/{upgrade-on-aws.md => legacy-terraform-module/upgrade.md} (98%) create mode 100644 doc/user/content/installation/install-on-aws/terraform-module/_index.md create mode 100644 doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md rename doc/user/content/installation/install-on-azure/{appendix-azure-configuration.md => legacy-terraform-module/appendix-configuration.md} (82%) create mode 100644 doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md rename doc/user/content/installation/install-on-azure/{upgrade-on-azure.md => legacy-terraform-module/upgrade.md} 
(98%) create mode 100644 doc/user/content/installation/install-on-azure/terraform-module/_index.md create mode 100644 doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md rename doc/user/content/installation/install-on-gcp/{appendix-gcp-configuration.md => legacy-terraform-module/appendix-configuration.md} (52%) create mode 100644 doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md rename doc/user/content/installation/install-on-gcp/{upgrade-on-gcp.md => legacy-terraform-module/upgrade.md} (97%) create mode 100644 doc/user/content/installation/install-on-gcp/terraform-module/_index.md create mode 100644 doc/user/content/installation/upgrading.md create mode 100644 doc/user/data/self_managed/legacy_terraform_list.yml diff --git a/doc/user/content/installation/_index.md b/doc/user/content/installation/_index.md index 5fdae97374c0f..c0550e9b16b61 100644 --- a/doc/user/content/installation/_index.md +++ b/doc/user/content/installation/_index.md @@ -1,5 +1,5 @@ --- -title: "Install/Upgrade (Self-Managed)" +title: "Self-Managed Deployments" description: "Installation and upgrade guides for Self-Managed Materialize." disable_list: true menu: diff --git a/doc/user/content/installation/appendix-terraforms.md b/doc/user/content/installation/appendix-terraforms.md index cfb6ae78dd999..de7ea46f42952 100644 --- a/doc/user/content/installation/appendix-terraforms.md +++ b/doc/user/content/installation/appendix-terraforms.md @@ -1,6 +1,6 @@ --- -title: "Appendix: Terraforms" +title: "Terraform Modules" description: "List of template Terraform modules that are available as a starting point." menu: @@ -10,7 +10,7 @@ menu: weight: 95 --- -To help you get started, Materialize provides some template Terraforms. +To help you get started, Materialize provides Terraform modules. {{< important >}} These modules are intended for evaluation/demonstration purposes and for serving @@ -25,11 +25,18 @@ your own production deployment, either: {{}} +### **Terraform Modules** + +Materialize provides a [**unified Terraform module**](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main?tab=readme-ov-file#materialize-self-managed-terraform-modules) in order to provide concrete examples and an opinionated model for deploying materialize. +This module supports deployments for AWS + {{< yaml-table data="self_managed/terraform_list" >}} -## Releases +### *Legacy Terraform Modules* + +{{< yaml-table data="self_managed/legacy_terraform_list" >}} -### Materialize on AWS Terraform module +#### Materialize on AWS Terraform module {{< yaml-table data="self_managed/aws_terraform_versions" >}} @@ -40,7 +47,7 @@ https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#u for release-specific upgrade notes. 
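When upgrading the legacy AWS module, it can also help to pin the module to an explicit release tag and bump it deliberately after reviewing the upgrade notes. A minimal, illustrative sketch (the module block name and inputs are placeholders, not prescriptive):

```hcl
# Illustrative only: pin the legacy AWS module to a release tag
# (for example, one listed in the releases table above).
module "materialize_infrastructure" {
  source = "github.com/MaterializeInc/terraform-aws-materialize?ref=v0.6.1"

  # ... your existing inputs ...
}
```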
-### Materialize on Azure Terraform module +#### Materialize on Azure Terraform module {{< yaml-table data="self_managed/azure_terraform_versions" >}} diff --git a/doc/user/content/installation/install-on-aws/_index.md b/doc/user/content/installation/install-on-aws/_index.md index 4c16e703b7763..4e84093d164d2 100644 --- a/doc/user/content/installation/install-on-aws/_index.md +++ b/doc/user/content/installation/install-on-aws/_index.md @@ -1,6 +1,6 @@ --- -title: "Install on AWS (via Terraform)" -description: "" +title: "Install on AWS" +description: "Install and upgrade Materialize on AWS" aliases: - /self-hosted/install-on-aws/ - /self-managed/v25.1/installation/install-on-aws/ @@ -14,476 +14,14 @@ menu: {{% self-managed/materialize-components-sentence %}} -The tutorial deploys Materialize to AWS Elastic Kubernetes Service (EKS) with a -PostgreSQL RDS database as the metadata database and AWS S3 for blob storage. -The tutorial uses [Materialize on AWS Terraform -module](https://github.com/MaterializeInc/terraform-aws-materialize) to: - -- Set up the AWS Kubernetes environment. -- Call - [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) - module to deploy Materialize Operator and Materialize instances to that EKS - cluster. - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - -{{% self-managed/aws-recommended-instances %}} - -See [Appendix: AWS Deployment -guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) for -more information. - -## Prerequisites - -### Terraform - -If you don't have Terraform installed, [install -Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). - -### AWS CLI - -If you do not have the AWS CLI installed, install. For details, see the [AWS -documentation](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). - -### kubectl - -If you do not have `kubectl`, install. See the [Amazon EKS: install `kubectl` -documentation](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -for details. - -### Helm 3.2.0+ - -If you do not have Helm 3.2.0+, install. For details, see the [Helm -documentation](https://helm.sh/docs/intro/install/). - -### License key - -Starting in v26.0, Self-Managed Materialize requires a license key. - -{{< yaml-table data="self_managed/license_key" >}} - -## Set up AWS Kubernetes environment and install Materialize - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< self-managed/tutorial-disclaimer >}} - -{{< /warning >}} - -{{< tabs >}} - -{{< tab "Deployed components" >}} - -[Materialize on AWS Terraform -module](https://github.com/MaterializeInc/terraform-aws-materialize/blob/main/README.md) -deploys a sample infrastructure on AWS (region `us-east-1`) with the following -components: - -{{< yaml-table data="self_managed/aws_terraform_deployed_components" >}} - -{{< tip >}} -{{% self-managed/aws-terraform-configs %}} -{{< /tip >}} - -{{}} -{{< tab "Releases" >}} - -{{< yaml-table data="self_managed/aws_terraform_versions" >}} - -{{}} -{{}} - -1. Open a Terminal window. - -1. Configure AWS CLI with your AWS credentials. For details, see the [AWS - documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). - -{{% self-managed/versions/step-clone-aws-terraform-repo %}} - -1. Go to the `examples/simple` folder in the Materialize Terraform repo - directory. 
- - ```bash - cd terraform-aws-materialize/examples/simple - ``` - - {{< tip >}} - {{< self-managed/aws-terraform-configs >}} - {{< /tip >}} - -1. Create a `terraform.tfvars` file (you can copy from the - `terraform.tfvars.example` file) and specify the following variables: - - | Variable | Description | - |--------------------|-------------| - | `namespace` | A namespace (e.g., `my-demo`) that will be used to form part of the prefix for your AWS resources.
**Requirements:**
- Maximum of 12 characters
- Must start with a lowercase letter
- Must be lowercase alphanumeric and hyphens only | - | `environment` | An environment name (e.g., `dev`, `test`) that will be used to form part of the prefix for your AWS resources.
**Requirements:**
- Maximum of 8 characters
- Must be lowercase alphanumeric only | - - - ```bash - # The namespace and environment variables are used to construct the names of the resources - # e.g. ${namespace}-${environment}-storage, ${namespace}-${environment}-db etc. - - namespace = "enter-namespace" // maximum 12 characters, start with a letter, contain lowercase alphanumeric and hyphens only (e.g. my-demo) - environment = "enter-environment" // maximum 8 characters, lowercase alphanumeric only (e.g., dev, test) - ``` - - {{< tip >}} - {{< self-managed/aws-terraform-configs >}} - {{< /tip >}} - -1. Initialize the terraform directory. - - ```bash - terraform init - ``` - -1. Use terraform plan to review the changes to be made. - - ```bash - terraform plan - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply - ``` - - To approve the changes and apply, enter `yes`. - - - - Upon successful completion, various fields and their values are output: - - ```none - Apply complete! Resources: 89 added, 0 changed, 0 destroyed. - - Outputs: - - cluster_certificate_authority_data = - database_endpoint = "my-demo-dev-db.abcdefg8dsto.us-east-1.rds.amazonaws.com:5432" - eks_cluster_endpoint = "https://0123456789A00BCD000E11BE12345A01.gr7.us-east-1.eks.amazonaws.com" - eks_cluster_name = "my-demo-dev-eks" - materialize_s3_role_arn = "arn:aws:iam::000111222333:role/my-demo-dev-mz-role" - metadata_backend_url = - nlb_details = [] - oidc_provider_arn = "arn:aws:iam::000111222333:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/7D14BCA3A7AA896A836782D96A24F958" - persist_backend_url = "s3://my-demo-dev-storage-f2def2a9/dev:serviceaccount:materialize-environment:12345678-1234-1234-1234-12345678912" - s3_bucket_name = "my-demo-dev-storage-f2def2a9" - vpc_id = "vpc-0abc000bed1d111bd" - ``` - -1. Note your specific values for the following fields: - - - `eks_cluster_name` (Used to configure `kubectl`) - -1. Configure `kubectl` to connect to your EKS cluster, replacing: - - - `` with the name of your EKS cluster. Your cluster - name has the form `{namespace}-{environment}-eks`; e.g., - `my-demo-dev-eks`. - - - `` with the region of your EKS cluster. The - simple example uses `us-east-1`. - - ```bash - aws eks update-kubeconfig --name --region - ``` - - To verify that you have configured correctly, run the following command: - - ```bash - kubectl get nodes - ``` - - For help with `kubectl` commands, see [kubectl Quick - reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). - -1. By default, the example Terraform installs the Materialize Operator and, - starting in v0.4.0, a `cert-manager`. 
Verify the installation and check the - status: - - {{< tabs >}} - {{< tab "Materialize Operator" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n materialize - ``` - - Wait for the components to be in the `Running` state: - - ```none - NAME READY STATUS RESTARTS AGE - pod/my-demo-dev-materialize-operator-84ff4b4648-brjhl 1/1 Running 0 12s - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/my-demo-dev-materialize-operator 1/1 1 1 12s - - NAME DESIRED CURRENT READY AGE - replicaset.apps/my-demo-dev-materialize-operator-84ff4b4648 1 1 1 12s - ``` - - {{}} - {{< tab "cert-manager (Starting in version 0.4.0)" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n cert-manager - ``` - Wait for the components to be in the `Running` state: - ``` - NAME READY STATUS RESTARTS AGE - pod/cert-manager-cainjector-686546c9f7-v9hwp 1/1 Running 0 4m20s - pod/cert-manager-d6746cf45-cdmb5 1/1 Running 0 4m20s - pod/cert-manager-webhook-5f79cd6f4b-rcjbq 1/1 Running 0 4m20s - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/cert-manager ClusterIP 172.20.2.136 9402/TCP 4m20s - service/cert-manager-cainjector ClusterIP 172.20.154.137 9402/TCP 4m20s - service/cert-manager-webhook ClusterIP 172.20.63.217 443/TCP,9402/TCP 4m20s - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/cert-manager 1/1 1 1 4m20s - deployment.apps/cert-manager-cainjector 1/1 1 1 4m20s - deployment.apps/cert-manager-webhook 1/1 1 1 4m20s - NAME DESIRED CURRENT READY AGE - replicaset.apps/cert-manager-cainjector-686546c9f7 1 1 1 4m20s - replicaset.apps/cert-manager-d6746cf45 1 1 1 4m20s - replicaset.apps/cert-manager-webhook-5f79cd6f4b 1 1 1 - 4m20s - ``` - - {{}} - {{}} - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting) guide. - -1. Once the Materialize operator is deployed and running, you can deploy the - Materialize instances. To deploy Materialize instances, create a - `mz_instances.tfvars` file with the [Materialize instance - configuration](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances). - - For example, the following specifies the configuration for a `demo`: - - ```bash - cat < mz_instances.tfvars - - materialize_instances = [ - { - name = "demo" - namespace = "materialize-environment" - database_name = "demo_db" - cpu_request = "1" - memory_request = "2Gi" - memory_limit = "2Gi" - license_key = "" - } - ] - EOF - ``` - - - **Starting in v26.0**, Self-Managed Materialize requires a license key. To - get your license key: - {{% yaml-table data="self_managed/license_key" %}} - - - **Starting in v0.3.0**, the Materialize on AWS Terraform module also - deploys, by default, Network Load Balancers (NLBs) for each Materialize - instance (i.e., the - [`create_nlb`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) - flag defaults to `true`). The NLBs, by default, are configured to be - internal (i.e., the - [`internal_nlb`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) - flag defaults to `true`). See [`materialize_instances`]( - https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) - for the Materialize instance configuration options. - - - **Starting in v0.4.0**, a self-signed `ClusterIssuer` is deployed by - default. 
The `ClusterIssuer` is deployed on subsequent after the - `cert-manager` is running. - - - **Starting in v0.4.6**, you can specify addition configuration options via - `environmentd_extra_args`. - - {{< tip >}} - {{% self-managed/aws-terraform-upgrade-notes %}} - - See [Materialize on AWS releases](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) for notable changes. - {{}} - -1. Run `terraform plan` with both `.tfvars` files and review the changes to be - made. - - ```bash - terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - The plan should show the changes to be made, with a summary similar to the - following: - - ``` - Plan: 17 to add, 1 to change, 0 to destroy. - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - To approve the changes and apply, enter `yes`. - - Upon successful completion, you should see output with a summary similar to - the following: - - - - ```bash - Apply complete! Resources: 17 added, 1 changed, 0 destroyed. - - Outputs: - - cluster_certificate_authority_data = - database_endpoint = "my-demo-dev-db.abcdefg8dsto.us-east-1.rds.amazonaws.com:5432" - eks_cluster_endpoint = "https://0123456789A00BCD000E11BE12345A01.gr7.us-east-1.eks.amazonaws.com" - eks_cluster_name = "my-demo-dev-eks" - materialize_s3_role_arn = "arn:aws:iam::000111222333:role/my-demo-dev-mz-role" - metadata_backend_url = - nlb_details = [ - "demo" = { - "arn" = "arn:aws:elasticloadbalancing:us-east-1:000111222333:loadbalancer/net/my-demo-dev/aeae3d936afebcfe" - "dns_name" = "my-demo-dev-aeae3d936afebcfe.elb.us-east-1.amazonaws.com" - } - ] - oidc_provider_arn = "arn:aws:iam::000111222333:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/7D14BCA3A7AA896A836782D96A24F958" - persist_backend_url = "s3://my-demo-dev-storage-f2def2a9/dev:serviceaccount:materialize-environment:12345678-1234-1234-1234-12345678912" - s3_bucket_name = "my-demo-dev-storage-f2def2a9" - vpc_id = "vpc-0abc000bed1d111bd" - ``` - - The Network Load Balancer (NLB) details `nlb_details` are available when - running the Terraform module v0.3.0+. - -1. Verify the installation and check the status: - - ```bash - kubectl get all -n materialize-environment - ``` - - Wait for the components to be in the `Running` state. 
- - ```none - NAME READY STATUS RESTARTS AGE - pod/create-db-demo-db-6swk7 0/1 Completed 0 33s - pod/mzutd2fbabf5-balancerd-6c9755c498-28kcw 1/1 Running 0 11s - pod/mzutd2fbabf5-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 11s - pod/mzutd2fbabf5-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 11s - pod/mzutd2fbabf5-console-57f94b4588-6lg2x 1/1 Running 0 4s - pod/mzutd2fbabf5-console-57f94b4588-v65lk 1/1 Running 0 4s - pod/mzutd2fbabf5-environmentd-1-0 1/1 Running 0 16s - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/mzutd2fbabf5-balancerd ClusterIP None 6876/TCP,6875/TCP 11s - service/mzutd2fbabf5-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 12s - service/mzutd2fbabf5-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 12s - service/mzutd2fbabf5-console ClusterIP None 8080/TCP 4s - service/mzutd2fbabf5-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 11s - service/mzutd2fbabf5-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 16s - service/mzutd2fbabf5-persist-pubsub-1 ClusterIP None 6879/TCP 16s - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/mzutd2fbabf5-balancerd 1/1 1 1 11s - deployment.apps/mzutd2fbabf5-console 2/2 2 2 4s - - NAME DESIRED CURRENT READY AGE - replicaset.apps/mzutd2fbabf5-balancerd-6c9755c498 1 1 1 11s - replicaset.apps/mzutd2fbabf5-console-57f94b4588 2 2 2 4s - - NAME READY AGE - statefulset.apps/mzutd2fbabf5-cluster-s2-replica-s1-gen-1 1/1 12s - statefulset.apps/mzutd2fbabf5-cluster-u1-replica-u1-gen-1 1/1 11s - statefulset.apps/mzutd2fbabf5-environmentd-1 1/1 16s - - NAME STATUS COMPLETIONS DURATION AGE - job.batch/create-db-demo-db Complete 1/1 11s 33s - ``` - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). - -1. Open the Materialize Console in your browser: - - {{< tabs >}} - - {{< tab "Via Network Load Balancer" >}} - - Starting in v0.3.0, for each Materialize instance, Materialize on AWS - Terraform module also deploys AWS Network Load Balancers (by default, - internal) with the following listeners, including a listener on port 8080 for - the Materialize Console: - - | Port | Description | - | ---- | ------------| - | 6875 | For SQL connections to the database | - | 6876 | For HTTP(S) connections to the database | - | **8080** | **For HTTP(S) connections to Materialize Console** | - - The Network Load Balancer (NLB) details are found in the `nlb_details` in - the [Terraform output](#aws-terrafrom-output). - - The example uses a self-signed ClusterIssuer. As such, you may encounter a - warning with regards to the certificate. In production, run with certificates - from an official Certificate Authority (CA) rather than self-signed - certificates. - - {{}} - - {{< tab "Via port forwarding" >}} - - {{% self-managed/port-forwarding-handling %}} - - {{}} - {{}} - - {{< tip >}} - - {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} - - {{< /tip >}} - -## Next steps - -{{% self-managed/next-steps %}} - -## Cleanup - -{{% self-managed/cleanup-cloud %}} - - {{< tip >}} - - - To delete your S3 bucket, you may need to empty the S3 bucket first. If the - `terraform destroy` command is unable to delete the S3 bucket and does not - progress beyond "Still destroying...", empty the S3 bucket first and rerun - the `terraform destroy` command. - - - Upon successful destroy, you may receive some informational messages with - regards to CustomResourceDefinition(CRD). 
You may safely ignore these - messages as your whole deployment has been destroyed, including the CRDs. - - {{}} +| Guide | Description | +|-------|-------------| +| [Terraform Module](/installation/install-on-aws/terraform-module/) | Install Materialize on AWS using the new unified Terraform module | +| [Terraform Module (legacy)](/installation/install-on-aws/legacy-terraform-module/) | Install Materialize on AWS using the legacy Terraform module | +| [Appendix: AWS deployment guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) | Additional guidelines for AWS deployments | ## See also - [Materialize Operator Configuration](/installation/configuration/) - [Troubleshooting](/installation/troubleshooting/) -- [Appendix: AWS Deployment -guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) - [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md index f8f8252b35695..755f7bfb926d1 100644 --- a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md @@ -14,47 +14,39 @@ menu: As a general guideline, we recommend: -- Processor Type: ARM-based CPU - -- Sizing: - - - If spill-to-disk is not enabled: 1:8 ratio of vCPU to GiB memory - - - If spill-to-disk is enabled (*Recommended*): 1:16 ratio of vCPU to GiB local - instance storage +- ARM-based CPU +- A 1:8 ratio of vCPU to GiB memory. +- When using swap, an 8:1 ratio of GiB local instance storage to GiB RAM. {{% self-managed/aws-recommended-instances %}} ## Locally-attached NVMe storage -For optimal performance, Materialize requires fast, locally-attached NVMe -storage. Having a locally-attached storage allows Materialize to spill to disk -when operating on datasets larger than main memory as well as allows for a more -graceful degradation rather than OOMing. Network-attached storage (like EBS -volumes) can significantly degrade performance and is not supported. +Configuring swap on nodes to use locally-attached NVMe storage allows +Materialize to spill to disk when operating on datasets larger than main memory. +This setup can provide significant cost savings and allows for more graceful +degradation rather than OOMing. Network-attached storage (like EBS volumes) can +significantly degrade performance and is not supported. ### Swap support -Starting in v0.6.1 of Materialize on AWS Terraform, -disk support (using swap on NVMe instance storage) may be enabled for -Materialize. With this change, the Terraform: +***Unified Terraform module*** -- Creates a node group for Materialize. -- Configures NVMe instance store volumes as swap using a daemonset. -- Enables swap at the Kubelet. +The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws/examples/simple) supports configuring swap out of the box. -For swap support, the following configuration option is available: +***Legacy Terraform module*** -- [`swap_enabled`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_swap_enabled) +The legacy Terraform module adds preliminary swap support in v0.6.1 via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_swap_enabled) variable.
+With this change, the Terraform: + - Creates a node group for Materialize. + - Configures NVMe instance store volumes as swap using a daemonset. + - Enables swap at the Kubelet. See [Upgrade Notes](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#v061). - -## CPU affinity - -It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy). -This ensures that each worker thread of Materialize is given exclusively access to a vCPU. Our benchmarks have shown this -to substantially improve the performance of compute-bound workloads. +{{< note >}} +If deploying `v25.2` Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26` this will be handled automatically. +{{< /note >}} ## TLS diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md new file mode 100644 index 0000000000000..8c91d1c7a569c --- /dev/null +++ b/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md @@ -0,0 +1,23 @@ +--- +title: "Terraform Module (legacy)" +description: "" +disable_list: true +disable_toc: true +menu: + main: + parent: "install-on-aws" + identifier: "install-on-aws-legacy-terraform-module" + weight: 5 + + +--- + +The tutorials in this section show you how to deploy Materialize using the [Materialize on AWS Legacy Terraform +module](https://github.com/MaterializeInc/terraform-aws-materialize). + + +| Guide | Description | +|-------|-------------| +| [Install](/installation/install-on-aws/legacy-terraform-module/install/) | Install Materialize on AWS | +| [Upgrade](/installation/install-on-aws/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on AWS | +| [Appendix: AWS configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/) | Configuration for AWS deployments | diff --git a/doc/user/content/installation/install-on-aws/appendix-aws-configuration.md b/doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md similarity index 73% rename from doc/user/content/installation/install-on-aws/appendix-aws-configuration.md rename to doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md index e9e520e2c9cb8..70db19b5f3048 100644 --- a/doc/user/content/installation/install-on-aws/appendix-aws-configuration.md +++ b/doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md @@ -3,7 +3,7 @@ title: "Appendix: Required configuration" description: "Required configuration for Materialize on AWS Terraform." menu: main: - parent: "install-on-aws" + parent: "install-on-aws-legacy-terraform-module" identifier: "appendix-aws-provider-config" weight: 50 aliases: @@ -60,3 +60,19 @@ provider "helm" { } } ``` + +## Swap support + +Starting in v0.6.1 of Materialize on AWS Terraform, +disk support (using swap on NVMe instance storage) may be enabled for +Materialize. With this change, the Terraform: + +- Creates a node group for Materialize. +- Configures NVMe instance store volumes as swap using a daemonset. +- Enables swap at the Kubelet. 
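A minimal sketch of turning this on from your `terraform.tfvars` (the `swap_enabled` input is described below and requires the legacy AWS module v0.6.1 or later):

```hcl
# Illustrative only: enable swap on NVMe instance storage for the
# Materialize node group (legacy AWS module v0.6.1+).
swap_enabled = true
```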
+ +For swap support, the following configuration option is available: + +- [`swap_enabled`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_swap_enabled) + +See [Upgrade Notes](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#v061). diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md b/doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md new file mode 100644 index 0000000000000..dc04c39636c1e --- /dev/null +++ b/doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md @@ -0,0 +1,481 @@ +--- +title: "Install" +description: "" +aliases: + - /self-hosted/install-on-aws/ +menu: + main: + parent: "install-on-aws-legacy-terraform-module" + identifier: "install-aws" + weight: 5 +--- + +{{% self-managed/materialize-components-sentence %}} + +The tutorial deploys Materialize to AWS Elastic Kubernetes Service (EKS) with a +PostgreSQL RDS database as the metadata database and AWS S3 for blob storage. +The tutorial uses [Materialize on AWS Terraform +module](https://github.com/MaterializeInc/terraform-aws-materialize) to: + +- Set up the AWS Kubernetes environment. +- Call + [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) + module to deploy Materialize Operator and Materialize instances to that EKS + cluster. + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +{{% self-managed/aws-recommended-instances %}} + +See [Appendix: AWS Deployment +guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) for +more information. + +## Prerequisites + +### Terraform + +If you don't have Terraform installed, [install +Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). + +### AWS CLI + +If you do not have the AWS CLI installed, install. For details, see the [AWS +documentation](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). + +### kubectl + +If you do not have `kubectl`, install. See the [Amazon EKS: install `kubectl` +documentation](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +for details. + +### Helm 3.2.0+ + +If you do not have Helm 3.2.0+, install. For details, see the [Helm +documentation](https://helm.sh/docs/intro/install/). + +### License key + +{{< include-md file="shared-content/license-key-required.md" >}} + +## Set up AWS Kubernetes environment and install Materialize + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< self-managed/tutorial-disclaimer >}} + +{{< /warning >}} + +{{< tabs >}} + +{{< tab "Deployed components" >}} + +[Materialize on AWS Terraform +module](https://github.com/MaterializeInc/terraform-aws-materialize/blob/main/README.md) +deploys a sample infrastructure on AWS (region `us-east-1`) with the following +components: + +{{< yaml-table data="self_managed/aws_terraform_deployed_components" >}} + +{{< tip >}} +{{% self-managed/aws-terraform-configs %}} +{{< /tip >}} + +{{}} +{{< tab "Releases" >}} + +{{< yaml-table data="self_managed/aws_terraform_versions" >}} + +{{}} +{{}} + +1. Open a Terminal window. + +1. Configure AWS CLI with your AWS credentials. For details, see the [AWS + documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). + +{{% self-managed/versions/step-clone-aws-terraform-repo %}} + +1. Go to the `examples/simple` folder in the Materialize Terraform repo + directory. 
+ + ```bash + cd terraform-aws-materialize/examples/simple + ``` + + {{< tip >}} + {{< self-managed/aws-terraform-configs >}} + {{< /tip >}} + +1. Create a `terraform.tfvars` file (you can copy from the + `terraform.tfvars.example` file) and specify the following variables: + + | Variable | Description | + |--------------------|-------------| + | `namespace` | A namespace (e.g., `my-demo`) that will be used to form part of the prefix for your AWS resources.
**Requirements:**
- Maximum of 12 characters
- Must start with a lowercase letter
- Must be lowercase alphanumeric and hyphens only | + | `environment` | An environment name (e.g., `dev`, `test`) that will be used to form part of the prefix for your AWS resources.
**Requirements:**
- Maximum of 8 characters
- Must be lowercase alphanumeric only | + + + ```bash + # The namespace and environment variables are used to construct the names of the resources + # e.g. ${namespace}-${environment}-storage, ${namespace}-${environment}-db etc. + + namespace = "enter-namespace" // maximum 12 characters, start with a letter, contain lowercase alphanumeric and hyphens only (e.g. my-demo) + environment = "enter-environment" // maximum 8 characters, lowercase alphanumeric only (e.g., dev, test) + ``` + + {{< tip >}} + {{< self-managed/aws-terraform-configs >}} + {{< /tip >}} + +1. Initialize the terraform directory. + + ```bash + terraform init + ``` + +1. Use terraform plan to review the changes to be made. + + ```bash + terraform plan + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply + ``` + + To approve the changes and apply, enter `yes`. + + + + Upon successful completion, various fields and their values are output: + + ```none + Apply complete! Resources: 89 added, 0 changed, 0 destroyed. + + Outputs: + + cluster_certificate_authority_data = + database_endpoint = "my-demo-dev-db.abcdefg8dsto.us-east-1.rds.amazonaws.com:5432" + eks_cluster_endpoint = "https://0123456789A00BCD000E11BE12345A01.gr7.us-east-1.eks.amazonaws.com" + eks_cluster_name = "my-demo-dev-eks" + materialize_s3_role_arn = "arn:aws:iam::000111222333:role/my-demo-dev-mz-role" + metadata_backend_url = + nlb_details = [] + oidc_provider_arn = "arn:aws:iam::000111222333:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/7D14BCA3A7AA896A836782D96A24F958" + persist_backend_url = "s3://my-demo-dev-storage-f2def2a9/dev:serviceaccount:materialize-environment:12345678-1234-1234-1234-12345678912" + s3_bucket_name = "my-demo-dev-storage-f2def2a9" + vpc_id = "vpc-0abc000bed1d111bd" + ``` + +1. Note your specific values for the following fields: + + - `eks_cluster_name` (Used to configure `kubectl`) + +1. Configure `kubectl` to connect to your EKS cluster, replacing: + + - `` with the name of your EKS cluster. Your cluster + name has the form `{namespace}-{environment}-eks`; e.g., + `my-demo-dev-eks`. + + - `` with the region of your EKS cluster. The + simple example uses `us-east-1`. + + ```bash + aws eks update-kubeconfig --name --region + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl get nodes + ``` + + For help with `kubectl` commands, see [kubectl Quick + reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +1. By default, the example Terraform installs the Materialize Operator and, + starting in v0.4.0, a `cert-manager`. 
Verify the installation and check the + status: + + {{< tabs >}} + {{< tab "Materialize Operator" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n materialize + ``` + + Wait for the components to be in the `Running` state: + + ```none + NAME READY STATUS RESTARTS AGE + pod/my-demo-dev-materialize-operator-84ff4b4648-brjhl 1/1 Running 0 12s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/my-demo-dev-materialize-operator 1/1 1 1 12s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/my-demo-dev-materialize-operator-84ff4b4648 1 1 1 12s + ``` + + {{}} + {{< tab "cert-manager (Starting in version 0.4.0)" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n cert-manager + ``` + Wait for the components to be in the `Running` state: + ``` + NAME READY STATUS RESTARTS AGE + pod/cert-manager-cainjector-686546c9f7-v9hwp 1/1 Running 0 4m20s + pod/cert-manager-d6746cf45-cdmb5 1/1 Running 0 4m20s + pod/cert-manager-webhook-5f79cd6f4b-rcjbq 1/1 Running 0 4m20s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/cert-manager ClusterIP 172.20.2.136 9402/TCP 4m20s + service/cert-manager-cainjector ClusterIP 172.20.154.137 9402/TCP 4m20s + service/cert-manager-webhook ClusterIP 172.20.63.217 443/TCP,9402/TCP 4m20s + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/cert-manager 1/1 1 1 4m20s + deployment.apps/cert-manager-cainjector 1/1 1 1 4m20s + deployment.apps/cert-manager-webhook 1/1 1 1 4m20s + NAME DESIRED CURRENT READY AGE + replicaset.apps/cert-manager-cainjector-686546c9f7 1 1 1 4m20s + replicaset.apps/cert-manager-d6746cf45 1 1 1 4m20s + replicaset.apps/cert-manager-webhook-5f79cd6f4b 1 1 1 + 4m20s + ``` + + {{}} + {{}} + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting) guide. + +1. Once the Materialize operator is deployed and running, you can deploy the + Materialize instances. To deploy Materialize instances, create a + `mz_instances.tfvars` file with the [Materialize instance + configuration](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances). + + For example, the following specifies the configuration for a `demo` instance. + + ```bash + cat < mz_instances.tfvars + + materialize_instances = [ + { + name = "demo" + namespace = "materialize-environment" + database_name = "demo_db" + cpu_request = "1" + memory_request = "2Gi" + memory_limit = "2Gi" + license_key = "" + } + ] + EOF + ``` + + - **Starting in v0.3.0**, the Materialize on AWS Terraform module also + deploys, by default, Network Load Balancers (NLBs) for each Materialize + instance (i.e., the + [`create_nlb`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) + flag defaults to `true`). The NLBs, by default, are configured to be + internal (i.e., the + [`internal_nlb`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) + flag defaults to `true`). See [`materialize_instances`]( + https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_materialize_instances) + for the Materialize instance configuration options. + + - **Starting in v0.4.0**, a self-signed `ClusterIssuer` is deployed by + default. The `ClusterIssuer` is deployed on subsequent after the + `cert-manager` is running. + + - **Starting in v0.4.6**, you can specify addition configuration options via + `environmentd_extra_args`. 
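As an illustrative sketch, extra `environmentd` arguments are added per instance in `mz_instances.tfvars`. The flag shown here is only an example; check the Materialize configuration documentation for the arguments supported by your version:

```hcl
# Illustrative only: pass additional environmentd arguments to an instance.
materialize_instances = [
  {
    name           = "demo"
    namespace      = "materialize-environment"
    database_name  = "demo_db"
    cpu_request    = "1"
    memory_request = "2Gi"
    memory_limit   = "2Gi"
    license_key    = "<your-license-key>"

    # Example argument only; adjust or remove as needed.
    environmentd_extra_args = [
      "--system-parameter-default=max_connections=500",
    ]
  }
]
```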
+ + {{< tip >}} + {{% self-managed/aws-terraform-upgrade-notes %}} + + See [Materialize on AWS releases](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) for notable changes. + {{}} + +1. Run `terraform plan` with both `.tfvars` files and review the changes to be + made. + + ```bash + terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + The plan should show the changes to be made, with a summary similar to the + following: + + ``` + Plan: 17 to add, 1 to change, 0 to destroy. + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + To approve the changes and apply, enter `yes`. + + Upon successful completion, you should see output with a summary similar to + the following: + + + + ```bash + Apply complete! Resources: 17 added, 1 changed, 0 destroyed. + + Outputs: + + cluster_certificate_authority_data = + database_endpoint = "my-demo-dev-db.abcdefg8dsto.us-east-1.rds.amazonaws.com:5432" + eks_cluster_endpoint = "https://0123456789A00BCD000E11BE12345A01.gr7.us-east-1.eks.amazonaws.com" + eks_cluster_name = "my-demo-dev-eks" + materialize_s3_role_arn = "arn:aws:iam::000111222333:role/my-demo-dev-mz-role" + metadata_backend_url = + nlb_details = [ + "demo" = { + "arn" = "arn:aws:elasticloadbalancing:us-east-1:000111222333:loadbalancer/net/my-demo-dev/aeae3d936afebcfe" + "dns_name" = "my-demo-dev-aeae3d936afebcfe.elb.us-east-1.amazonaws.com" + } + ] + oidc_provider_arn = "arn:aws:iam::000111222333:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/7D14BCA3A7AA896A836782D96A24F958" + persist_backend_url = "s3://my-demo-dev-storage-f2def2a9/dev:serviceaccount:materialize-environment:12345678-1234-1234-1234-12345678912" + s3_bucket_name = "my-demo-dev-storage-f2def2a9" + vpc_id = "vpc-0abc000bed1d111bd" + ``` + + The Network Load Balancer (NLB) details `nlb_details` are available when + running the Terraform module v0.3.0+. + +1. Verify the installation and check the status: + + ```bash + kubectl get all -n materialize-environment + ``` + + Wait for the components to be in the `Running` state. 
+ + ```none + NAME READY STATUS RESTARTS AGE + pod/create-db-demo-db-6swk7 0/1 Completed 0 33s + pod/mzutd2fbabf5-balancerd-6c9755c498-28kcw 1/1 Running 0 11s + pod/mzutd2fbabf5-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 11s + pod/mzutd2fbabf5-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 11s + pod/mzutd2fbabf5-console-57f94b4588-6lg2x 1/1 Running 0 4s + pod/mzutd2fbabf5-console-57f94b4588-v65lk 1/1 Running 0 4s + pod/mzutd2fbabf5-environmentd-1-0 1/1 Running 0 16s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/mzutd2fbabf5-balancerd ClusterIP None 6876/TCP,6875/TCP 11s + service/mzutd2fbabf5-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 12s + service/mzutd2fbabf5-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 12s + service/mzutd2fbabf5-console ClusterIP None 8080/TCP 4s + service/mzutd2fbabf5-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 11s + service/mzutd2fbabf5-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 16s + service/mzutd2fbabf5-persist-pubsub-1 ClusterIP None 6879/TCP 16s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/mzutd2fbabf5-balancerd 1/1 1 1 11s + deployment.apps/mzutd2fbabf5-console 2/2 2 2 4s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/mzutd2fbabf5-balancerd-6c9755c498 1 1 1 11s + replicaset.apps/mzutd2fbabf5-console-57f94b4588 2 2 2 4s + + NAME READY AGE + statefulset.apps/mzutd2fbabf5-cluster-s2-replica-s1-gen-1 1/1 12s + statefulset.apps/mzutd2fbabf5-cluster-u1-replica-u1-gen-1 1/1 11s + statefulset.apps/mzutd2fbabf5-environmentd-1 1/1 16s + + NAME STATUS COMPLETIONS DURATION AGE + job.batch/create-db-demo-db Complete 1/1 11s 33s + ``` + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting/). + +1. Open the Materialize Console in your browser: + + {{< tabs >}} + + {{< tab "Via Network Load Balancer" >}} + + Starting in v0.3.0, for each Materialize instance, Materialize on AWS + Terraform module also deploys AWS Network Load Balancers (by default, + internal) with the following listeners, including a listener on port 8080 for + the Materialize Console: + + | Port | Description | + | ---- | ------------| + | 6875 | For SQL connections to the database | + | 6876 | For HTTP(S) connections to the database | + | **8080** | **For HTTP(S) connections to Materialize Console** | + + The Network Load Balancer (NLB) details are found in the `nlb_details` in + the [Terraform output](#aws-terrafrom-output). + + The example uses a self-signed ClusterIssuer. As such, you may encounter a + warning with regards to the certificate. In production, run with certificates + from an official Certificate Authority (CA) rather than self-signed + certificates. + + {{}} + + {{< tab "Via port forwarding" >}} + + {{% self-managed/port-forwarding-handling %}} + + {{}} + {{}} + + {{< tip >}} + + {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} + + {{< /tip >}} + +## Next steps + +{{% self-managed/next-steps %}} + +## Cleanup + +{{% self-managed/cleanup-cloud %}} + + {{< tip >}} + + - To delete your S3 bucket, you may need to empty the S3 bucket first. If the + `terraform destroy` command is unable to delete the S3 bucket and does not + progress beyond "Still destroying...", empty the S3 bucket first and rerun + the `terraform destroy` command. + + - Upon successful destroy, you may receive some informational messages with + regards to CustomResourceDefinition(CRD). 
You may safely ignore these + messages as your whole deployment has been destroyed, including the CRDs. + + {{}} + +## See also + +- [Materialize Operator Configuration](/installation/configuration/) +- [Troubleshooting](/installation/troubleshooting/) +- [Appendix: AWS Deployment +guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) +- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-aws/upgrade-on-aws.md b/doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md similarity index 98% rename from doc/user/content/installation/install-on-aws/upgrade-on-aws.md rename to doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md index 6deb0e7ee509c..de77568fc4085 100644 --- a/doc/user/content/installation/install-on-aws/upgrade-on-aws.md +++ b/doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md @@ -1,9 +1,9 @@ --- -title: "Upgrade on AWS (Terraform)" +title: "Upgrade" description: "Procedure to upgrade your Materialize operator and instances running on AWS" menu: main: - parent: "install-on-aws" + parent: "install-on-aws-legacy-terraform-module" identifier: "upgrade-on-aws" weight: 10 --- diff --git a/doc/user/content/installation/install-on-aws/terraform-module/_index.md b/doc/user/content/installation/install-on-aws/terraform-module/_index.md new file mode 100644 index 0000000000000..475455fa85d85 --- /dev/null +++ b/doc/user/content/installation/install-on-aws/terraform-module/_index.md @@ -0,0 +1,148 @@ +--- +title: "Terraform Module" +description: "" +menu: + main: + parent: "install-on-aws" + identifier: "install-aws-terraform" + weight: 5 +--- + +Materialize provides a set of modular Terraform modules that can be used to +deploy all services required for a production ready Materialize database. +The module is intended to provide a simple set of examples on how to deploy +materialize. It can be used as is or modules can be taken from the example and +integrated with existing DevOps tooling. + +The repository can be found at: + +***[Materialize Terraform Self-Managed AWS](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws)*** + +Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) documentation for a full understanding +of the module structure and customizations. + +Also check out the [AWS deployment guide](/installation/install-on-aws/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. + +{{< note >}} +{{% self-managed/materialize-components-sentence %}} +{{< /note >}} + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + + +## Prerequisites + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [AWS Cli ](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- [`kubectl`](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +#### License key + +{{< include-md file="shared-content/license-key-required.md" >}} + +--- + +# Example: Simple Materialize Deployment on AWS + +This example demonstrates how to deploy a complete Materialize environment on AWS using the modular Terraform setup from this repository. 
+ + +## Setup +```shell +git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git +cd materialize-terraform-self-managed/aws/examples/simple +```` + + +## What Gets Created + +This example provisions the following infrastructure: + +### Networking +- **VPC**: 10.0.0.0/16 with DNS hostnames and support enabled +- **Subnets**: 3 private subnets (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) and 3 public subnets (10.0.101.0/24, 10.0.102.0/24, 10.0.103.0/24) across availability zones us-east-1a, us-east-1b, us-east-1c +- **NAT Gateway**: Single NAT Gateway for all private subnets +- **Internet Gateway**: For public subnet connectivity + +### Compute +- **EKS Cluster**: Version 1.32 with CloudWatch logging (API, audit) +- **Base Node Group**: 2 nodes (t4g.medium) for Karpenter and CoreDNS +- **Karpenter**: Auto-scaling controller with two node classes: + - Generic nodepool: t4g.xlarge instances for general workloads + - Materialize nodepool: r7gd.2xlarge instances with swap enabled and dedicated taints to run materialize instance workloads. + +### Database +- **RDS PostgreSQL**: Version 15, db.t3.large instance +- **Storage**: 50GB allocated, autoscaling up to 100GB +- **Deployment**: Single-AZ (non-production configuration) +- **Backups**: 7-day retention +- **Security**: Dedicated security group with access from EKS cluster and nodes + +### Storage +- **S3 Bucket**: Dedicated bucket for Materialize persistence +- **Encryption**: Disabled (for testing; enable in production) +- **Versioning**: Disabled (for testing; enable in production) +- **IAM Role**: IRSA role for Kubernetes service account access + +### Kubernetes Add-ons +- **AWS Load Balancer Controller**: For managing Network Load Balancers +- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal +- **Self-signed ClusterIssuer**: Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. + +### Materialize +- **Operator**: Materialize Kubernetes operator +- **Instance**: Single Materialize instance in `materialize-environment` namespace +- **Network Load Balancer**: Dedicated internal NLB for Materialize access (ports 6875, 6876, 8080) + +--- + +## Getting Started + +### Step 1: Set Required Variables + +Before running Terraform, create a `terraform.tfvars` file with the following variables: + +```hcl +name_prefix = "simple-demo" +aws_region = "us-east-1" +aws_profile = "your-aws-profile" +license_key = "your-materialize-license-key" # Get from https://materialize.com/self-managed/ +tags = { + environment = "demo" +} +``` + +**Required Variables:** +- `name_prefix`: Prefix for all resource names +- `aws_region`: AWS region for deployment +- `aws_profile`: AWS CLI profile to use +- `tags`: Map of tags to apply to resources +- `license_key`: Materialize license key + +--- + +### Step 2: Deploy Materialize + +Run the usual Terraform workflow: + +```bash +terraform init +terraform apply +``` + +--- + +## Notes + +* You can customize each module independently. +* To reduce cost in your demo environment, you can tweak subnet CIDRs and instance types in `main.tf`. 
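To sanity-check the deployed instance, one option is to port-forward from its Kubernetes services (a sketch assuming the default `materialize-environment` namespace used by this example; service names vary per instance):

```bash
# List the services created for the Materialize instance
kubectl get svc -n materialize-environment

# Forward the Console port locally, replacing <console-service> with the
# console service name shown by the previous command
kubectl port-forward svc/<console-service> 8080:8080 -n materialize-environment
```

Alternatively, connect through the internal Network Load Balancer listed above (port 6875 for SQL, 8080 for the Console).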
+ +***Don't forget to destroy resources when finished:*** +```bash +terraform destroy +``` diff --git a/doc/user/content/installation/install-on-azure/_index.md b/doc/user/content/installation/install-on-azure/_index.md index d61eba7238635..be733f8a566ac 100644 --- a/doc/user/content/installation/install-on-azure/_index.md +++ b/doc/user/content/installation/install-on-azure/_index.md @@ -1,6 +1,6 @@ --- -title: "Install on Azure (via Terraform)" -description: "Install Materialize on Azure Kubernetes Service (AKS) using Terraform" +title: "Install on Azure" +description: "Install and upgrade Materialize on Azure" disable_list: true menu: main: @@ -13,518 +13,14 @@ aliases: {{% self-managed/materialize-components-sentence blobstorage="blob storage; specifically **block** blob storage on Azure" %}} -The tutorial deploys Materialize to Azure Kubernetes Service (AKS) with a -PostgreSQL database as the metadata database and Azure premium block blob -storage for blob storage. The tutorial uses [Materialize on Azure Terraform -modules](https://github.com/MaterializeInc/terraform-azurerm-materialize) to: - -- Set up the Azure Kubernetes environment -- Call - [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) - module to deploy Materialize Operator and Materialize instances to that AKS - cluster - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< self-managed/tutorial-disclaimer >}} - -{{< /warning >}} - -## Prerequisites - -### Azure subscription - -If you do not have an Azure subscription to use for this tutorial, create one. - -### Azure CLI - -If you don't have Azure CLI installed, [install Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli). - -### Terraform - -If you don't have Terraform installed, [install Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). - -### kubectl - -If you do not have `kubectl`, install `kubectl`. - -### Python (v3.12+) and pip - -If you don't have Python (v3.12 or greater) installed, install it. See -[Python.org](https://www.python.org/downloads/). If `pip` is not included with -your version of Python, install it. - -### Helm 3.2.0+ - -If you don't have Helm version 3.2.0+ installed, install. For details, see to -the [Helm documentation](https://helm.sh/docs/intro/install/). - -### jq (Optional) - -*Optional*. `jq` is used to parse the AKS cluster name and region from the -Terraform outputs. Alternatively, you can manually specify the name and region. -If you want to use `jq` and do not have `jq` installed, install. - -### License key - -Starting in v26.0, Self-Managed Materialize requires a license key. - -{{< yaml-table data="self_managed/license_key" >}} - -## A. Authenticate with Azure - -1. Open a Terminal window. - -1. Authenticate with Azure. - - ```bash - az login - ``` - - The command opens a browser window to sign in to Azure. Sign in. - -1. Select the subscription and tenant to use. After you have signed in, back in - the terminal, your tenant and subscription information is displayed. - - ```none - Retrieving tenants and subscriptions for the selection... - - [Tenant and subscription selection] - - No Subscription name Subscription ID Tenant - ----- ------------------- ------------------------------------ ---------------- - [1]* ... ... ... - - The default is marked with an *; the default tenant is '' and - subscription is '' (). - ``` - - Select the subscription and tenant. - -1. Set `ARM_SUBSCRIPTION_ID` to the subscription ID. 
- - ```bash - export ARM_SUBSCRIPTION_ID= - ``` - -## B. Set up Azure Kubernetes environment and install Materialize - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - -{{< tabs >}} - -{{< tab "Deployed components" >}} - -[Materialize on Azure Terraform -module](https://github.com/MaterializeInc/terraform-azurerm-materialize) for -deploys a sample infrastructure on Azure with the following components: - -{{< yaml-table data="self_managed/azure_terraform_deployed_components" >}} - -{{< tip >}} - -{{% self-managed/azure-terraform-configs %}} - -{{< /tip >}} - -{{}} -{{< tab "Releases" >}} - -{{< yaml-table data="self_managed/azure_terraform_versions" >}} - -{{}} -{{}} - -1. Open a Terminal window. - -{{% self-managed/versions/step-clone-azure-terraform-repo %}} - -1. Go to the `examples/simple` folder in the Materialize Terraform repo - directory. - - ```bash - cd terraform-azurerm-materialize/examples/simple - ``` - - {{< tip >}} - - {{% self-managed/azure-terraform-configs %}} - - {{< /tip >}} - - -1. Optional. Create a virtual environment, specifying a path for the new virtual - environment: - - ```bash - python3 -m venv - - ``` - - Activate the virtual environment: - ```bash - source /bin/activate - ``` - -1. Install the required packages. - - ```bash - pip install -r requirements.txt - ``` - -1. Create a `terraform.tfvars` file (you can copy from the - `terraform.tfvars.example` file) and specify: - - - The prefix for the resources. Prefix has a maximum of 12 characters and - contains only alphanumeric characters and hyphens; e.g., `mydemo`. - - - The location for the AKS cluster. - - ```bash - prefix="enter-prefix" // maximum 12 characters, containing only alphanumeric characters and hyphens; e.g. mydemo - location="eastus2" - ``` - - {{< tip >}} - - {{% self-managed/azure-terraform-configs %}} - - {{< /tip >}} - -1. Initialize the terraform directory. - - ```bash - terraform init - ``` - -1. Use terraform plan to review the changes to be made. - - ```bash - terraform plan - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply - ``` - - To approve the changes and apply, enter `yes`. - - Upon successful completion, various fields and their values are output: - - ```bash - Apply complete! Resources: 33 added, 0 changed, 0 destroyed. - - Outputs: - - aks_cluster = - connection_strings = - kube_config = - load_balancer_details = {} - resource_group_name = "mydemo-rg" - ``` - -1. Configure `kubectl` to connect to your cluster: - - - ``. Your cluster name has the form `-aks`; e.g., - `mz-simple-aks`. - - - ``, as specified in the output. - - ```bash - az aks get-credentials --resource-group --name - ``` - - Alternatively, you can use the following command to get the cluster name and - resource group name from the Terraform output: - - ```bash - az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -json aks_cluster | jq -r '.name') - ``` - - To verify that you have configured correctly, run the following command: - - ```bash - kubectl cluster-info - ``` - - For help with `kubectl` commands, see [kubectl Quick - reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). - -1. By default, the example Terraform installs the Materialize Operator and, - starting in v0.3.0, a `cert-manager`. 
Verify the - installation and check the status: - - {{< tabs >}} - {{< tab "Materialize Operator" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n materialize - ``` - - Wait for the components to be in the `Running` state: - - ```none - NAME READY STATUS RESTARTS AGE - pod/materialize-mydemo-materialize-operator-74d8f549d6-lkjjf 1/1 Running 0 36m - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/materialize-mydemo-materialize-operator 1/1 1 1 36m - - NAME DESIRED CURRENT READY AGE - replicaset.apps/materialize-mydemo-materialize-operator-74d8f549d6 1 1 1 36m - ``` - - {{}} - - {{< tab "cert-manager (Starting in version 0.3.0)" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n cert-manager - ``` - Wait for the components to be in the `Running` state: - ``` - NAME READY STATUS RESTARTS AGE - pod/cert-manager-8576d99cc8-xqxbc 1/1 Running 0 4m22s - pod/cert-manager-cainjector-664b5878d6-wc4tz 1/1 Running 0 4m22s - pod/cert-manager-webhook-6ddb7bd6c5-vrm2p 1/1 Running 0 4m22s - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/cert-manager ClusterIP 10.1.227.230 9402/TCP 4m22s - service/cert-manager-cainjector ClusterIP 10.1.222.156 9402/TCP 4m22s - service/cert-manager-webhook ClusterIP 10.1.84.207 443/TCP,9402/TCP 4m22s - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/cert-manager 1/1 1 1 4m23s - deployment.apps/cert-manager-cainjector 1/1 1 1 4m23s - deployment.apps/cert-manager-webhook 1/1 1 1 4m23s - - NAME DESIRED CURRENT READY AGE - replicaset.apps/cert-manager-8576d99cc8 1 1 1 4m23s - replicaset.apps/cert-manager-cainjector-664b5878d6 1 1 1 4m23s - replicaset.apps/cert-manager-webhook-6ddb7bd6c5 1 1 1 4m23s - ``` - - {{}} - {{}} - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). - -1. Once the Materialize operator is deployed and running, you can deploy the - Materialize instances. To deploy Materialize instances, create a - `mz_instances.tfvars` file with the Materialize instance configuration. - - For example, the following specifies the configuration for a `demo` instance. - - ```bash - cat < mz_instances.tfvars - - materialize_instances = [ - { - name = "demo" - namespace = "materialize-environment" - database_name = "demo_db" - cpu_request = "1" - memory_request = "2Gi" - memory_limit = "2Gi" - license_key = "" - } - ] - EOF - ``` - - - **Starting in v26.0**, Self-Managed Materialize requires a license key. To - get your license key: - {{% yaml-table data="self_managed/license_key" %}} - - - **Starting in v0.3.0**, the Materialize on Azure Terraform module also - deploys, by default, a self-signed `ClusterIssuer`. The `ClusterIssuer` is - deployed after the `cert-manager` is deployed and running. - - - **Starting in v0.3.1**, the Materialize on Azure Terraform module also - deploys, by default, [Load - balancers](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) - for Materialize instances (i.e., the - [`create_load_balancer`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) - flag defaults to `true`). The load balancers, by default, are configured to - be internal (i.e., the - [`internal_load_balancer`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) - flag defaults to `true`). 
- - - **Starting in v0.4.3**, you can specify addition configuration options via - `environmentd_extra_args`. - - {{< tip >}} - {{% self-managed/azure-terraform-upgrade-notes %}} - - See [Materialize on Azure releases](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) for notable changes. - {{}} - -1. Run `terraform plan` with both `.tfvars` files and review the changes to be - made. - - ```bash - terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - The plan should show the changes to be made, with a summary similar to the - following: - - ``` - Plan: 9 to add, 1 to change, 0 to destroy. - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - To approve the changes and apply, enter `yes`. - - - - Upon successful completion, you should see output with a summary similar to the following: - - ```bash - Apply complete! Resources: 9 added, 1 changed, 0 destroyed. - - Outputs: - - aks_cluster = - connection_strings = - kube_config = - load_balancer_details = { - "demo" = { - "balancerd_load_balancer_ip" = "192.0.2.10" - "console_load_balancer_ip" = "192.0.2.254" - } - } - resource_group_name = "mydemo-rg" - ``` - -1. Verify the installation and check the status: - - ```bash - kubectl get all -n materialize-environment - ``` - - Wait for the components to be ready and in the `Running` state. - - ```none - NAME READY STATUS RESTARTS AGE - pod/db-demo-db-l6ss8 0/1 Completed 0 2m21s - pod/mz62lr3yltj8-balancerd-6d5dd6d4cf-r9nf4 1/1 Running 0 111s - pod/mz62lr3yltj8-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 114s - pod/mz62lr3yltj8-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 114s - pod/mz62lr3yltj8-console-bfc797745-6nlwv 1/1 Running 0 96s - pod/mz62lr3yltj8-console-bfc797745-tk9vm 1/1 Running 0 96s - pod/mz62lr3yltj8-environmentd-1-0 1/1 Running 0 2m4s - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/mz62lr3yltj8-balancerd ClusterIP None 6876/TCP,6875/TCP 111s - service/mz62lr3yltj8-balancerd-lb LoadBalancer 10.1.201.77 192.0.2.10 6875:30890/TCP,6876:31750/TCP 2m4s - service/mz62lr3yltj8-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 114s - service/mz62lr3yltj8-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 114s - service/mz62lr3yltj8-console ClusterIP None 8080/TCP 96s - service/mz62lr3yltj8-console-lb LoadBalancer 10.1.130.212 192.0.2.254 8080:30379/TCP 2m4s - service/mz62lr3yltj8-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 111s - service/mz62lr3yltj8-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 2m5s - service/mz62lr3yltj8-persist-pubsub-1 ClusterIP None 6879/TCP 2m4s - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/mz62lr3yltj8-balancerd 1/1 1 1 111s - deployment.apps/mz62lr3yltj8-console 2/2 2 2 96s - - NAME DESIRED CURRENT READY AGE - replicaset.apps/mz62lr3yltj8-balancerd-6d5dd6d4cf 1 1 1 111s - replicaset.apps/mz62lr3yltj8-console-bfc797745 2 2 2 96s - - NAME READY AGE - statefulset.apps/mz62lr3yltj8-cluster-s2-replica-s1-gen-1 1/1 114s - statefulset.apps/mz62lr3yltj8-cluster-u1-replica-u1-gen-1 1/1 114s - statefulset.apps/mz62lr3yltj8-environmentd-1 1/1 2m4s - - NAME STATUS COMPLETIONS DURATION AGE - job.batch/db-demo-db Complete 1/1 10s 2m21s - - ``` - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). - -1. 
Open the Materialize Console in your browser: - - - {{< tabs >}} - - {{< tab "Via Network Load Balancer" >}} - - Starting in v0.3.1, for each Materialize instance, Materialize on Azure - Terraform module also deploys load balancers (by default, internal) with the - following listeners, including a listener on port 8080 for the Materialize - Console: - - | Port | Description | - | ---- | ------------| - | 6875 | For SQL connections to the database | - | 6876 | For HTTP(S) connections to the database | - | **8080** | **For HTTP(S) connections to Materialize Console** | - - The load balancer details are found in the `load_balancer_details` in - the [Terraform output](#azure-terraform-output). - - The example uses a self-signed ClusterIssuer. As such, you may encounter a - warning with regards to the certificate. In production, run with certificates - from an official Certificate Authority (CA) rather than self-signed - certificates. - - {{}} - - {{< tab "Via port forwarding" >}} - - {{% self-managed/port-forwarding-handling %}} - - {{}} - {{}} - - {{< tip >}} - - {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} - - {{< /tip >}} - -## Next steps - -{{% self-managed/next-steps %}} - -## Cleanup - -{{% self-managed/cleanup-cloud %}} - - {{< tip>}} - - If the `terraform destroy` command is unable to delete the subnet because it - is in use, you can rerun the `terraform destroy` command. - - {{}} +| Guide | Description | +|-------|-------------| +| [Terraform Provider](/installation/install-on-azure/terraform-module/) | Install Materialize on Azure using our new Unified Terraform Provider | +| [Terraform Provider (legacy)](/installation/install-on-azure/legacy-terraform-module/) | Install Materialize on Azure using our Terraform Provider (legacy) | +| [Appendix: Azure deployment guidelines](/installation/install-on-azure/appendix-deployment-guidelines/) | Additional guidelines for Azure deployments | ## See also - [Materialize Operator Configuration](/installation/configuration/) - [Troubleshooting](/installation/troubleshooting/) -- [Appendix: Azure deployment guidelines](/installation/install-on-azure/ - appendix-deployment-guidelines) - [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md index b0be5c7180155..cb3f16f1a01c9 100644 --- a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md @@ -12,9 +12,9 @@ menu: As a general guideline, we recommend: -- Processor Type: ARM-based CPU - -- Sizing: 2:1 disk-to-RAM ratio with spill-to-disk enabled. +- ARM-based CPU +- A 1:8 ratio of vCPU to GiB memory is recommended. +- When using swap, it is recommended to use a 8:1 ratio of GiB local instance storage to GiB Ram. ### Recommended Azure VM Types with Local NVMe Disks @@ -37,43 +37,39 @@ when the VM is stopped or deleted. {{}} -See also [Locally attached NVMe storage](#locally-attached-nvme-storage). - ## Locally-attached NVMe storage -For optimal performance, Materialize requires fast, locally-attached NVMe -storage. Having a locally-attached storage allows Materialize to spill to disk -when operating on datasets larger than main memory as well as allows for a more -graceful degradation rather than OOMing. Network-attached storage (like EBS -volumes) can significantly degrade performance and is not supported. 
+Configuring swap on nodes to use locally-attached NVMe storage allows
+Materialize to spill to disk when operating on datasets larger than main memory.
+This setup can provide significant cost savings and allows for a more graceful
+degradation rather than OOMing. Network-attached storage (such as Azure managed disks) can
+significantly degrade performance and is not supported.
 
 ### Swap support
 
-Starting in v0.6.1 of Materialize on Azure Terraform,
-disk support (using swap on NVMe instance storage) may be enabled for
-Materialize. With this change, the Terraform:
+***New Unified Terraform***
+
+The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure/examples/simple) supports configuring swap out of the box.
+
+***Legacy Terraform***
+
+The legacy Terraform module adds preliminary swap support in v0.6.1 via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_swap_enabled) variable.
+With this change, the Terraform:
 - Creates a node group for Materialize.
 - Configures NVMe instance store volumes as swap using a daemonset.
 - Enables swap at the Kubelet.
 
-For swap support, the following configuration option is available:
-
-- [`swap_enabled`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_swap_enabled)
-
 See [Upgrade Notes](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#v061).
 
+{{< note >}}
+If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically.
+{{< /note >}}
+
 ## Recommended Azure Blob Storage
 
 Materialize writes **block** blobs on Azure. As a general guideline, we
 recommend **Premium block blob** storage accounts.
 
-## CPU affinity
-
-It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy).
-This ensures that each worker thread of Materialize is given exclusively access to a vCPU. Our benchmarks have shown this
-to substantially improve the performance of compute-bound workloads.
-
 ## TLS
 
 When running with TLS in production, run with certificates from an official
@@ -84,11 +80,3 @@ Certificate Authority (CA) rather than self-signed certificates.
- [Configuration](/installation/configuration/) - [Installation](/installation/) - [Troubleshooting](/installation/troubleshooting/) - -[`enable_disk_support`]: https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#disk-support-for-materialize-on-azure - -[`disk_support_config`]: - https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_disk_support_config - -[`disk_setup_image`]: - https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_disk_setup_image diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md new file mode 100644 index 0000000000000..9588822487a23 --- /dev/null +++ b/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md @@ -0,0 +1,23 @@ +--- +title: "Terraform Module (legacy)" +description: "" +disable_list: true +disable_toc: true +menu: + main: + parent: "install-on-azure" + identifier: "install-on-azure-legacy-terraform-module" + weight: 5 + + +--- + +The tutorials in this section show you how to deploy Materialize using the [Materialize on Azure Legacy Terraform +modules](https://github.com/MaterializeInc/terraform-azurerm-materialize). + + +| Guide | Description | +|-------|-------------| +| [Install](/installation/install-on-azure/legacy-terraform-module/install/) | Install Materialize on Azure | +| [Upgrade](/installation/install-on-azure/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on Azure | +| [Appendix: Azure configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/) | Configuration for Azure deployments | diff --git a/doc/user/content/installation/install-on-azure/appendix-azure-configuration.md b/doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md similarity index 82% rename from doc/user/content/installation/install-on-azure/appendix-azure-configuration.md rename to doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md index c2613724d1f97..2146660c94f74 100644 --- a/doc/user/content/installation/install-on-azure/appendix-azure-configuration.md +++ b/doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md @@ -3,7 +3,7 @@ title: "Appendix: Required configuration" description: "Required configuration for Materialize on Azure Terraform." menu: main: - parent: "install-on-azure" + parent: "install-on-azure-legacy-terraform-module" identifier: "appendix-azure-config" weight: 50 aliases: @@ -83,5 +83,21 @@ provider "helm" { } ``` +## Swap support + +Starting in v0.6.1 of Materialize on Azure Terraform, +disk support (using swap on NVMe instance storage) may be enabled for +Materialize. With this change, the Terraform: + +- Creates a node group for Materialize. +- Configures NVMe instance store volumes as swap using a daemonset. +- Enables swap at the Kubelet. + +For swap support, the following configuration option is available: + +- [`swap_enabled`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_swap_enabled) + +See [Upgrade Notes](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#v061). + [^1]: If using the `examples/simple/main.tf`, the example configuration handles them for you. 
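+
+For illustration, a minimal sketch of what enabling swap might look like in a
+`terraform.tfvars` for the legacy module (v0.6.1+). The `swap_enabled` input is
+the documented option above; the `prefix` and `location` values are placeholders
+taken from the install tutorial:
+
+```hcl
+# Hypothetical terraform.tfvars excerpt for the legacy Azure Terraform module (v0.6.1+).
+# swap_enabled is the documented input above; prefix and location are placeholder values.
+prefix       = "mydemo"
+location     = "eastus2"
+swap_enabled = true
+```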
diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md b/doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md new file mode 100644 index 0000000000000..4d340e0ee08fc --- /dev/null +++ b/doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md @@ -0,0 +1,522 @@ +--- +title: "Install" +description: "Install Materialize on Azure Kubernetes Service (AKS) using Terraform" +menu: + main: + parent: "install-on-azure-legacy-terraform-module" + identifier: "install-azure" + weight: 5 +--- + + +{{% self-managed/materialize-components-sentence blobstorage="blob storage; specifically **block** blob storage on Azure" %}} + +The tutorial deploys Materialize to Azure Kubernetes Service (AKS) with a +PostgreSQL database as the metadata database and Azure premium block blob +storage for blob storage. The tutorial uses [Materialize on Azure Terraform +modules](https://github.com/MaterializeInc/terraform-azurerm-materialize) to: + +- Set up the Azure Kubernetes environment +- Call + [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) + module to deploy Materialize Operator and Materialize instances to that AKS + cluster + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< self-managed/tutorial-disclaimer >}} + +{{< /warning >}} + +## Prerequisites + +### Azure subscription + +If you do not have an Azure subscription to use for this tutorial, create one. + +### Azure CLI + +If you don't have Azure CLI installed, [install Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli). + +### Terraform + +If you don't have Terraform installed, [install Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). + +### kubectl + +If you do not have `kubectl`, install `kubectl`. + +### Python (v3.12+) and pip + +If you don't have Python (v3.12 or greater) installed, install it. See +[Python.org](https://www.python.org/downloads/). If `pip` is not included with +your version of Python, install it. + +### Helm 3.2.0+ + +If you don't have Helm version 3.2.0+ installed, install. For details, see to +the [Helm documentation](https://helm.sh/docs/intro/install/). + +### jq (Optional) + +*Optional*. `jq` is used to parse the AKS cluster name and region from the +Terraform outputs. Alternatively, you can manually specify the name and region. +If you want to use `jq` and do not have `jq` installed, install. + +### License key + +{{< include-md file="shared-content/license-key-required.md" >}} + +## A. Authenticate with Azure + +1. Open a Terminal window. + +1. Authenticate with Azure. + + ```bash + az login + ``` + + The command opens a browser window to sign in to Azure. Sign in. + +1. Select the subscription and tenant to use. After you have signed in, back in + the terminal, your tenant and subscription information is displayed. + + ```none + Retrieving tenants and subscriptions for the selection... + + [Tenant and subscription selection] + + No Subscription name Subscription ID Tenant + ----- ------------------- ------------------------------------ ---------------- + [1]* ... ... ... + + The default is marked with an *; the default tenant is '' and + subscription is '' (). + ``` + + Select the subscription and tenant. + +1. Set `ARM_SUBSCRIPTION_ID` to the subscription ID. + + ```bash + export ARM_SUBSCRIPTION_ID= + ``` + +## B. 
Set up Azure Kubernetes environment and install Materialize + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +{{< tabs >}} + +{{< tab "Deployed components" >}} + +[Materialize on Azure Terraform +module](https://github.com/MaterializeInc/terraform-azurerm-materialize) for +deploys a sample infrastructure on Azure with the following components: + +{{< yaml-table data="self_managed/azure_terraform_deployed_components" >}} + +{{< tip >}} + +{{% self-managed/azure-terraform-configs %}} + +{{< /tip >}} + +{{}} +{{< tab "Releases" >}} + +{{< yaml-table data="self_managed/azure_terraform_versions" >}} + +{{}} +{{}} + +1. Open a Terminal window. + +{{% self-managed/versions/step-clone-azure-terraform-repo %}} + +1. Go to the `examples/simple` folder in the Materialize Terraform repo + directory. + + ```bash + cd terraform-azurerm-materialize/examples/simple + ``` + + {{< tip >}} + + {{% self-managed/azure-terraform-configs %}} + + {{< /tip >}} + + +1. Optional. Create a virtual environment, specifying a path for the new virtual + environment: + + ```bash + python3 -m venv + + ``` + + Activate the virtual environment: + ```bash + source /bin/activate + ``` + +1. Install the required packages. + + ```bash + pip install -r requirements.txt + ``` + +1. Create a `terraform.tfvars` file (you can copy from the + `terraform.tfvars.example` file) and specify: + + - The prefix for the resources. Prefix has a maximum of 12 characters and + contains only alphanumeric characters and hyphens; e.g., `mydemo`. + + - The location for the AKS cluster. + + ```bash + prefix="enter-prefix" // maximum 12 characters, containing only alphanumeric characters and hyphens; e.g. mydemo + location="eastus2" + ``` + + {{< tip >}} + + {{% self-managed/azure-terraform-configs %}} + + {{< /tip >}} + +1. Initialize the terraform directory. + + ```bash + terraform init + ``` + +1. Use terraform plan to review the changes to be made. + + ```bash + terraform plan + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply + ``` + + To approve the changes and apply, enter `yes`. + + Upon successful completion, various fields and their values are output: + + ```bash + Apply complete! Resources: 33 added, 0 changed, 0 destroyed. + + Outputs: + + aks_cluster = + connection_strings = + kube_config = + load_balancer_details = {} + resource_group_name = "mydemo-rg" + ``` + +1. Configure `kubectl` to connect to your cluster: + + - ``. Your cluster name has the form `-aks`; e.g., + `mz-simple-aks`. + + - ``, as specified in the output. + + ```bash + az aks get-credentials --resource-group --name + ``` + + Alternatively, you can use the following command to get the cluster name and + resource group name from the Terraform output: + + ```bash + az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -json aks_cluster | jq -r '.name') + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl cluster-info + ``` + + For help with `kubectl` commands, see [kubectl Quick + reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +1. By default, the example Terraform installs the Materialize Operator and, + starting in v0.3.0, a `cert-manager`. 
Verify the + installation and check the status: + + {{< tabs >}} + {{< tab "Materialize Operator" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n materialize + ``` + + Wait for the components to be in the `Running` state: + + ```none + NAME READY STATUS RESTARTS AGE + pod/materialize-mydemo-materialize-operator-74d8f549d6-lkjjf 1/1 Running 0 36m + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/materialize-mydemo-materialize-operator 1/1 1 1 36m + + NAME DESIRED CURRENT READY AGE + replicaset.apps/materialize-mydemo-materialize-operator-74d8f549d6 1 1 1 36m + ``` + + {{}} + + {{< tab "cert-manager (Starting in version 0.3.0)" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n cert-manager + ``` + Wait for the components to be in the `Running` state: + ``` + NAME READY STATUS RESTARTS AGE + pod/cert-manager-8576d99cc8-xqxbc 1/1 Running 0 4m22s + pod/cert-manager-cainjector-664b5878d6-wc4tz 1/1 Running 0 4m22s + pod/cert-manager-webhook-6ddb7bd6c5-vrm2p 1/1 Running 0 4m22s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/cert-manager ClusterIP 10.1.227.230 9402/TCP 4m22s + service/cert-manager-cainjector ClusterIP 10.1.222.156 9402/TCP 4m22s + service/cert-manager-webhook ClusterIP 10.1.84.207 443/TCP,9402/TCP 4m22s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/cert-manager 1/1 1 1 4m23s + deployment.apps/cert-manager-cainjector 1/1 1 1 4m23s + deployment.apps/cert-manager-webhook 1/1 1 1 4m23s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/cert-manager-8576d99cc8 1 1 1 4m23s + replicaset.apps/cert-manager-cainjector-664b5878d6 1 1 1 4m23s + replicaset.apps/cert-manager-webhook-6ddb7bd6c5 1 1 1 4m23s + ``` + + {{}} + {{}} + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting/). + +1. Once the Materialize operator is deployed and running, you can deploy the + Materialize instances. To deploy Materialize instances, create a + `mz_instances.tfvars` file with the Materialize instance configuration. + + For example, the following specifies the configuration for a `demo` instance. + + ```bash + cat < mz_instances.tfvars + + materialize_instances = [ + { + name = "demo" + namespace = "materialize-environment" + database_name = "demo_db" + cpu_request = "1" + memory_request = "2Gi" + memory_limit = "2Gi" + license_key = "" + } + ] + EOF + ``` + + - **Starting in v0.3.0**, the Materialize on Azure Terraform module also + deploys, by default, a self-signed `ClusterIssuer`. The `ClusterIssuer` is + deployed after the `cert-manager` is deployed and running. + + - **Starting in v0.3.1**, the Materialize on Azure Terraform module also + deploys, by default, [Load + balancers](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) + for Materialize instances (i.e., the + [`create_load_balancer`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) + flag defaults to `true`). The load balancers, by default, are configured to + be internal (i.e., the + [`internal_load_balancer`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_materialize_instances) + flag defaults to `true`). + + - **Starting in v0.4.3**, you can specify addition configuration options via + `environmentd_extra_args`. 
+ + {{< tip >}} + {{% self-managed/azure-terraform-upgrade-notes %}} + + See [Materialize on Azure releases](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) for notable changes. + {{}} + +1. Run `terraform plan` with both `.tfvars` files and review the changes to be + made. + + ```bash + terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + The plan should show the changes to be made, with a summary similar to the + following: + + ``` + Plan: 9 to add, 1 to change, 0 to destroy. + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + To approve the changes and apply, enter `yes`. + + + + Upon successful completion, you should see output with a summary similar to the following: + + ```bash + Apply complete! Resources: 9 added, 1 changed, 0 destroyed. + + Outputs: + + aks_cluster = + connection_strings = + kube_config = + load_balancer_details = { + "demo" = { + "balancerd_load_balancer_ip" = "192.0.2.10" + "console_load_balancer_ip" = "192.0.2.254" + } + } + resource_group_name = "mydemo-rg" + ``` + +1. Verify the installation and check the status: + + ```bash + kubectl get all -n materialize-environment + ``` + + Wait for the components to be ready and in the `Running` state. + + ```none + NAME READY STATUS RESTARTS AGE + pod/db-demo-db-l6ss8 0/1 Completed 0 2m21s + pod/mz62lr3yltj8-balancerd-6d5dd6d4cf-r9nf4 1/1 Running 0 111s + pod/mz62lr3yltj8-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 114s + pod/mz62lr3yltj8-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 114s + pod/mz62lr3yltj8-console-bfc797745-6nlwv 1/1 Running 0 96s + pod/mz62lr3yltj8-console-bfc797745-tk9vm 1/1 Running 0 96s + pod/mz62lr3yltj8-environmentd-1-0 1/1 Running 0 2m4s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/mz62lr3yltj8-balancerd ClusterIP None 6876/TCP,6875/TCP 111s + service/mz62lr3yltj8-balancerd-lb LoadBalancer 10.1.201.77 192.0.2.10 6875:30890/TCP,6876:31750/TCP 2m4s + service/mz62lr3yltj8-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 114s + service/mz62lr3yltj8-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 114s + service/mz62lr3yltj8-console ClusterIP None 8080/TCP 96s + service/mz62lr3yltj8-console-lb LoadBalancer 10.1.130.212 192.0.2.254 8080:30379/TCP 2m4s + service/mz62lr3yltj8-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 111s + service/mz62lr3yltj8-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 2m5s + service/mz62lr3yltj8-persist-pubsub-1 ClusterIP None 6879/TCP 2m4s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/mz62lr3yltj8-balancerd 1/1 1 1 111s + deployment.apps/mz62lr3yltj8-console 2/2 2 2 96s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/mz62lr3yltj8-balancerd-6d5dd6d4cf 1 1 1 111s + replicaset.apps/mz62lr3yltj8-console-bfc797745 2 2 2 96s + + NAME READY AGE + statefulset.apps/mz62lr3yltj8-cluster-s2-replica-s1-gen-1 1/1 114s + statefulset.apps/mz62lr3yltj8-cluster-u1-replica-u1-gen-1 1/1 114s + statefulset.apps/mz62lr3yltj8-environmentd-1 1/1 2m4s + + NAME STATUS COMPLETIONS DURATION AGE + job.batch/db-demo-db Complete 1/1 10s 2m21s + + ``` + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting/). + +1. 
Open the Materialize Console in your browser: + + + {{< tabs >}} + + {{< tab "Via Network Load Balancer" >}} + + Starting in v0.3.1, for each Materialize instance, Materialize on Azure + Terraform module also deploys load balancers (by default, internal) with the + following listeners, including a listener on port 8080 for the Materialize + Console: + + | Port | Description | + | ---- | ------------| + | 6875 | For SQL connections to the database | + | 6876 | For HTTP(S) connections to the database | + | **8080** | **For HTTP(S) connections to Materialize Console** | + + The load balancer details are found in the `load_balancer_details` in + the [Terraform output](#azure-terraform-output). + + The example uses a self-signed ClusterIssuer. As such, you may encounter a + warning with regards to the certificate. In production, run with certificates + from an official Certificate Authority (CA) rather than self-signed + certificates. + + {{}} + + {{< tab "Via port forwarding" >}} + + {{% self-managed/port-forwarding-handling %}} + + {{}} + {{}} + + {{< tip >}} + + {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} + + {{< /tip >}} + +## Next steps + +{{% self-managed/next-steps %}} + +## Cleanup + +{{% self-managed/cleanup-cloud %}} + + {{< tip>}} + + If the `terraform destroy` command is unable to delete the subnet because it + is in use, you can rerun the `terraform destroy` command. + + {{}} + +## See also + +- [Materialize Operator Configuration](/installation/configuration/) +- [Troubleshooting](/installation/troubleshooting/) +- [Appendix: Azure deployment guidelines](/installation/install-on-azure/ + appendix-deployment-guidelines) +- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-azure/upgrade-on-azure.md b/doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md similarity index 98% rename from doc/user/content/installation/install-on-azure/upgrade-on-azure.md rename to doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md index 763d5004888cc..c11593bfe4014 100644 --- a/doc/user/content/installation/install-on-azure/upgrade-on-azure.md +++ b/doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md @@ -1,9 +1,9 @@ --- -title: "Upgrade on Azure (Terraform)" +title: "Upgrade" description: "Procedure to upgrade your Materialize operator and instances running on Azure" menu: main: - parent: "install-on-azure" + parent: "install-on-azure-legacy-terraform-module" identifier: "upgrade-on-azure" weight: 10 --- diff --git a/doc/user/content/installation/install-on-azure/terraform-module/_index.md b/doc/user/content/installation/install-on-azure/terraform-module/_index.md new file mode 100644 index 0000000000000..ca72c1f6a607b --- /dev/null +++ b/doc/user/content/installation/install-on-azure/terraform-module/_index.md @@ -0,0 +1,154 @@ +--- +title: "Terraform Module" +description: "" +menu: + main: + parent: "install-on-azure" + identifier: "install-azure-terraform" + weight: 5 +--- + +Materialize provides a set of modular Terraform modules that can be used to +deploy all services required for a production ready Materialize database. +The module is intended to provide a simple set of examples on how to deploy +materialize. It can be used as is or modules can be taken from the example and +integrated with existing DevOps tooling. 
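+
+As a hypothetical sketch of the "integrate with existing DevOps tooling" path,
+you could reference the example configuration as a child module and pin it to a
+ref. The ref and inputs below are assumptions (the inputs mirror the
+`terraform.tfvars` example later on this page), and copying the example into
+your own repository and adapting it works just as well:
+
+```hcl
+# Hypothetical sketch only; verify the ref and inputs against the repository.
+module "materialize_azure_simple" {
+  source = "github.com/MaterializeInc/materialize-terraform-self-managed//azure/examples/simple?ref=main"
+
+  subscription_id     = "12345678-1234-1234-1234-123456789012"
+  resource_group_name = "materialize-demo-rg"
+  name_prefix         = "simple-demo"
+  location            = "westus2"
+  license_key         = "your-materialize-license-key"
+  tags                = { environment = "demo" }
+}
+```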
+ +The repository can be found at: + +***[Materialize Terraform Self-Managed Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)*** + +Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) documentation for a full understanding +of the module structure and customizations. + +Also check out the [Azure deployment guide](/installation/install-on-azure/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. + +{{< note >}} +{{% self-managed/materialize-components-sentence %}} +{{< /note >}} + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + + +## Prerequisites + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [Azure Cli ](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) +- [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +#### License key + +{{< include-md file="shared-content/license-key-required.md" >}} + +--- + +# Example: Simple Materialize Deployment on Azure + +This example demonstrates how to deploy a complete Materialize environment on Azure using the modular Terraform setup from this repository. + + +## Setup +```shell +git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git +cd materialize-terraform-self-managed/azure/examples/simple +```` + +## What Gets Created + +This example provisions the following infrastructure: + +### Resource Group +- **Resource Group**: New resource group to contain all resources + +### Networking +- **Virtual Network**: 20.0.0.0/16 address space +- **AKS Subnet**: 20.0.0.0/20 with NAT Gateway association and service endpoints for Storage and SQL +- **PostgreSQL Subnet**: 20.0.16.0/24 delegated to PostgreSQL Flexible Server +- **NAT Gateway**: Standard SKU with static public IP for outbound connectivity +- **Private DNS Zone**: For PostgreSQL private endpoint resolution with VNet link + +### Compute +- **AKS Cluster**: Version 1.32 with Cilium networking (network plugin: azure, data plane: cilium, policy: cilium) +- **Default Node Pool**: Standard_D4pds_v6 VMs, autoscaling 2-5 nodes, labeled for generic workloads +- **Materialize Node Pool**: Standard_E4pds_v6 VMs with 100GB disk, autoscaling 2-5 nodes, swap enabled, dedicated taints for Materialize workloads +- **Managed Identities**: + - AKS cluster identity: Used by AKS control plane to provision Azure resources (creating load balancers when Materialize LoadBalancer services are created, managing network interfaces) + - Workload identity: Used by Materialize pods for secure, passwordless authentication to Azure Storage (no storage account keys stored in cluster) + +### Database +- **Azure PostgreSQL Flexible Server**: Version 15 +- **SKU**: GP_Standard_D2s_v3 (2 vCores, 4GB memory) +- **Storage**: 32GB with 7-day backup retention +- **Network Access**: Public Network Access is disabled, Private access only (no public endpoint) +- **Database**: `materialize` database pre-created + +### Storage +- **Storage Account**: Premium BlockBlobStorage with LRS replication for Materialize persistence +- **Container**: `materialize` blob container +- **Access Control**: Workload Identity federation for Kubernetes service account (passwordless authentication via OIDC) +- **Network 
Access**: Currently allows all traffic (production deployments should restrict to AKS subnet only traffic) + +### Kubernetes Add-ons +- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal +- **Self-signed ClusterIssuer**: Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. + +### Materialize +- **Operator**: Materialize Kubernetes operator +- **Instance**: Single Materialize instance in `materialize-environment` namespace +- **Load Balancers**: Internal Azure Load Balancers for Materialize access + +--- + +## Getting Started + +### Step 1: Set Required Variables + +Before running Terraform, create a `terraform.tfvars` file with the following variables: + +```hcl +subscription_id = "12345678-1234-1234-1234-123456789012" +resource_group_name = "materialize-demo-rg" +name_prefix = "simple-demo" +location = "westus2" +license_key = "your-materialize-license-key" # Optional: Get from https://materialize.com/self-managed/ +tags = { + environment = "demo" +} +``` + +**Required Variables:** +- `subscription_id`: Azure subscription ID +- `resource_group_name`: Name for the resource group (will be created) +- `name_prefix`: Prefix for all resource names +- `location`: Azure region for deployment +- `tags`: Map of tags to apply to resources +- `license_key`: Materialize license key + +--- + +### Step 2: Deploy Materialize + +Run the usual Terraform workflow: + +```bash +terraform init +terraform apply +``` + +## Notes + +*Autoscaling: Uses Azure's native cluster autoscaler that integrates directly with Azure Virtual Machine Scale Sets for automated node scaling. In future we are planning to enhance this by making use of karpenter-provider-azure* + +* You can customize each module independently. +* To reduce cost in your demo environment, you can tweak VM sizes and database tiers in `main.tf`. + +***Don't forget to destroy resources when finished:*** + +```bash +terraform destroy +``` diff --git a/doc/user/content/installation/install-on-gcp/_index.md b/doc/user/content/installation/install-on-gcp/_index.md index b6c9117b1f6e8..c62a95c53eec1 100644 --- a/doc/user/content/installation/install-on-gcp/_index.md +++ b/doc/user/content/installation/install-on-gcp/_index.md @@ -1,6 +1,6 @@ --- -title: "Install on GCP (via Terraform)" -description: "" +title: "Install on GCP" +description: "Install and upgrade Materialize on GCP" aliases: - /self-hosted/install-on-gcp/ - /self-managed/v25.1/installation/install-on-gcp/ @@ -14,577 +14,14 @@ menu: {{% self-managed/materialize-components-sentence %}} -This tutorial deploys Materialize to GCP Google Kubernetes Engine (GKE) cluster -with a Cloud SQL PostgreSQL database as the metadata database and Cloud Storage -bucket for blob storage. Specifically, the tutorial uses [Materialize on Google -Cloud Provider Terraform -module](https://github.com/MaterializeInc/terraform-google-materialize) to: - -- Set up the GCP environment. - -- Call - [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) - module to deploy Materialize Operator and Materialize instances to the GKE - cluster. 
- -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< self-managed/tutorial-disclaimer >}} - -{{< /warning >}} - -## Prerequisites - -### Google cloud provider project - -You need a GCP project for which you have a role (such as -`roles/resourcemanager.projectIamAdmin` or `roles/owner`) that includes [ -permissions to manage access to the -project](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - -### gcloud CLI - -If you do not have gcloud CLI, install. For details, see the [Install the gcloud -CLI documentation](https://cloud.google.com/sdk/docs/install). - -### Google service account - -The tutorial assumes the use of a service account. If you do not have a service -account to use for this tutorial, create a service account. For details, see -[Create service -accounts](https://cloud.google.com/iam/docs/service-accounts-create#creating). - -### Terraform - -If you do not have Terraform installed, [install -Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). - -### kubectl and plugins - -{{< tip >}} - -Using `gcloud` to install `kubectl` will also install the needed plugins. -Otherwise, you will need to manually install the `gke-gcloud-auth-plugin` for -`kubectl`. - -{{< /tip >}} - -- If you do not have `kubectl`, install `kubectl`. To install, see [Install - kubectl and configure cluster - access](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) - for details. You will configure `kubectl` to interact with your GKE cluster - later in the tutorial. - -- If you do not have `gke-gcloud-auth-plugin` for `kubectl`, install the - `gke-gcloud-auth-plugin`. For details, see [Install the - gke-gcloud-auth-plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin). - -### Helm 3.2.0+ - -If you do not have Helm version 3.2.0+ installed, install. For details, see the -[Helm documentation](https://helm.sh/docs/intro/install/). - -### jq (Optional) - -*Optional*. `jq` is used to parse the GKE cluster name and region from the -Terraform outputs. Alternatively, you can manually specify the name and region. -If you want to use `jq` and do not have `jq` installed, install. - -### License key - -Starting in v26.0, Self-Managed Materialize requires a license key. - -{{< yaml-table data="self_managed/license_key" >}} - -## A. Configure GCP project and service account - -1. Open a Terminal window. - -1. Initialize the gcloud CLI (`gcloud init`) to specify the GCP project you want - to use. For details, see the [Initializing the gcloud CLI - documentation](https://cloud.google.com/sdk/docs/initializing#initialize_the). - - {{< tip >}} - You do not need to configure a default Compute Region and Zone as you will - specify the region. - {{}} - -1. Enable the following services for your GCP project, if not already enabled: - - ```bash - gcloud services enable container.googleapis.com # For creating Kubernetes clusters - gcloud services enable sqladmin.googleapis.com # For creating databases - gcloud services enable cloudresourcemanager.googleapis.com # For managing GCP resources - gcloud services enable servicenetworking.googleapis.com # For private network connections - gcloud services enable iamcredentials.googleapis.com # For security and authentication - ``` - -1. 
To the service account that will run the Terraform script, - grant the following IAM roles: - - - `roles/editor` - - `roles/iam.serviceAccountAdmin` - - `roles/servicenetworking.networksAdmin` - - `roles/storage.admin` - - `roles/container.admin` - - 1. Enter your GCP project ID. - - ```bash - read -s PROJECT_ID - ``` - - 1. Find your service account email for your GCP project - - ```bash - gcloud iam service-accounts list --project $PROJECT_ID - ``` - - 1. Enter your service account email. - - ```bash - read -s SERVICE_ACCOUNT - ``` - - 1. Grant the service account the neccessary IAM roles. - - ```bash - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member="serviceAccount:$SERVICE_ACCOUNT" \ - --role="roles/editor" - - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member="serviceAccount:$SERVICE_ACCOUNT" \ - --role="roles/iam.serviceAccountAdmin" - - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member="serviceAccount:$SERVICE_ACCOUNT" \ - --role="roles/servicenetworking.networksAdmin" - - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member="serviceAccount:$SERVICE_ACCOUNT" \ - --role="roles/storage.admin" - - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member="serviceAccount:$SERVICE_ACCOUNT" \ - --role="roles/container.admin" - ``` - -1. For the service account, authenticate to allow Terraform to - interact with your GCP project. For details, see [Terraform: Google Cloud - Provider Configuration - reference](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#authentication). - - For example, if using [User Application Default - Credentials](https://cloud.google.com/sdk/gcloud/reference/auth/application-default), - you can run the following command: - - ```bash - gcloud auth application-default login - ``` - - {{< tip >}} - If using `GOOGLE_APPLICATION_CREDENTIALS`, use absolute path to your key file. - {{}} - -## B. Set up GCP Kubernetes environment and install Materialize - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - -{{< tabs >}} - -{{< tab "Deployed components" >}} -[Materialize on GCP Terraform -module](https://github.com/MaterializeInc/terraform-google-materialize) deploys -a sample infrastructure on GCP (region `us-central1`) with the following -components: - -{{< yaml-table data="self_managed/gcp_terraform_deployed_components" >}} - -{{< tip >}} -{{< self-managed/gcp-terraform-configs >}} -{{< /tip >}} -{{}} -{{< tab "Releases" >}} - -{{< yaml-table data="self_managed/gcp_terraform_versions" >}} - -{{}} -{{}} - -{{% self-managed/versions/step-clone-google-terraform-repo %}} - -1. Go to the `examples/simple` folder in the Materialize Terraform repo - directory. - - ```bash - cd terraform-google-materialize/examples/simple - ``` - - {{< tip >}} - {{< self-managed/gcp-terraform-configs >}} - {{< /tip >}} - -1. Create a `terraform.tfvars` file (you can copy from the - `terraform.tfvars.example` file) and specify the following variables: - - | **Variable** | **Description** | - |--------------|-----------------| - | `project_id` | Your GCP project ID. | - | `prefix` | A prefix (e.g., `mz-simple`) for your resources. Prefix has a maximum of 15 characters and contains only alphanumeric characters and dashes. | - | `region` | The region for the GKE cluster. 
| - - ```bash - project_id = "enter-your-gcp-project-id" - prefix = "enter-your-prefix" // Maximum of 15 characters, contain lowercase alphanumeric and hyphens only (e.g., mz-simple) - region = "us-central1" - ``` - - {{< tip >}} - - {{< self-managed/gcp-terraform-configs >}} - - {{< /tip >}} - -1. Initialize the terraform directory. - - ```bash - terraform init - ``` - -1. Run terraform plan and review the changes to be made. - - ```bash - terraform plan - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply - ``` - - To approve the changes and apply, enter `yes`. - - Upon successful completion, various fields and their values are output: - - ```bash - Apply complete! Resources: 27 added, 0 changed, 0 destroyed. - - Outputs: - - connection_strings = - gke_cluster = - load_balancer_details = {} - network = { - "network_id" = "projects/my-project/global/networks/mz-simple-network" - "network_name" = "mz-simple-network" - "subnet_name" = "mz-simple-subnet" - } - service_accounts = { - "gke_sa" = "mz-simple-gke-sa@my-project.iam.gserviceaccount.com" - "materialize_sa" = "mz-simple-materialize-sa@my-project.iam.gserviceaccount.com" - } - ``` - -1. Configure `kubectl` to connect to your GKE cluster, specifying: - - - ``. Your cluster name has the form `-gke`; e.g., - `mz-simple-gke`. - - - ``. By default, the example Terraform module uses the `us-central1` - region. - - - ``. Your GCP project ID. - - ```bash - gcloud container clusters get-credentials \ - --region \ - --project - ``` - - Alternatively, you can use the following command to get the cluster name and - region from the Terraform output and the project ID from the environment - variable set earlier. - - ```bash - gcloud container clusters get-credentials $(terraform output -json gke_cluster | jq -r .name) \ - --region $(terraform output -json gke_cluster | jq -r .location) --project $PROJECT_ID - ``` - - To verify that you have configured correctly, run the following command: - - ```bash - kubectl cluster-info - ``` - - For help with `kubectl` commands, see [kubectl Quick - reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). - -1. By default, the example Terraform installs the Materialize Operator and, - starting in v0.3.0, a `cert-manager`. 
Verify the - installation and check the status: - - {{< tabs >}} - {{< tab "Materialize Operator" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n materialize - ``` - - Wait for the components to be in the `Running` state: - - ```none - NAME READY STATUS RESTARTS AGE - pod/materialize-mz-simple-materialize-operator-74d8f549d6-lkjjf 1/1 Running 0 36m - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/materialize-mz-simple-materialize-operator 1/1 1 1 36m - - NAME DESIRED CURRENT READY AGE - replicaset.apps/materialize-mz-simple-materialize-operator-74d8f549d6 1 1 1 36m - ``` - - {{}} - {{< tab "cert-manager (Starting in version 0.3.0)" >}} - - Verify the installation and check the status: - - ```shell - kubectl get all -n cert-manager - ``` - Wait for the components to be in the `Running` state: - ``` - NAME READY STATUS RESTARTS AGE - pod/cert-manager-6794b8d569-vt264 1/1 Running 0 22m - pod/cert-manager-cainjector-7f69cd69f7-7brqw 1/1 Running 0 22m - pod/cert-manager-webhook-6cc5dccc4b-7tmd4 1/1 Running 0 22m - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/cert-manager ClusterIP 10.52.3.63 9402/TCP 22m - service/cert-manager-cainjector ClusterIP 10.52.15.171 9402/TCP 22m - service/cert-manager-webhook ClusterIP 10.52.5.148 443/TCP,9402/TCP 22m - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/cert-manager 1/1 1 1 22m - deployment.apps/cert-manager-cainjector 1/1 1 1 22m - deployment.apps/cert-manager-webhook 1/1 1 1 22m - - NAME DESIRED CURRENT READY AGE - replicaset.apps/cert-manager-6794b8d569 1 1 1 22m - replicaset.apps/cert-manager-cainjector-7f69cd69f7 1 1 1 22m - replicaset.apps/cert-manager-webhook-6cc5dccc4b 1 1 1 22m - ``` - - {{}} - {{}} - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). - -1. Once the Materialize operator is deployed and running, you can deploy the - Materialize instances. To deploy Materialize instances, create a - `mz_instances.tfvars` file with the Materialize instance configuration. - - For example, the following specifies the configuration for a `demo` instance. - - ```bash - cat < mz_instances.tfvars - - materialize_instances = [ - { - name = "demo" - namespace = "materialize-environment" - database_name = "demo_db" - cpu_request = "1" - memory_request = "2Gi" - memory_limit = "2Gi" - license_key = "" - } - ] - EOF - ``` - - - **Starting in v26.0**, Self-Managed Materialize requires a license key. To - get your license key: - {{% yaml-table data="self_managed/license_key" %}} - - - **Starting in v0.3.0**, the Materialize on GCP Terraform module also - deploys, by default: - - - [Load balancers](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) for Materialize instances (i.e., the [`create_load_balancer`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) flag defaults to `true`). The load balancers, by default, are configured to be internal (i.e., the [`internal_load_balancer`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) flag defaults to `true`). - - - A self-signed `ClusterIssuer`. The `ClusterIssuer` is deployed after the - `cert-manager` is deployed and running. - - - **Starting in v0.4.3**, you can specify addition configuration options via - `environmentd_extra_args`. 
- - {{< tip >}} - {{% self-managed/gcp-terraform-upgrade-notes %}} - - See [Materialize on GCP releases](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) for notable changes. - {{}} - -1. Run `terraform plan` with both `.tfvars` files and review the changes to be - made. - - ```bash - terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - The plan should show the changes to be made, with a summary similar to the - following: - - ``` - Plan: 9 to add, 1 to change, 0 to destroy. - ``` - -1. If you are satisfied with the changes, apply. - - ```bash - terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars - ``` - - To approve the changes and apply, enter `yes`. - - - Upon successful completion, you should see output with a summary similar to the following: - - ```bash - Apply complete! Resources: 9 added, 1 changed, 0 destroyed. - - Outputs: - - connection_strings = - gke_cluster = - load_balancer_details = { - "demo" = { - "balancerd_load_balancer_ip" = "192.0.2.10" - "console_load_balancer_ip" = "192.0.2.254" - } - } - network = { - "network_id" = "projects/my-project/global/networks/mz-simple-network" - "network_name" = "mz-simple-network" - "subnet_name" = "mz-simple-subnet" - } - service_accounts = { - "gke_sa" = "mz-simple-gke-sa@my-project.iam.gserviceaccount.com" - "materialize_sa" = "mz-simple-materialize-sa@my-project.iam.gserviceaccount.com" - } - ``` - -1. Verify the installation and check the status: - - ```bash - kubectl get all -n materialize-environment - ``` - - Wait for the components to be in the `Running` state. - - ```none - NAME READY STATUS RESTARTS AGE - pod/db-demo-db-wrvhw 0/1 Completed 0 4m26s - pod/mzdtwvu4qe4q-balancerd-6989df5c75-mpmqx 1/1 Running 0 3m54s - pod/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 3m53s - pod/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 3m52s - pod/mzdtwvu4qe4q-console-7c9bc94bcb-6t7lg 1/1 Running 0 3m41s - pod/mzdtwvu4qe4q-console-7c9bc94bcb-9x5qq 1/1 Running 0 3m41s - pod/mzdtwvu4qe4q-environmentd-1-0 1/1 Running 0 4m9s - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/mzdtwvu4qe4q-balancerd ClusterIP None 6876/TCP,6875/TCP 3m54s - service/mzdtwvu4qe4q-balancerd-lb LoadBalancer 10.52.5.105 192.0.2.10 6875:30844/TCP,6876:32307/TCP 4m9s - service/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 3m53s - service/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 3m52s - service/mzdtwvu4qe4q-console ClusterIP None 8080/TCP 3m41s - service/mzdtwvu4qe4q-console-lb LoadBalancer 10.52.4.2 192.0.2.254 8080:32193/TCP 4m9s - service/mzdtwvu4qe4q-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 3m54s - service/mzdtwvu4qe4q-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 4m9s - service/mzdtwvu4qe4q-persist-pubsub-1 ClusterIP None 6879/TCP 4m9s - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/mzdtwvu4qe4q-balancerd 1/1 1 1 3m54s - deployment.apps/mzdtwvu4qe4q-console 2/2 2 2 3m41s - - NAME DESIRED CURRENT READY AGE - replicaset.apps/mzdtwvu4qe4q-balancerd-6989df5c75 1 1 1 3m54s - replicaset.apps/mzdtwvu4qe4q-console-7c9bc94bcb 2 2 2 3m41s - - NAME READY AGE - statefulset.apps/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1 1/1 3m53s - statefulset.apps/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1 1/1 3m52s - statefulset.apps/mzdtwvu4qe4q-environmentd-1 1/1 4m9s - - NAME STATUS COMPLETIONS DURATION AGE - job.batch/db-demo-db 
Complete 1/1 12s 4m27s - - ``` - - If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). - -1. Open the Materialize Console in your browser: - - {{< tabs >}} - - {{< tab "Via Network Load Balancer" >}} - - Starting in v0.3.0, for each Materialize instance, Materialize on GCP - Terraform module also deploys load balancers (by default, internal) with the - following listeners, including a listener on port 8080 for the Materialize - Console: - - | Port | Description | - | ---- | ------------| - | 6875 | For SQL connections to the database | - | 6876 | For HTTP(S) connections to the database | - | **8080** | **For HTTP(S) connections to Materialize Console** | - - The load balancer details are found in the `load_balancer_details` in - the [Terraform output](#gcp-terraform-output). - - The example uses a self-signed ClusterIssuer. As such, you may encounter a - warning with regards to the certificate. In production, run with certificates - from an official Certificate Authority (CA) rather than self-signed - certificates. - - {{}} - - {{< tab "Via port forwarding" >}} - - {{% self-managed/port-forwarding-handling console="console-lb"%}} - - {{}} - {{}} - - - {{< tip >}} - - {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} - - {{< /tip >}} - -## Next steps - -{{% self-managed/next-steps %}} - -## Cleanup - -{{% self-managed/cleanup-cloud %}} +| Guide | Description | +|-------|-------------| +| [Terraform Provider](/installation/install-on-gcp/terraform-module/) | Install Materialize on GCP using our new Unified Terraform Provider | +| [Terraform Provider (legacy)](/installation/install-on-gcp/legacy-terraform-module/) | Install Materialize on GCP using our Terraform Provider (legacy) | +| [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) | Additional guidelines for Google Cloud deployments | ## See also -- [Troubleshooting](/installation/troubleshooting/) - [Materialize Operator Configuration](/installation/configuration/) -- [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) +- [Troubleshooting](/installation/troubleshooting/) - [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md index e2a051a92772c..69eae78bce5cf 100644 --- a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md @@ -12,9 +12,9 @@ menu: As a general guideline, we recommend: -- Processor Type: ARM-based CPU - -- Sizing: 2:1 disk-to-RAM ratio with spill-to-disk enabled. +- ARM-based CPU +- A 1:8 ratio of vCPU to GiB memory is recommended. +- When using swap, it is recommended to use a 8:1 ratio of GiB local instance storage to GiB Ram. 
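+
+For example, applying these ratios to an `n2-highmem-16` machine: 16 vCPU pairs
+with 128 GiB of memory (1:8), and 8 × 128 GiB ≈ 1 TiB of local NVMe storage
+(roughly three 375 GB local SSDs) satisfies the swap guideline.
+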
When operating on GCP in production, we recommend the following machine types
that support local SSD attachment:
@@ -24,20 +24,16 @@ that support local SSD attachment:
| [N2 high-memory series]  | `n2-highmem-16` or `n2-highmem-32` with local NVMe SSDs |
| [N2D high-memory series] | `n2d-highmem-16` or `n2d-highmem-32` with local NVMe SSDs |

-To maintain the recommended 2:1 disk-to-RAM ratio for your machine type, see
+To maintain the recommended 8:1 disk-to-RAM ratio for your machine type, see
[Number of local SSDs](#number-of-local-ssds) to determine the number of local
-SSDs
-([`disk_support_config.local_ssd_count`](https://github.com/MaterializeInc/terraform-google-materialize/blob/main/README.md#input_disk_support_config))
-to use.
+SSDs to use.

See also [Locally attached NVMe storage](#locally-attached-nvme-storage).

## Number of local SSDs

Each local NVMe SSD in GCP provides 375GB of storage. Use the appropriate number
-of local SSDs
-([`disk_support_config.local_ssd_count`](https://github.com/MaterializeInc/terraform-google-materialize/blob/main/README.md#input_disk_support_config))
-to ensure your total disk space is at least twice the amount of RAM in your
+of local SSDs to ensure your total disk space is at least eight times the amount of RAM in your
machine type for optimal Materialize performance.

{{< note >}}
@@ -65,32 +61,36 @@ type](https://cloud.google.com/compute/docs/disks/local-ssd#lssd_disk_options).

[N2D high-memory series]: https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machine_types

-[enables spill-to-disk]: https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#disk-support-for-materialize-on-gcp

## Locally-attached NVMe storage

-For optimal performance, Materialize requires fast, locally-attached NVMe
-storage. Having a locally-attached storage allows Materialize to spill to disk
-when operating on datasets larger than main memory as well as allows for a more
-graceful degradation rather than OOMing. Network-attached storage (like EBS
-volumes) can significantly degrade performance and is not supported.
+Configuring swap on nodes to use locally-attached NVMe storage allows
+Materialize to spill to disk when operating on datasets larger than main memory.
+This setup can provide significant cost savings and provides a more graceful
+degradation rather than OOMing. Network-attached storage (like EBS volumes) can
+significantly degrade performance and is not supported.

### Swap support

-Starting in v0.6.1 of Materialize on Google Cloud PRovider (GCP) Terraform,
-disk support (using swap on NVMe instance storage) may be enabled for
-Materialize. With this change, the Terraform:
+***New Unified Terraform***

-- Creates a node group for Materialize.
-- Configures NVMe instance store volumes as swap using a daemonset.
-- Enables swap at the Kubelet.
+The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp/examples/simple) supports configuring swap out of the box.

-For swap support, the following configuration options are available:
+***Legacy Terraform***

-- [`swap_enabled`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_swap_enabled)
+The legacy Terraform module adds preliminary swap support in v0.6.1 via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_swap_enabled) variable.
+With this change, the Terraform:
+ - Creates a node group for Materialize.
+ - Configures NVMe instance store volumes as swap using a daemonset.
+ - Enables swap at the Kubelet.

See [Upgrade Notes](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#v061).

+{{< note >}}
+If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this is handled automatically.
+{{< /note >}}
+
+
## CPU affinity

It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy).
@@ -102,15 +102,6 @@ to substantially improve the performance of compute-bound workloads.

When running with TLS in production, run with certificates from an official
Certificate Authority (CA) rather than self-signed certificates.

-## Storage bucket versioning
-
-Starting in v0.3.1 of Materialize on GCP Terraform, storage bucket versioning is
-disabled (i.e.,
-[`storage_bucket_versioning`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_storage_bucket_versioning)
-is set to `false` by default) to facilitate cleanup of resources during testing.
-When running in production, versioning should be turned on with a sufficient TTL
-([`storage_bucket_version_ttl`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_storage_bucket_version_ttl))
-to meet any data-recovery requirements.

## See also

diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md
new file mode 100644
index 0000000000000..04a23c89bb532
--- /dev/null
+++ b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md
@@ -0,0 +1,24 @@
+---
+title: "Terraform Module (legacy)"
+description: ""
+disable_list: true
+disable_toc: true
+menu:
+  main:
+    parent: "install-on-gcp"
+    identifier: "install-on-gcp-legacy-terraform-module"
+    weight: 5
+
+
+---
+
+The tutorials in this section show you how to deploy Materialize using the legacy
+[Materialize on Google Cloud Terraform
+module](https://github.com/MaterializeInc/terraform-google-materialize).
+
+
+| Guide | Description |
+|-------|-------------|
+| [Install](/installation/install-on-gcp/legacy-terraform-module/install/) | Install Materialize on GCP |
+| [Upgrade](/installation/install-on-gcp/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on GCP |
+| [Appendix: GCP configuration](/installation/install-on-gcp/legacy-terraform-module/appendix-configuration/) | Configuration for Google Cloud deployments |
diff --git a/doc/user/content/installation/install-on-gcp/appendix-gcp-configuration.md b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md
similarity index 52%
rename from doc/user/content/installation/install-on-gcp/appendix-gcp-configuration.md
rename to doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md
index 1522237789e44..288ac69839a61 100644
--- a/doc/user/content/installation/install-on-gcp/appendix-gcp-configuration.md
+++ b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md
@@ -1,13 +1,13 @@
---
-title: "Appendix: Required configuration"
-description: "Required configuration for Materialize on GCP Terraform."
+title: "Appendix: Configuration" +description: "Required configuration for Materialize on GCP Terraform (legacy)." menu: main: - parent: "install-on-gcp" - identifier: "appendix-gcp-config" + parent: "install-on-gcp-legacy-terraform-module" + identifier: "legacy-terraform-module-appendix-configuration" weight: 50 aliases: - - /installation/install-on-gcp/appendix-gcp-provider-configuration + - /installation/install-on-gcp/appendix-gcp-configuration/ --- ## Required variables @@ -58,3 +58,29 @@ you need to declare: ```hcl data "google_client_config" "current" {} ``` + +## Swap support + +Starting in v0.6.1 of Materialize on Google Cloud Provider (GCP) Terraform, +disk support (using swap on NVMe instance storage) may be enabled for +Materialize. With this change, the Terraform: + +- Creates a node group for Materialize. +- Configures NVMe instance store volumes as swap using a daemonset. +- Enables swap at the Kubelet. + +For swap support, the following configuration options are available: + +- [`swap_enabled`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_swap_enabled) + +See [Upgrade Notes](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#v061). + +## Storage bucket versioning + +Starting in v0.3.1 of Materialize on GCP Terraform, storage bucket versioning is +disabled (i.e., +[`storage_bucket_versioning`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_storage_bucket_versioning) +is set to `false` by default) to facilitate cleanup of resources during testing. +When running in production, versioning should be turned on with a sufficient TTL +([`storage_bucket_version_ttl`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_storage_bucket_version_ttl)) +to meet any data-recovery requirements. diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md new file mode 100644 index 0000000000000..0eec79daba6ab --- /dev/null +++ b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md @@ -0,0 +1,583 @@ +--- +title: "Install" +description: "" +aliases: + - /self-hosted/install-on-gcp/ + - /installation/install-on-gcp/ +menu: + main: + parent: "install-on-gcp-legacy-terraform-module" + identifier: "legacy-terraform-module-install" + weight: 5 +--- + +{{% self-managed/materialize-components-sentence %}} + +This tutorial deploys Materialize to GCP Google Kubernetes Engine (GKE) cluster +with a Cloud SQL PostgreSQL database as the metadata database and Cloud Storage +bucket for blob storage. Specifically, the tutorial uses [Materialize on Google +Cloud Provider Terraform +module](https://github.com/MaterializeInc/terraform-google-materialize) to: + +- Set up the GCP environment. + +- Call + [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize) + module to deploy Materialize Operator and Materialize instances to the GKE + cluster. + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< self-managed/tutorial-disclaimer >}} + +{{< /warning >}} + +## Prerequisites + +### Google cloud provider project + +You need a GCP project for which you have a role (such as +`roles/resourcemanager.projectIamAdmin` or `roles/owner`) that includes [ +permissions to manage access to the +project](https://cloud.google.com/iam/docs/granting-changing-revoking-access). 
+ +### gcloud CLI + +If you do not have gcloud CLI, install. For details, see the [Install the gcloud +CLI documentation](https://cloud.google.com/sdk/docs/install). + +### Google service account + +The tutorial assumes the use of a service account. If you do not have a service +account to use for this tutorial, create a service account. For details, see +[Create service +accounts](https://cloud.google.com/iam/docs/service-accounts-create#creating). + +### Terraform + +If you do not have Terraform installed, [install +Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform). + +### kubectl and plugins + +{{< tip >}} + +Using `gcloud` to install `kubectl` will also install the needed plugins. +Otherwise, you will need to manually install the `gke-gcloud-auth-plugin` for +`kubectl`. + +{{< /tip >}} + +- If you do not have `kubectl`, install `kubectl`. To install, see [Install + kubectl and configure cluster + access](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) + for details. You will configure `kubectl` to interact with your GKE cluster + later in the tutorial. + +- If you do not have `gke-gcloud-auth-plugin` for `kubectl`, install the + `gke-gcloud-auth-plugin`. For details, see [Install the + gke-gcloud-auth-plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin). + +### Helm 3.2.0+ + +If you do not have Helm version 3.2.0+ installed, install. For details, see the +[Helm documentation](https://helm.sh/docs/intro/install/). + +### jq (Optional) + +*Optional*. `jq` is used to parse the GKE cluster name and region from the +Terraform outputs. Alternatively, you can manually specify the name and region. +If you want to use `jq` and do not have `jq` installed, install. + +### License key + +{{< include-md file="shared-content/license-key-required.md" >}} + +## A. Configure GCP project and service account + +1. Open a Terminal window. + +1. Initialize the gcloud CLI (`gcloud init`) to specify the GCP project you want + to use. For details, see the [Initializing the gcloud CLI + documentation](https://cloud.google.com/sdk/docs/initializing#initialize_the). + + {{< tip >}} + You do not need to configure a default Compute Region and Zone as you will + specify the region. + {{}} + +1. Enable the following services for your GCP project, if not already enabled: + + ```bash + gcloud services enable container.googleapis.com # For creating Kubernetes clusters + gcloud services enable sqladmin.googleapis.com # For creating databases + gcloud services enable cloudresourcemanager.googleapis.com # For managing GCP resources + gcloud services enable servicenetworking.googleapis.com # For private network connections + gcloud services enable iamcredentials.googleapis.com # For security and authentication + ``` + +1. To the service account that will run the Terraform script, + grant the following IAM roles: + + - `roles/editor` + - `roles/iam.serviceAccountAdmin` + - `roles/servicenetworking.networksAdmin` + - `roles/storage.admin` + - `roles/container.admin` + + 1. Enter your GCP project ID. + + ```bash + read -s PROJECT_ID + ``` + + 1. Find your service account email for your GCP project + + ```bash + gcloud iam service-accounts list --project $PROJECT_ID + ``` + + 1. Enter your service account email. + + ```bash + read -s SERVICE_ACCOUNT + ``` + + 1. Grant the service account the neccessary IAM roles. 
+ + ```bash + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member="serviceAccount:$SERVICE_ACCOUNT" \ + --role="roles/editor" + + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member="serviceAccount:$SERVICE_ACCOUNT" \ + --role="roles/iam.serviceAccountAdmin" + + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member="serviceAccount:$SERVICE_ACCOUNT" \ + --role="roles/servicenetworking.networksAdmin" + + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member="serviceAccount:$SERVICE_ACCOUNT" \ + --role="roles/storage.admin" + + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member="serviceAccount:$SERVICE_ACCOUNT" \ + --role="roles/container.admin" + ``` + +1. For the service account, authenticate to allow Terraform to + interact with your GCP project. For details, see [Terraform: Google Cloud + Provider Configuration + reference](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#authentication). + + For example, if using [User Application Default + Credentials](https://cloud.google.com/sdk/gcloud/reference/auth/application-default), + you can run the following command: + + ```bash + gcloud auth application-default login + ``` + + {{< tip >}} + If using `GOOGLE_APPLICATION_CREDENTIALS`, use absolute path to your key file. + {{}} + +## B. Set up GCP Kubernetes environment and install Materialize + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +{{< tabs >}} + +{{< tab "Deployed components" >}} +[Materialize on GCP Terraform +module](https://github.com/MaterializeInc/terraform-google-materialize) deploys +a sample infrastructure on GCP (region `us-central1`) with the following +components: + +{{< yaml-table data="self_managed/gcp_terraform_deployed_components" >}} + +{{< tip >}} +{{< self-managed/gcp-terraform-configs >}} +{{< /tip >}} +{{}} +{{< tab "Releases" >}} + +{{< yaml-table data="self_managed/gcp_terraform_versions" >}} + +{{}} +{{}} + +{{% self-managed/versions/step-clone-google-terraform-repo %}} + +1. Go to the `examples/simple` folder in the Materialize Terraform repo + directory. + + ```bash + cd terraform-google-materialize/examples/simple + ``` + + {{< tip >}} + {{< self-managed/gcp-terraform-configs >}} + {{< /tip >}} + +1. Create a `terraform.tfvars` file (you can copy from the + `terraform.tfvars.example` file) and specify the following variables: + + | **Variable** | **Description** | + |--------------|-----------------| + | `project_id` | Your GCP project ID. | + | `prefix` | A prefix (e.g., `mz-simple`) for your resources. Prefix has a maximum of 15 characters and contains only alphanumeric characters and dashes. | + | `region` | The region for the GKE cluster. | + + ```bash + project_id = "enter-your-gcp-project-id" + prefix = "enter-your-prefix" // Maximum of 15 characters, contain lowercase alphanumeric and hyphens only (e.g., mz-simple) + region = "us-central1" + ``` + + {{< tip >}} + + {{< self-managed/gcp-terraform-configs >}} + + {{< /tip >}} + +1. Initialize the terraform directory. + + ```bash + terraform init + ``` + +1. Run terraform plan and review the changes to be made. + + ```bash + terraform plan + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply + ``` + + To approve the changes and apply, enter `yes`. + + Upon successful completion, various fields and their values are output: + + ```bash + Apply complete! Resources: 27 added, 0 changed, 0 destroyed. 
+ + Outputs: + + connection_strings = + gke_cluster = + load_balancer_details = {} + network = { + "network_id" = "projects/my-project/global/networks/mz-simple-network" + "network_name" = "mz-simple-network" + "subnet_name" = "mz-simple-subnet" + } + service_accounts = { + "gke_sa" = "mz-simple-gke-sa@my-project.iam.gserviceaccount.com" + "materialize_sa" = "mz-simple-materialize-sa@my-project.iam.gserviceaccount.com" + } + ``` + +1. Configure `kubectl` to connect to your GKE cluster, specifying: + + - ``. Your cluster name has the form `-gke`; e.g., + `mz-simple-gke`. + + - ``. By default, the example Terraform module uses the `us-central1` + region. + + - ``. Your GCP project ID. + + ```bash + gcloud container clusters get-credentials \ + --region \ + --project + ``` + + Alternatively, you can use the following command to get the cluster name and + region from the Terraform output and the project ID from the environment + variable set earlier. + + ```bash + gcloud container clusters get-credentials $(terraform output -json gke_cluster | jq -r .name) \ + --region $(terraform output -json gke_cluster | jq -r .location) --project $PROJECT_ID + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl cluster-info + ``` + + For help with `kubectl` commands, see [kubectl Quick + reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +1. By default, the example Terraform installs the Materialize Operator and, + starting in v0.3.0, a `cert-manager`. Verify the + installation and check the status: + + {{< tabs >}} + {{< tab "Materialize Operator" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n materialize + ``` + + Wait for the components to be in the `Running` state: + + ```none + NAME READY STATUS RESTARTS AGE + pod/materialize-mz-simple-materialize-operator-74d8f549d6-lkjjf 1/1 Running 0 36m + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/materialize-mz-simple-materialize-operator 1/1 1 1 36m + + NAME DESIRED CURRENT READY AGE + replicaset.apps/materialize-mz-simple-materialize-operator-74d8f549d6 1 1 1 36m + ``` + + {{}} + {{< tab "cert-manager (Starting in version 0.3.0)" >}} + + Verify the installation and check the status: + + ```shell + kubectl get all -n cert-manager + ``` + Wait for the components to be in the `Running` state: + ``` + NAME READY STATUS RESTARTS AGE + pod/cert-manager-6794b8d569-vt264 1/1 Running 0 22m + pod/cert-manager-cainjector-7f69cd69f7-7brqw 1/1 Running 0 22m + pod/cert-manager-webhook-6cc5dccc4b-7tmd4 1/1 Running 0 22m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/cert-manager ClusterIP 10.52.3.63 9402/TCP 22m + service/cert-manager-cainjector ClusterIP 10.52.15.171 9402/TCP 22m + service/cert-manager-webhook ClusterIP 10.52.5.148 443/TCP,9402/TCP 22m + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/cert-manager 1/1 1 1 22m + deployment.apps/cert-manager-cainjector 1/1 1 1 22m + deployment.apps/cert-manager-webhook 1/1 1 1 22m + + NAME DESIRED CURRENT READY AGE + replicaset.apps/cert-manager-6794b8d569 1 1 1 22m + replicaset.apps/cert-manager-cainjector-7f69cd69f7 1 1 1 22m + replicaset.apps/cert-manager-webhook-6cc5dccc4b 1 1 1 22m + ``` + + {{}} + {{}} + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting/). + +1. Once the Materialize operator is deployed and running, you can deploy the + Materialize instances. 
To deploy Materialize instances, create a + `mz_instances.tfvars` file with the Materialize instance configuration. + + For example, the following specifies the configuration for a `demo` instance. + + ```bash + cat < mz_instances.tfvars + + materialize_instances = [ + { + name = "demo" + namespace = "materialize-environment" + database_name = "demo_db" + cpu_request = "1" + memory_request = "2Gi" + memory_limit = "2Gi" + license_key = "" + } + ] + EOF + ``` + + - **Starting in v0.3.0**, the Materialize on GCP Terraform module also + deploys, by default: + + - [Load balancers](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) for Materialize instances (i.e., the [`create_load_balancer`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) flag defaults to `true`). The load balancers, by default, are configured to be internal (i.e., the [`internal_load_balancer`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_materialize_instances) flag defaults to `true`). + + - A self-signed `ClusterIssuer`. The `ClusterIssuer` is deployed after the + `cert-manager` is deployed and running. + + - **Starting in v0.4.3**, you can specify addition configuration options via + `environmentd_extra_args`. + + {{< tip >}} + {{% self-managed/gcp-terraform-upgrade-notes %}} + + See [Materialize on GCP releases](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) for notable changes. + {{}} + +1. Run `terraform plan` with both `.tfvars` files and review the changes to be + made. + + ```bash + terraform plan -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + The plan should show the changes to be made, with a summary similar to the + following: + + ``` + Plan: 9 to add, 1 to change, 0 to destroy. + ``` + +1. If you are satisfied with the changes, apply. + + ```bash + terraform apply -var-file=terraform.tfvars -var-file=mz_instances.tfvars + ``` + + To approve the changes and apply, enter `yes`. + + + Upon successful completion, you should see output with a summary similar to the following: + + ```bash + Apply complete! Resources: 9 added, 1 changed, 0 destroyed. + + Outputs: + + connection_strings = + gke_cluster = + load_balancer_details = { + "demo" = { + "balancerd_load_balancer_ip" = "192.0.2.10" + "console_load_balancer_ip" = "192.0.2.254" + } + } + network = { + "network_id" = "projects/my-project/global/networks/mz-simple-network" + "network_name" = "mz-simple-network" + "subnet_name" = "mz-simple-subnet" + } + service_accounts = { + "gke_sa" = "mz-simple-gke-sa@my-project.iam.gserviceaccount.com" + "materialize_sa" = "mz-simple-materialize-sa@my-project.iam.gserviceaccount.com" + } + ``` + +1. Verify the installation and check the status: + + ```bash + kubectl get all -n materialize-environment + ``` + + Wait for the components to be in the `Running` state. 
+ + ```none + NAME READY STATUS RESTARTS AGE + pod/db-demo-db-wrvhw 0/1 Completed 0 4m26s + pod/mzdtwvu4qe4q-balancerd-6989df5c75-mpmqx 1/1 Running 0 3m54s + pod/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1-0 1/1 Running 0 3m53s + pod/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1-0 1/1 Running 0 3m52s + pod/mzdtwvu4qe4q-console-7c9bc94bcb-6t7lg 1/1 Running 0 3m41s + pod/mzdtwvu4qe4q-console-7c9bc94bcb-9x5qq 1/1 Running 0 3m41s + pod/mzdtwvu4qe4q-environmentd-1-0 1/1 Running 0 4m9s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/mzdtwvu4qe4q-balancerd ClusterIP None 6876/TCP,6875/TCP 3m54s + service/mzdtwvu4qe4q-balancerd-lb LoadBalancer 10.52.5.105 192.0.2.10 6875:30844/TCP,6876:32307/TCP 4m9s + service/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 3m53s + service/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1 ClusterIP None 2100/TCP,2103/TCP,2101/TCP,2102/TCP,6878/TCP 3m52s + service/mzdtwvu4qe4q-console ClusterIP None 8080/TCP 3m41s + service/mzdtwvu4qe4q-console-lb LoadBalancer 10.52.4.2 192.0.2.254 8080:32193/TCP 4m9s + service/mzdtwvu4qe4q-environmentd ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 3m54s + service/mzdtwvu4qe4q-environmentd-1 ClusterIP None 6875/TCP,6876/TCP,6877/TCP,6878/TCP 4m9s + service/mzdtwvu4qe4q-persist-pubsub-1 ClusterIP None 6879/TCP 4m9s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/mzdtwvu4qe4q-balancerd 1/1 1 1 3m54s + deployment.apps/mzdtwvu4qe4q-console 2/2 2 2 3m41s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/mzdtwvu4qe4q-balancerd-6989df5c75 1 1 1 3m54s + replicaset.apps/mzdtwvu4qe4q-console-7c9bc94bcb 2 2 2 3m41s + + NAME READY AGE + statefulset.apps/mzdtwvu4qe4q-cluster-s2-replica-s1-gen-1 1/1 3m53s + statefulset.apps/mzdtwvu4qe4q-cluster-u1-replica-u1-gen-1 1/1 3m52s + statefulset.apps/mzdtwvu4qe4q-environmentd-1 1/1 4m9s + + NAME STATUS COMPLETIONS DURATION AGE + job.batch/db-demo-db Complete 1/1 12s 4m27s + + ``` + + If you run into an error during deployment, refer to the + [Troubleshooting](/installation/troubleshooting/). + +1. Open the Materialize Console in your browser: + + {{< tabs >}} + + {{< tab "Via Network Load Balancer" >}} + + Starting in v0.3.0, for each Materialize instance, Materialize on GCP + Terraform module also deploys load balancers (by default, internal) with the + following listeners, including a listener on port 8080 for the Materialize + Console: + + | Port | Description | + | ---- | ------------| + | 6875 | For SQL connections to the database | + | 6876 | For HTTP(S) connections to the database | + | **8080** | **For HTTP(S) connections to Materialize Console** | + + The load balancer details are found in the `load_balancer_details` in + the [Terraform output](#gcp-terraform-output). + + The example uses a self-signed ClusterIssuer. As such, you may encounter a + warning with regards to the certificate. In production, run with certificates + from an official Certificate Authority (CA) rather than self-signed + certificates. 
+ + {{}} + + {{< tab "Via port forwarding" >}} + + {{% self-managed/port-forwarding-handling console="console-lb"%}} + + {{}} + {{}} + + + {{< tip >}} + + {{% self-managed/troubleshoot-console-mz_catalog_server_blurb %}} + + {{< /tip >}} + +## Next steps + +{{% self-managed/next-steps %}} + +## Cleanup + +{{% self-managed/cleanup-cloud %}} + +## See also + +- [Troubleshooting](/installation/troubleshooting/) +- [Materialize Operator Configuration](/installation/configuration/) +- [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) +- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-gcp/upgrade-on-gcp.md b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md similarity index 97% rename from doc/user/content/installation/install-on-gcp/upgrade-on-gcp.md rename to doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md index eab4ab0ebe91b..92f9f2d0b93d2 100644 --- a/doc/user/content/installation/install-on-gcp/upgrade-on-gcp.md +++ b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md @@ -1,11 +1,13 @@ --- -title: "Upgrade on GCP (Terraform)" +title: "Upgrade" description: "Procedure to upgrade your Materialize operator and instances running on GCP" menu: main: - parent: "install-on-gcp" - identifier: "upgrade-on-gcp" + parent: "install-on-gcp-legacy-terraform-module" + identifier: "legacy-terraform-module-upgrade" weight: 10 +aliases: + - /installation/install-on-gcp/upgrade-on-gcp --- {{< annotation type="Disambiguation" >}} diff --git a/doc/user/content/installation/install-on-gcp/terraform-module/_index.md b/doc/user/content/installation/install-on-gcp/terraform-module/_index.md new file mode 100644 index 0000000000000..72acc7d697199 --- /dev/null +++ b/doc/user/content/installation/install-on-gcp/terraform-module/_index.md @@ -0,0 +1,164 @@ +--- +title: "Terraform Module" +description: "" +menu: + main: + parent: "install-on-gcp" + identifier: "install-gcp-terraform" + weight: 5 +--- + +Materialize provides a set of modular Terraform modules that can be used to +deploy all services required for a production ready Materialize database. +The module is intended to provide a simple set of examples on how to deploy +materialize. It can be used as is or modules can be taken from the example and +integrated with existing DevOps tooling. + +The repository can be found at: + +***[Materialize Terraform Self-Managed GCP](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)*** + +Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp) documentation for a full understanding +of the module structure and customizations. + +Also check out the [GCP deployment guide](/installation/install-on-gcp/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. 
+
+{{< note >}}
+{{% self-managed/materialize-components-sentence %}}
+{{< /note >}}
+
+{{< warning >}}
+
+{{< self-managed/terraform-disclaimer >}}
+
+{{< /warning >}}
+
+
+## Prerequisites
+
+- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform)
+- [gcloud CLI](https://cloud.google.com/sdk/docs/install)
+- [`kubectl`](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl)
+- [Helm 3.2.0+](https://helm.sh/docs/intro/install/)
+- [kubectl gke plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin)
+- [A Google Cloud service account](https://cloud.google.com/iam/docs/service-accounts-create#creating)
+
+#### License key
+
+{{< include-md file="shared-content/license-key-required.md" >}}
+
+---
+
+# Example: Simple Materialize Deployment on GCP
+
+This example demonstrates how to deploy a complete Materialize environment on GCP using the modular Terraform setup from this repository.
+
+
+## Setup
+```shell
+git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git
+cd materialize-terraform-self-managed/gcp/examples/simple
+```
+
+## What Gets Created
+
+This example provisions the following infrastructure:
+
+### Networking
+- **VPC Network**: Custom VPC with auto-create subnets disabled
+- **Subnet**: 192.168.0.0/20 primary range with private Google access enabled
+- **Secondary Ranges**:
+  - Pods: 192.168.64.0/18
+  - Services: 192.168.128.0/20
+- **Cloud Router**: For NAT and routing configuration
+- **Cloud NAT**: For outbound internet access from private nodes
+- **VPC Peering**: Service networking connection for Cloud SQL private access
+
+### Compute
+- **GKE Cluster**: Regional cluster with Workload Identity enabled
+- **Generic Node Pool**: e2-standard-8 machines, autoscaling 2-5 nodes, 50GB disk, for general workloads
+- **Materialize Node Pool**: n2-highmem-8 machines, autoscaling 2-5 nodes, 100GB disk, 1 local SSD, swap enabled, dedicated taints for Materialize workloads
+- **Service Account**: GKE service account with workload identity binding
+
+### Database
+- **Cloud SQL PostgreSQL**: Private IP only (no public IP)
+- **Tier**: db-custom-2-4096 (2 vCPUs, 4GB memory)
+- **Database**: `materialize` database with UTF8 charset
+- **User**: `materialize` user with auto-generated password
+- **Network**: Connected via VPC peering for private access
+
+### Storage
+- **Cloud Storage Bucket**: Regional bucket for Materialize persistence
+- **Access**: HMAC keys for S3-compatible access (a Workload Identity service account with storage permissions is configured but not currently used by Materialize for GCS access; in the future we will remove HMAC keys and support access to GCS either via Workload Identity Federation or via Kubernetes ServiceAccounts that impersonate IAM service accounts)
+- **Versioning**: Disabled (for testing; enable in production)
+
+### Kubernetes Add-ons
+- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal
+- **Self-signed ClusterIssuer**: Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication.
+
+### Materialize
+- **Operator**: Materialize Kubernetes operator in `materialize` namespace
+- **Instance**: Single Materialize instance in `materialize-environment` namespace
+- **Load Balancers**: GCP Load Balancers for Materialize access
+
+---
+
+## Required APIs
+Your GCP project needs several APIs enabled. Here's what each API does in simple terms:
+
+```bash
+# Enable these APIs in your project
+gcloud services enable container.googleapis.com # For creating Kubernetes clusters
+gcloud services enable sqladmin.googleapis.com # For creating databases
+gcloud services enable cloudresourcemanager.googleapis.com # For managing GCP resources
+gcloud services enable servicenetworking.googleapis.com # For private network connections
+gcloud services enable iamcredentials.googleapis.com # For security and authentication
+```
+
+## Getting Started
+
+### Step 1: Set Required Variables
+
+Before running Terraform, create a `terraform.tfvars` file with the following variables:
+
+```hcl
+project_id  = "my-gcp-project"
+name_prefix = "simple-demo"
+region      = "us-central1"
+license_key = "your-materialize-license-key" # Optional: Get from https://materialize.com/self-managed/
+labels = {
+  environment = "demo"
+  created_by  = "terraform"
+}
+```
+
+**Required Variables:**
+- `project_id`: GCP project ID
+- `name_prefix`: Prefix for all resource names
+- `region`: GCP region for deployment
+- `labels`: Map of labels to apply to resources
+- `license_key`: Materialize license key (required for production use)
+
+---
+
+### Step 2: Deploy Materialize
+
+Run the usual Terraform workflow:
+
+```bash
+terraform init
+terraform apply
+```
+
+## Notes
+
+* ***GCP Storage Authentication Limitation:*** Materialize currently only supports HMAC key authentication for GCS access (S3-compatible API).
+  Current State: The modules configure both HMAC keys and Workload Identity, but Materialize uses HMAC keys for actual storage access.
+  Future: Native GCS access via Workload Identity Federation or Kubernetes service account impersonation will be supported in a future release, eliminating the need for static credentials.
+* You can customize each module independently.
+* To reduce cost in your demo environment, you can tweak machine types and database tiers in `main.tf`.
+
+***Don't forget to destroy resources when finished:***
+```bash
+terraform destroy
+```
diff --git a/doc/user/content/installation/operational-guidelines.md b/doc/user/content/installation/operational-guidelines.md
index de246686733c0..0ad4a3baf4ae2 100644
--- a/doc/user/content/installation/operational-guidelines.md
+++ b/doc/user/content/installation/operational-guidelines.md
@@ -12,9 +12,11 @@ menu:

## Recommended instance types

+As a general guideline, we recommend:
+
- ARM-based CPU
-- 1:8 ratio of vCPU to GiB memory (if spill-to-disk is not enabled)
-- 1:16 ratio of vCPU to GiB local instance storage (if spill-to-disk is enabled)
+- A 1:8 ratio of vCPU to GiB memory.
+- When using swap, use a 1:16 ratio of vCPU to GiB local instance storage.

See also the specific cloud provider guidance:

@@ -27,12 +29,6 @@ See also the specific cloud provider guidance:

- [Azure Deployment guidelines](/installation/install-on-azure/appendix-deployment-guidelines/#recommended-instance-types)

-## CPU affinity
-
-It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy).
-This ensures that each worker thread of Materialize is given exclusively access to a vCPU. Our benchmarks have shown this
-to substantially improve the performance of compute-bound workloads.
-
## TLS

When running with TLS in production, run with certificates from an official
@@ -40,11 +36,11 @@ Certificate Authority (CA) rather than self-signed certificates.

## Locally-attached NVMe storage

-For optimal performance, Materialize requires fast, locally-attached NVMe
-storage. Having a locally-attached storage allows Materialize to spill to disk
-when operating on datasets larger than main memory as well as allows for a more
-graceful degradation rather than OOMing. Network-attached storage (like EBS
-volumes) can significantly degrade performance and is not supported.
+Configuring swap on nodes to use locally-attached NVMe storage allows
+Materialize to spill to disk when operating on datasets larger than main memory.
+This setup can provide significant cost savings and provides a more graceful
+degradation rather than OOMing. Network-attached storage (like EBS volumes) can
+significantly degrade performance and is not supported.

Refer to the specific cloud provider guidelines:

diff --git a/doc/user/content/installation/upgrading.md b/doc/user/content/installation/upgrading.md
new file mode 100644
index 0000000000000..424ac916f7d69
--- /dev/null
+++ b/doc/user/content/installation/upgrading.md
@@ -0,0 +1,187 @@
+---
+title: "Upgrade Overview"
+description: "Upgrading Self-Managed Materialize."
+menu:
+  main:
+    parent: "installation"
+---
+
+The following provides a general outline and examples for upgrading Materialize.
+
+For a more specific set of steps, please consult the deployment-specific upgrade
+documentation:
+ - [Minikube](/installation/install-on-local-minikube/upgrade-on-local-minikube/)
+ - [Kind](/installation/install-on-local-kind/upgrade-on-local-kind/)
+ - [AWS](/installation/install-on-aws/legacy-terraform-module/upgrade/)
+ - [GCP](/installation/install-on-gcp/legacy-terraform-module/upgrade/)
+ - [Azure](/installation/install-on-azure/legacy-terraform-module/upgrade/)
+
+***When upgrading, always***:
+- Upgrade the operator first and ensure version compatibility between the operator and the Materialize instance you are upgrading to.
+- Upgrade your Materialize instances after upgrading the operator to ensure compatibility.
+- Check the [version specific upgrade notes](#version-specific-upgrade-notes).
+
+### Upgrading the Helm Chart and Kubernetes Operator
+
+{{< important >}}
+
+When upgrading Materialize, always upgrade the operator first.
+
+{{}}
+
+The Materialize Kubernetes operator is deployed via Helm and can be updated through standard Helm upgrade commands.
+
+```shell
+helm upgrade my-materialize-operator materialize/misc/helm-charts/operator
+```
+
+If you have custom values, make sure to include your values file:
+
+```shell
+helm upgrade my-materialize-operator materialize/misc/helm-charts/operator -f my-values.yaml
+```
+
+### Upgrading Materialize Instances
+
+In order to minimize unexpected downtime and avoid connection drops at critical
+periods for your application, changes are not immediately and automatically
+rolled out by the Operator. Instead, the upgrade process involves two steps:
+- First, staging spec changes to the Materialize custom resource.
+- Second, applying the changes via a `requestRollout`.
+
+When upgrading your Materialize instances, you'll first want to update the `environmentdImageRef` field in the Materialize custom resource spec.
+
+#### Updating the `environmentdImageRef`
+To find a compatible version with your currently deployed Materialize operator, check the `appVersion` reported for the installed chart.
+
+```shell
+helm list -n materialize
+```
+
+Using the returned version, we can construct an image ref.
+We always recommend using the official Materialize image repository
+`docker.io/materialize/environmentd`.
+
+```
+environmentdImageRef: docker.io/materialize/environmentd:v26.0.0
+```
+
+The following is an example of how to patch the version.
+```shell
+# For version updates, first update the image reference
+kubectl patch materialize \
+  -n \
+  --type='merge' \
+  -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\"}}"
+```
+
+#### Applying the changes via `requestRollout`
+
+To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID.
+Be sure to consult the [Rollout Configuration](#rollout-configuration) to ensure you've selected the correct rollout behavior.
+```shell
+# Then trigger the rollout with a new UUID
+kubectl patch materialize \
+  -n \
+  --type='merge' \
+  -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}"
+```
+
+
+It is possible to combine both operations in a single command if preferred:
+
+```shell
+kubectl patch materialize 12345678-1234-1234-1234-123456789012 \
+  -n materialize-environment \
+  --type='merge' \
+  -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\", \"requestRollout\": \"$(uuidgen)\"}}"
+```
+
+#### Using YAML Definition
+
+Alternatively, you can update your Materialize custom resource definition directly:
+
+```yaml
+apiVersion: materialize.cloud/v1alpha1
+kind: Materialize
+metadata:
+  name: 12345678-1234-1234-1234-123456789012
+  namespace: materialize-environment
+spec:
+  environmentdImageRef: materialize/environmentd:v26.0.0 # Update version as needed
+  requestRollout: 22222222-2222-2222-2222-222222222222 # Generate new UUID
+  forceRollout: 33333333-3333-3333-3333-333333333333 # Optional: for forced rollouts
+  inPlaceRollout: false # In Place rollout is deprecated and ignored. Please use rolloutStrategy
+  rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. Can be WaitUntilReady or ImmediatelyPromoteCausingDowntime
+  backendSecretName: materialize-backend
+```
+
+Apply the updated definition:
+
+```shell
+kubectl apply -f materialize.yaml
+```
+
+### Rollout Configuration
+
+#### Forced Rollouts
+
+If you need to force a rollout even when there are no changes to the instance:
+
+```shell
+kubectl patch materialize \
+  -n materialize-environment \
+  --type='merge' \
+  -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\", \"forceRollout\": \"$(uuidgen)\"}}"
+```
+
+#### Rollout Strategies
+The behavior of the new version rollout follows your `rolloutStrategy` setting:
+
+`WaitUntilReady` (default):
+
+New instances are created and all dataflows are determined to be ready before cutting over and terminating the old version, temporarily requiring twice the resources during the transition.
+
+`ImmediatelyPromoteCausingDowntime`:
+
+Tears down the prior version before creating and promoting the new version. This causes downtime equal to the duration it takes for dataflows to hydrate, but does not require additional resources.
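+
+As a minimal sketch (the instance name below is a placeholder), the strategy
+can be set in the same patch that requests the rollout:
+
+```shell
+# Example: opt into the downtime-causing strategy while triggering a rollout
+kubectl patch materialize my-instance \
+  -n materialize-environment \
+  --type='merge' \
+  -p "{\"spec\": {\"rolloutStrategy\": \"ImmediatelyPromoteCausingDowntime\", \"requestRollout\": \"$(uuidgen)\"}}"
+```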
+
+#### In Place Rollout
+
+`inPlaceRollout` has been deprecated and will be ignored.
+
+
+### Verifying the Upgrade
+
+After initiating the rollout, you can monitor the status field of the Materialize custom resource to check on the upgrade.
+
+```shell
+# Watch the status of your Materialize environment
+kubectl get materialize -n materialize-environment -w
+
+# Check the logs of the operator
+kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize
+```
+### Version Specific Upgrade Notes
+
+#### Upgrading to `v26.0`
+- This is a major version upgrade. In order to upgrade to `v26.0`, you must first upgrade to `v25.2.15`, then upgrade to `v26.0.0`.
+- New requirements were introduced for license keys. In order to upgrade, you will
+  first need to add a license key to the `backendSecret` used in the spec for your
+  Materialize resource. Please refer to our [instructions on how to get and install a license key](/installation/faq#how-do-i-get-a-license-key).
+- Swap is now enabled by default. Swap reduces the memory required to
+  operate Materialize and improves cost efficiency. Upgrading to `v26.0`
+  requires some preparation to ensure Kubernetes nodes are labeled
+  and configured correctly. Please refer to our guides:
+
+  {{< yaml-table data="self_managed/enable_swap_upgrade_guides" >}}
+
+
+#### Upgrading between minor versions less than `v26`
+ - Prior to `v26`, you must upgrade at most one minor version at a time. For example, upgrading from `v25.1.5` to `v25.2.15` is permitted.
+
+## See also
+
+- [Configuration](/installation/configuration/)
+- [Installation](/installation/)
+- [Troubleshooting](/installation/troubleshooting/)
diff --git a/doc/user/data/self_managed/legacy_terraform_list.yml b/doc/user/data/self_managed/legacy_terraform_list.yml
new file mode 100644
index 0000000000000..8170eb218d06f
--- /dev/null
+++ b/doc/user/data/self_managed/legacy_terraform_list.yml
@@ -0,0 +1,29 @@
+columns:
+  - column: "Sample Module"
+  - column: "Description"
+
+rows:
+- "Sample Module": |
+    [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize)
+  "Description": |
+    A sample Terraform module for installing the Materialize Helm chart into a Kubernetes cluster.
+
+- "Sample Module": |
+    [Materialize on AWS](https://github.com/MaterializeInc/terraform-aws-materialize)
+  "Description": |
+    A sample Terraform module for deploying Materialize on AWS Cloud Platform
+    with all required infrastructure components.
+    See [Install on AWS](/installation/install-on-aws/) for an example usage.
+
+- "Sample Module": |
+    [Materialize on Azure](https://github.com/MaterializeInc/terraform-azurerm-materialize)
+  "Description": |
+    A sample Terraform module for deploying Materialize on Azure with all
+    required infrastructure components. See [Install on
+    Azure](/installation/install-on-azure/) for an example usage.
+
+- "Sample Module": |
+    [Materialize on Google Cloud Platform (GCP)](https://github.com/MaterializeInc/terraform-google-materialize)
+  "Description": |
+    A sample Terraform module for deploying Materialize on Google Cloud Platform
+    (GCP) with all required infrastructure components. See [Install on GCP](/installation/install-on-gcp/) for an example usage.
diff --git a/doc/user/data/self_managed/terraform_list.yml b/doc/user/data/self_managed/terraform_list.yml
index 8170eb218d06f..94c2481bafd32 100644
--- a/doc/user/data/self_managed/terraform_list.yml
+++ b/doc/user/data/self_managed/terraform_list.yml
@@ -1,29 +1,22 @@
columns:
-  - column: "Sample Module"
+  - column: "Module"
  - column: "Description"

rows:
-- "Sample Module": |
-    [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize)
+- "Module": |
+    [Amazon Web Services (AWS)](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws)
  "Description": |
-    A sample Terraform module for installing the Materialize Helm chart into a Kubernetes cluster.
+    An example Terraform module for deploying Materialize on AWS.
+    See [Install on AWS](/installation/install-on-aws/) for detailed usage instructions.

-- "Sample Module": |
-    [Materialize on AWS](https://github.com/MaterializeInc/terraform-aws-materialize)
+- "Module": |
+    [Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)
  "Description": |
-    A sample Terraform module for deploying Materialize on AWS Cloud Platform
-    with all required infrastructure components.
-    See [Install on AWS](/installation/install-on-aws/) for an example usage.
+    An example Terraform module for deploying Materialize on Azure.
+    See [Install on Azure](/installation/install-on-azure/) for detailed usage instructions.

-- "Sample Module": |
-    [Materialize on Azure](https://github.com/MaterializeInc/terraform-azurerm-materialize)
+- "Module": |
+    [Google Cloud Platform (GCP)](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)
  "Description": |
-    A sample Terraform module for deploying Materialize on Azure with all
-    required infrastructure components. See [Install on
-    Azure](/installation/install-on-azure/) for an example usage.
-
-- "Sample Module": |
-    [Materialize on Google Cloud Platform (GCP)](https://github.com/MaterializeInc/terraform-google-materialize)
-  "Description": |
-    A sample Terraform module for deploying Materialize on Google Cloud Platform
-    (GCP) with all required infrastructure components. See [Install on GCP](/installation/install-on-gcp/) for an example usage.
+    An example Terraform module for deploying Materialize on GCP.
+    See [Install on GCP](/installation/install-on-gcp/) for detailed usage instructions.
diff --git a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
index 82e7dc968bf68..83c84ec02eca3 100644
--- a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
+++ b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
@@ -1,9 +1,9 @@
The tutorial uses the `main.tf` found in the `examples/simple/` directory, which
-requires minimal user input. For details on the `examples/simple/`
+requires minimal user input. For details on the `examples/simple/`
infrastructure configuration (such as the node instance type, etc.), see the
[examples/simple/main.tf](https://github.com/MaterializeInc/terraform-aws-materialize/blob/main/examples/simple/main.tf).

For more configuration options, you can use the `main.tf` file at the [root of
the repository](https://github.com/MaterializeInc/terraform-aws-materialize/)
instead. When running with the root `main.tf`, see [AWS required
-configuration](/installation/install-on-aws/appendix-aws-configuration/).
+configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/). \ No newline at end of file diff --git a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html index f87f789efa914..85e18a616dae6 100644 --- a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html +++ b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html @@ -7,4 +7,4 @@ the repository](https://github.com/MaterializeInc/terraform-azurerm-materialize/) instead. When running with the root `main.tf`, see [Azure required -configuration](/installation/install-on-azure/appendix-azure-configuration/). +configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/). \ No newline at end of file diff --git a/misc/helm-charts/operator/README.md b/misc/helm-charts/operator/README.md index 3dd939312b913..995140fe62e00 100644 --- a/misc/helm-charts/operator/README.md +++ b/misc/helm-charts/operator/README.md @@ -406,12 +406,6 @@ Materialize has been vetted to work on instances with the following properties: When operating in AWS, we recommend using the `r7gd` and `r6gd` families of instances (and `r8gd` once available) when running with local disk, and the `r8g`, `r7g`, and `r6g` families when running without local disk. -## CPU Affinity - -It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy). -This ensures that each worker thread of Materialize is given exclusively access to a vCPU. Our benchmarks have shown this -to substantially improve the performance of compute-bound workloads. - ## Learn More - [Materialize Documentation](https://materialize.com/docs) diff --git a/misc/helm-charts/operator/README.md.gotmpl b/misc/helm-charts/operator/README.md.gotmpl index a69d25b3ab480..7b5af4ad70f89 100644 --- a/misc/helm-charts/operator/README.md.gotmpl +++ b/misc/helm-charts/operator/README.md.gotmpl @@ -347,12 +347,6 @@ Materialize has been vetted to work on instances with the following properties: When operating in AWS, we recommend using the `r7gd` and `r6gd` families of instances (and `r8gd` once available) when running with local disk, and the `r8g`, `r7g`, and `r6g` families when running without local disk. -## CPU Affinity - -It is strongly recommended to enable the Kubernetes `static` [CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy). -This ensures that each worker thread of Materialize is given exclusively access to a vCPU. Our benchmarks have shown this -to substantially improve the performance of compute-bound workloads. 
- ## Learn More - [Materialize Documentation](https://materialize.com/docs) From 1a2e83b64cbefb82c97493980483cbcb685789a3 Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Tue, 2 Dec 2025 11:08:14 -0600 Subject: [PATCH 02/11] pr review changes --- doc/user/content/installation/_index.md | 176 +----------------- .../installation/install-on-aws/_index.md | 4 +- .../appendix-deployment-guidelines.md | 8 +- .../install-on-aws/terraform-module/_index.md | 81 ++++---- .../installation/install-on-azure/_index.md | 4 +- .../appendix-deployment-guidelines.md | 8 +- .../terraform-module/_index.md | 86 +++++---- .../installation/install-on-gcp/_index.md | 4 +- .../appendix-deployment-guidelines.md | 8 +- .../install-on-gcp/terraform-module/_index.md | 84 +++++---- .../install-on-local-kind/_index.md | 2 +- .../content/installation/upgrade-to-swap.md | 2 +- doc/user/content/installation/upgrading.md | 25 +-- doc/user/content/releases/_index.md | 4 +- .../self-managed/aws-terraform-configs.html | 2 +- .../self-managed/azure-terraform-configs.html | 2 +- .../general-rules-for-upgrades.md | 2 +- .../self-managed/prepare-nodes-and-upgrade.md | 2 +- .../self-managed/upgrade-notes/v26.0.md | 2 +- 19 files changed, 185 insertions(+), 321 deletions(-) diff --git a/doc/user/content/installation/_index.md b/doc/user/content/installation/_index.md index c0550e9b16b61..17dc5f3ffa101 100644 --- a/doc/user/content/installation/_index.md +++ b/doc/user/content/installation/_index.md @@ -12,184 +12,10 @@ aliases: {{< include-md file="shared-content/self-managed/install-landing-page.md" >}} -## Upgrade - -{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" >}} - -### Upgrade guides - -The following upgrade guides are available: - -| | Notes | -| ------------- | -------| -| [Upgrade on kind](/installation/install-on-local-kind/upgrade-on-local-kind/) | -| [Upgrade on AWS](/installation/install-on-aws/upgrade-on-aws/) | Uses Materialize provided Terraform | -| [Upgrade on Azure Kubernetes Service (AKS)](/installation/install-on-azure/upgrade-on-azure/) | Uses Materialize provided Terraform | -| [Upgrade on Google Kubernetes Engine (GKE)](/installation/install-on-gcp/upgrade-on-gcp/) | Uses Materialize provided Terraform | - - -### General notes for upgrades - -The following provides some general notes for upgrades. For specific examples, -see the [Upgrade guides](#upgrade-guides) - - -#### Upgrading the Helm Chart and Kubernetes Operator - -{{< important >}} - -When upgrading Materialize, always upgrade the operator first. - -{{}} - -The Materialize Kubernetes operator is deployed via Helm and can be updated through standard Helm upgrade commands. - -```shell -helm upgrade my-materialize-operator materialize/misc/helm-charts/operator -``` - -If you have custom values, make sure to include your values file: - -```shell -helm upgrade my-materialize-operator materialize/misc/helm-charts/operator -f my-values.yaml -``` - -#### Upgrading Materialize Instances - -In order to minimize unexpected downtime and avoid connection drops at critical -periods for your application, changes are not immediately and automatically -rolled out by the Operator. Instead, the upgrade process involves two steps: -- First, staging spec changes to the Materialize custom resource. -- Second, applying the changes via a `requestRollout`. - -When upgrading your Materialize instances, you'll first want to update the `environmentdImageRef` field in the Materialize custom resource spec. 
- -##### Updating the `environmentdImageRef` -To find a compatible version with your currently deployed Materialize operator, check the `appVersion` in the Helm repository. - -```shell -helm list -n materialize -``` - -Using the returned version, we can construct an image ref. -We always recommend using the official Materialize image repository -`docker.io/materialize/environmentd`. - -``` -environmentdImageRef: docker.io/materialize/environmentd:v26.0.0 -``` - -The following is an example of how to patch the version. -```shell -# For version updates, first update the image reference -kubectl patch materialize \ - -n \ - --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\"}}" -``` - -##### Applying the changes via `requestRollout` - -To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID. -Be sure to consult the [Rollout Configurations](#rollout-configuration) to ensure you've selected the correct rollout behavior. -```shell -# Then trigger the rollout with a new UUID -kubectl patch materialize \ - -n \ - --type='merge' \ - -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" -``` - - -It is possible to combine both operations in a single command if preferred: - -```shell -kubectl patch materialize \ - -n materialize-environment \ - --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\", \"requestRollout\": \"$(uuidgen)\"}}" -``` - -##### Using YAML Definition - -Alternatively, you can update your Materialize custom resource definition directly: - -```yaml -apiVersion: materialize.cloud/v1alpha1 -kind: Materialize -metadata: - name: 12345678-1234-1234-1234-123456789012 - namespace: materialize-environment -spec: - environmentdImageRef: materialize/environmentd:v26.0.0 # Update version as needed - requestRollout: 22222222-2222-2222-2222-222222222222 # Generate new UUID - forceRollout: 33333333-3333-3333-3333-333333333333 # Optional: for forced rollouts - inPlaceRollout: false # In Place rollout is deprecated and ignored. Please use rolloutStrategy - rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. Can be WaitUntilReady or ImmediatelyPromoteCausingDowntime - backendSecretName: materialize-backend -``` - -Apply the updated definition: - -```shell -kubectl apply -f materialize.yaml -``` - -#### Rollout Configuration - -##### Forced Rollouts - -If you need to force a rollout even when there are no changes to the instance: - -```shell -kubectl patch materialize \ - -n materialize-environment \ - --type='merge' \ - -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\", \"forceRollout\": \"$(uuidgen)\"}}" -``` - -##### Rollout Strategies - -The behavior of the new version rollout follows your `rolloutStrategy` setting: - -`WaitUntilReady` (default): - -New instances are created and all dataflows are determined to be ready before cutover and terminating the old version, temporarily requiring twice the resources during the transition. - -`ImmediatelyPromoteCausingDowntime`: - -Tears down the prior version before creating and promoting the new version. This causes downtime equal to the duration it takes for dataflows to hydrate, but does not require additional resources. - -##### In Place Rollout - -The `inPlaceRollout` setting has been deprecated and will be ignored. 
- -### Verifying the Upgrade - -After initiating the rollout, you can monitor the status field of the Materialize custom resource to check on the upgrade. - -```shell -# Watch the status of your Materialize environment -kubectl get materialize -n materialize-environment -w - -# Check the logs of the operator -kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize -``` -### Version Specific Upgrade Notes - -#### Upgrading to `v26.1` and later versions -{{< include-md file="shared-content/self-managed/upgrade-notes/v26.1.md" >}} - -#### Upgrading to `v26.0` - -{{< include-md file="shared-content/self-managed/upgrade-notes/v26.0.md" >}} - -#### Upgrading between minor versions less than `v26` - - Prior to `v26`, you must upgrade at most one minor version at a time. For - example, upgrading from `v25.1.5` to `v25.2.16` is permitted. ## See also - [Materialize Operator Configuration](/installation/configuration/) +- [Upgrade Guide Configuration](/installation/upgrading/) - [Troubleshooting](/installation/troubleshooting/) - [FAQ](/installation/faq/) diff --git a/doc/user/content/installation/install-on-aws/_index.md b/doc/user/content/installation/install-on-aws/_index.md index 4e84093d164d2..0919ee64f2fea 100644 --- a/doc/user/content/installation/install-on-aws/_index.md +++ b/doc/user/content/installation/install-on-aws/_index.md @@ -16,8 +16,8 @@ menu: | Guide | Description | |-------|-------------| -| [Terraform Provider](/installation/install-on-aws/terraform-module/) | Install Materialize on AWS using our new Unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-aws/legacy-terraform-module/) | Install Materialize on AWS using our Terraform Provider (legacy) | +| [Terraform Provider](/installation/install-on-aws/terraform-module/) | Install Materialize on AWS using our new unified Terraform Provider | +| [Terraform Provider (legacy)](/installation/install-on-aws/legacy-terraform-module/) | Install Materialize on AWS using our legacy Terraform Provider | | [Appendix: AWS deployment guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) | Additional guidelines for AWS deployments | ## See also diff --git a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md index 755f7bfb926d1..248f169d4b7b4 100644 --- a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md @@ -15,14 +15,14 @@ menu: As a general guideline, we recommend: - ARM-based CPU -- A 1:8 ratio of vCPU to GiB memory is recommended. -- When using swap, it is recommended to use a 8:1 ratio of GiB local instance storage to GiB Ram. +- A 1:8 ratio of vCPU to GiB memory. +- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. {{% self-managed/aws-recommended-instances %}} ## Locally-attached NVMe storage -Configuring swap on nodes to using locally-attached NVMe storage allows +Configuring swap on nodes to use locally-attached NVMe storage allows Materialize to spill to disk when operating on datasets larger than main memory. This setup can provide significant cost savings and provides a more graceful degradation rather than OOMing. 
Network-attached storage (like EBS volumes) can @@ -45,7 +45,7 @@ With this change, the Terraform: See [Upgrade Notes](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#v061). {{< note >}} -If deploying `v25.2` Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26` this will be handled automatically. +If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. {{< /note >}} ## TLS diff --git a/doc/user/content/installation/install-on-aws/terraform-module/_index.md b/doc/user/content/installation/install-on-aws/terraform-module/_index.md index 475455fa85d85..f2f09e4007525 100644 --- a/doc/user/content/installation/install-on-aws/terraform-module/_index.md +++ b/doc/user/content/installation/install-on-aws/terraform-module/_index.md @@ -9,17 +9,14 @@ menu: --- Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for a production ready Materialize database. +deploy all services required for Materialize to run on AWS. The module is intended to provide a simple set of examples on how to deploy -materialize. It can be used as is or modules can be taken from the example and +Materialize. It can be used as is or modules can be taken from the example and integrated with existing DevOps tooling. -The repository can be found at: - -***[Materialize Terraform Self-Managed AWS](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws)*** - -Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) documentation for a full understanding -of the module structure and customizations. +For details on the module structure and customization, see: +* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) Also check out the [AWS deployment guide](/installation/install-on-aws/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. 
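The flow below is a rough sketch of using the AWS example as-is; it assumes the repository layout and `terraform.tfvars` described in the example further down this page, and that your AWS credentials/profile are already configured:

```shell
# Clone the modular Terraform repository and switch to the AWS "simple" example
git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git
cd materialize-terraform-self-managed/aws/examples/simple

# Populate terraform.tfvars (see the example below), then deploy
terraform init
terraform plan -out=simple-demo.tfplan
terraform apply simple-demo.tfplan
```

After the apply completes, you can point `kubectl` at the new EKS cluster to confirm that the Materialize operator and instance pods are running.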
@@ -43,7 +40,7 @@ Also check out the [AWS deployment guide](/installation/install-on-aws/appendix- #### License key -{{< include-md file="shared-content/license-key-required.md" >}} +{{< yaml-table data="self_managed/license_key" >}} --- @@ -64,40 +61,56 @@ cd materialize-terraform-self-managed/aws/examples/simple This example provisions the following infrastructure: ### Networking -- **VPC**: 10.0.0.0/16 with DNS hostnames and support enabled -- **Subnets**: 3 private subnets (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) and 3 public subnets (10.0.101.0/24, 10.0.102.0/24, 10.0.103.0/24) across availability zones us-east-1a, us-east-1b, us-east-1c -- **NAT Gateway**: Single NAT Gateway for all private subnets -- **Internet Gateway**: For public subnet connectivity + +| Resource | Description | +|----------|-------------| +| VPC | 10.0.0.0/16 with DNS hostnames and support enabled | +| Subnets | 3 private subnets (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) and 3 public subnets (10.0.101.0/24, 10.0.102.0/24, 10.0.103.0/24) across availability zones us-east-1a, us-east-1b, us-east-1c | +| NAT Gateway | Single NAT Gateway for all private subnets | +| Internet Gateway | For public subnet connectivity | ### Compute -- **EKS Cluster**: Version 1.32 with CloudWatch logging (API, audit) -- **Base Node Group**: 2 nodes (t4g.medium) for Karpenter and CoreDNS -- **Karpenter**: Auto-scaling controller with two node classes: - - Generic nodepool: t4g.xlarge instances for general workloads - - Materialize nodepool: r7gd.2xlarge instances with swap enabled and dedicated taints to run materialize instance workloads. + +| Resource | Description | +|----------|-------------| +| EKS Cluster | Version 1.32 with CloudWatch logging (API, audit) | +| Base Node Group | 2 nodes (t4g.medium) for Karpenter and CoreDNS | +| Karpenter | Auto-scaling controller with two node classes: Generic nodepool (t4g.xlarge instances for general workloads) and Materialize nodepool (r7gd.2xlarge instances with swap enabled and dedicated taints to run materialize instance workloads) | ### Database -- **RDS PostgreSQL**: Version 15, db.t3.large instance -- **Storage**: 50GB allocated, autoscaling up to 100GB -- **Deployment**: Single-AZ (non-production configuration) -- **Backups**: 7-day retention -- **Security**: Dedicated security group with access from EKS cluster and nodes + +| Resource | Description | +|----------|-------------| +| RDS PostgreSQL | Version 15, db.t3.large instance | +| Storage | 50GB allocated, autoscaling up to 100GB | +| Deployment | Single-AZ (non-production configuration) | +| Backups | 7-day retention | +| Security | Dedicated security group with access from EKS cluster and nodes | ### Storage -- **S3 Bucket**: Dedicated bucket for Materialize persistence -- **Encryption**: Disabled (for testing; enable in production) -- **Versioning**: Disabled (for testing; enable in production) -- **IAM Role**: IRSA role for Kubernetes service account access + +| Resource | Description | +|----------|-------------| +| S3 Bucket | Dedicated bucket for Materialize persistence | +| Encryption | Disabled (for testing; enable in production) | +| Versioning | Disabled (for testing; enable in production) | +| IAM Role | IRSA role for Kubernetes service account access | ### Kubernetes Add-ons -- **AWS Load Balancer Controller**: For managing Network Load Balancers -- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal -- **Self-signed ClusterIssuer**: Provides 
self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. + +| Resource | Description | +|----------|-------------| +| AWS Load Balancer Controller | For managing Network Load Balancers | +| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | +| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | ### Materialize -- **Operator**: Materialize Kubernetes operator -- **Instance**: Single Materialize instance in `materialize-environment` namespace -- **Network Load Balancer**: Dedicated internal NLB for Materialize access (ports 6875, 6876, 8080) + +| Resource | Description | +|----------|-------------| +| Operator | Materialize Kubernetes operator | +| Instance | Single Materialize instance in `materialize-environment` namespace | +| Network Load Balancer | Dedicated internal NLB for Materialize access (ports 6875, 6876, 8080) | --- @@ -111,7 +124,7 @@ Before running Terraform, create a `terraform.tfvars` file with the following va name_prefix = "simple-demo" aws_region = "us-east-1" aws_profile = "your-aws-profile" -license_key = "your-materialize-license-key" # Get from https://materialize.com/self-managed/ +license_key = "your-materialize-license-key" tags = { environment = "demo" } diff --git a/doc/user/content/installation/install-on-azure/_index.md b/doc/user/content/installation/install-on-azure/_index.md index be733f8a566ac..937c03e1973eb 100644 --- a/doc/user/content/installation/install-on-azure/_index.md +++ b/doc/user/content/installation/install-on-azure/_index.md @@ -15,8 +15,8 @@ aliases: | Guide | Description | |-------|-------------| -| [Terraform Provider](/installation/install-on-azure/terraform-module/) | Install Materialize on Azure using our new Unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-azure/legacy-terraform-module/) | Install Materialize on Azure using our Terraform Provider (legacy) | +| [Terraform Provider](/installation/install-on-azure/terraform-module/) | Install Materialize on Azure using our new unified Terraform Provider | +| [Terraform Provider (legacy)](/installation/install-on-azure/legacy-terraform-module/) | Install Materialize on Azure using our legacy Terraform Provider | | [Appendix: Azure deployment guidelines](/installation/install-on-azure/appendix-deployment-guidelines/) | Additional guidelines for Azure deployments | ## See also diff --git a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md index cb3f16f1a01c9..b31df4552a7c0 100644 --- a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md @@ -13,8 +13,8 @@ menu: As a general guideline, we recommend: - ARM-based CPU -- A 1:8 ratio of vCPU to GiB memory is recommended. -- When using swap, it is recommended to use a 8:1 ratio of GiB local instance storage to GiB Ram. +- A 1:8 ratio of vCPU to GiB memory. +- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. ### Recommended Azure VM Types with Local NVMe Disks @@ -39,7 +39,7 @@ when the VM is stopped or deleted. 
## Locally-attached NVMe storage -Configuring swap on nodes to using locally-attached NVMe storage allows +Configuring swap on nodes to use locally-attached NVMe storage allows Materialize to spill to disk when operating on datasets larger than main memory. This setup can provide significant cost savings and provides a more graceful degradation rather than OOMing. Network-attached storage (like EBS volumes) can @@ -62,7 +62,7 @@ With this change, the Terraform: See [Upgrade Notes](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#v061). {{< note >}} -If deploying `v25.2` Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26` this will be handled automatically. +If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. {{< /note >}} ## Recommended Azure Blob Storage diff --git a/doc/user/content/installation/install-on-azure/terraform-module/_index.md b/doc/user/content/installation/install-on-azure/terraform-module/_index.md index ca72c1f6a607b..8c147f3d73294 100644 --- a/doc/user/content/installation/install-on-azure/terraform-module/_index.md +++ b/doc/user/content/installation/install-on-azure/terraform-module/_index.md @@ -9,17 +9,16 @@ menu: --- Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for a production ready Materialize database. +deploy all services required for a Materialize to run on Azure cloud. The module is intended to provide a simple set of examples on how to deploy -materialize. It can be used as is or modules can be taken from the example and +Materialize. It can be used as is or modules can be taken from the example and integrated with existing DevOps tooling. -The repository can be found at: +The repository can be found at: ***[Materialize Terraform Self-Managed Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)*** -***[Materialize Terraform Self-Managed Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)*** - -Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) documentation for a full understanding -of the module structure and customizations. +For details on the module structure and customization, see: +* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) Also check out the [Azure deployment guide](/installation/install-on-azure/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. 
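A minimal sketch of running the Azure example, assuming the repository has already been cloned, the Azure CLI is installed, and the placeholder subscription ID is replaced with the value from your `terraform.tfvars`:

```shell
# Authenticate and select the subscription the example should deploy into
az login
az account set --subscription "12345678-1234-1234-1234-123456789012"

# Run the Azure "simple" example with terraform.tfvars populated
cd materialize-terraform-self-managed/azure/examples/simple
terraform init
terraform apply
```

If you instead lift individual modules out of the example into existing Terraform code, run the same `init`/`apply` cycle from your own root module.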
@@ -63,44 +62,63 @@ cd materialize-terraform-self-managed/azure/examples/simple This example provisions the following infrastructure: ### Resource Group -- **Resource Group**: New resource group to contain all resources + +| Resource | Description | +|----------|-------------| +| Resource Group | New resource group to contain all resources | ### Networking -- **Virtual Network**: 20.0.0.0/16 address space -- **AKS Subnet**: 20.0.0.0/20 with NAT Gateway association and service endpoints for Storage and SQL -- **PostgreSQL Subnet**: 20.0.16.0/24 delegated to PostgreSQL Flexible Server -- **NAT Gateway**: Standard SKU with static public IP for outbound connectivity -- **Private DNS Zone**: For PostgreSQL private endpoint resolution with VNet link + +| Resource | Description | +|----------|-------------| +| Virtual Network | 20.0.0.0/16 address space | +| AKS Subnet | 20.0.0.0/20 with NAT Gateway association and service endpoints for Storage and SQL | +| PostgreSQL Subnet | 20.0.16.0/24 delegated to PostgreSQL Flexible Server | +| NAT Gateway | Standard SKU with static public IP for outbound connectivity | +| Private DNS Zone | For PostgreSQL private endpoint resolution with VNet link | ### Compute -- **AKS Cluster**: Version 1.32 with Cilium networking (network plugin: azure, data plane: cilium, policy: cilium) -- **Default Node Pool**: Standard_D4pds_v6 VMs, autoscaling 2-5 nodes, labeled for generic workloads -- **Materialize Node Pool**: Standard_E4pds_v6 VMs with 100GB disk, autoscaling 2-5 nodes, swap enabled, dedicated taints for Materialize workloads -- **Managed Identities**: - - AKS cluster identity: Used by AKS control plane to provision Azure resources (creating load balancers when Materialize LoadBalancer services are created, managing network interfaces) - - Workload identity: Used by Materialize pods for secure, passwordless authentication to Azure Storage (no storage account keys stored in cluster) + +| Resource | Description | +|----------|-------------| +| AKS Cluster | Version 1.32 with Cilium networking (network plugin: azure, data plane: cilium, policy: cilium) | +| Default Node Pool | Standard_D4pds_v6 VMs, autoscaling 2-5 nodes, labeled for generic workloads | +| Materialize Node Pool | Standard_E4pds_v6 VMs with 100GB disk, autoscaling 2-5 nodes, swap enabled, dedicated taints for Materialize workloads | +| Managed Identities | AKS cluster identity (used by AKS control plane to provision Azure resources like load balancers and network interfaces) and Workload identity (used by Materialize pods for secure, passwordless authentication to Azure Storage) | ### Database -- **Azure PostgreSQL Flexible Server**: Version 15 -- **SKU**: GP_Standard_D2s_v3 (2 vCores, 4GB memory) -- **Storage**: 32GB with 7-day backup retention -- **Network Access**: Public Network Access is disabled, Private access only (no public endpoint) -- **Database**: `materialize` database pre-created + +| Resource | Description | +|----------|-------------| +| Azure PostgreSQL Flexible Server | Version 15 | +| SKU | GP_Standard_D2s_v3 (2 vCores, 4GB memory) | +| Storage | 32GB with 7-day backup retention | +| Network Access | Public Network Access is disabled, Private access only (no public endpoint) | +| Database | `materialize` database pre-created | ### Storage -- **Storage Account**: Premium BlockBlobStorage with LRS replication for Materialize persistence -- **Container**: `materialize` blob container -- **Access Control**: Workload Identity federation for Kubernetes service account (passwordless 
authentication via OIDC) -- **Network Access**: Currently allows all traffic (production deployments should restrict to AKS subnet only traffic) + +| Resource | Description | +|----------|-------------| +| Storage Account | Premium BlockBlobStorage with LRS replication for Materialize persistence | +| Container | `materialize` blob container | +| Access Control | Workload Identity federation for Kubernetes service account (passwordless authentication via OIDC) | +| Network Access | Currently allows all traffic (production deployments should restrict to AKS subnet only traffic) | ### Kubernetes Add-ons -- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal -- **Self-signed ClusterIssuer**: Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. + +| Resource | Description | +|----------|-------------| +| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | +| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | ### Materialize -- **Operator**: Materialize Kubernetes operator -- **Instance**: Single Materialize instance in `materialize-environment` namespace -- **Load Balancers**: Internal Azure Load Balancers for Materialize access + +| Resource | Description | +|----------|-------------| +| Operator | Materialize Kubernetes operator | +| Instance | Single Materialize instance in `materialize-environment` namespace | +| Load Balancers | Internal Azure Load Balancers for Materialize access | --- @@ -115,7 +133,7 @@ subscription_id = "12345678-1234-1234-1234-123456789012" resource_group_name = "materialize-demo-rg" name_prefix = "simple-demo" location = "westus2" -license_key = "your-materialize-license-key" # Optional: Get from https://materialize.com/self-managed/ +license_key = "your-materialize-license-key" tags = { environment = "demo" } diff --git a/doc/user/content/installation/install-on-gcp/_index.md b/doc/user/content/installation/install-on-gcp/_index.md index c62a95c53eec1..70efeae9f08e6 100644 --- a/doc/user/content/installation/install-on-gcp/_index.md +++ b/doc/user/content/installation/install-on-gcp/_index.md @@ -16,8 +16,8 @@ menu: | Guide | Description | |-------|-------------| -| [Terraform Provider](/installation/install-on-gcp/terraform-module/) | Install Materialize on GCP using our new Unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-gcp/legacy-terraform-module/) | Install Materialize on GCP using our Terraform Provider (legacy) | +| [Terraform Provider](/installation/install-on-gcp/terraform-module/) | Install Materialize on GCP using our new unified Terraform Provider | +| [Terraform Provider (legacy)](/installation/install-on-gcp/legacy-terraform-module/) | Install Materialize on GCP using our legacy Terraform Provider | | [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) | Additional guidelines for Google Cloud deployments | ## See also diff --git a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md b/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md index 69eae78bce5cf..87cc1c9e2e783 100644 --- 
a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md +++ b/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md @@ -13,8 +13,8 @@ menu: As a general guideline, we recommend: - ARM-based CPU -- A 1:8 ratio of vCPU to GiB memory is recommended. -- When using swap, it is recommended to use a 8:1 ratio of GiB local instance storage to GiB Ram. +- A 1:8 ratio of vCPU to GiB memory. +- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. When operating on GCP in production, we recommend the following machine types that support local SSD attachment: @@ -64,7 +64,7 @@ type](https://cloud.google.com/compute/docs/disks/local-ssd#lssd_disk_options). ## Locally-attached NVMe storage -Configuring swap on nodes to using locally-attached NVMe storage allows +Configuring swap on nodes to use locally-attached NVMe storage allows Materialize to spill to disk when operating on datasets larger than main memory. This setup can provide significant cost savings and provides a more graceful degradation rather than OOMing. Network-attached storage (like EBS volumes) can @@ -87,7 +87,7 @@ With this change, the Terraform: See [Upgrade Notes](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#v061). {{< note >}} -If deploying `v25.2` Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26` this will be handled automatically. +If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. {{< /note >}} diff --git a/doc/user/content/installation/install-on-gcp/terraform-module/_index.md b/doc/user/content/installation/install-on-gcp/terraform-module/_index.md index 72acc7d697199..d4da589a4853b 100644 --- a/doc/user/content/installation/install-on-gcp/terraform-module/_index.md +++ b/doc/user/content/installation/install-on-gcp/terraform-module/_index.md @@ -9,19 +9,17 @@ menu: --- Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for a production ready Materialize database. +deploy all services required for Materialize to run on Google Cloud. The module is intended to provide a simple set of examples on how to deploy -materialize. It can be used as is or modules can be taken from the example and +Materialize. It can be used as is or modules can be taken from the example and integrated with existing DevOps tooling. -The repository can be found at: +The repository can be found at: ***[Materialize Terraform Self-Managed GCP](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)*** -***[Materialize Terraform Self-Managed GCP](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)*** -Please see the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp) documentation for a full understanding -of the module structure and customizations. - -Also check out the [GCP deployment guide](/installation/install-on-gcp/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. 
+For details on the module structure and customization, see: +* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp) {{< note >}} {{% self-managed/materialize-components-sentence %}} @@ -45,7 +43,7 @@ Also check out the [GCP deployment guide](/installation/install-on-gcp/appendix- #### License key -{{< include-md file="shared-content/license-key-required.md" >}} +{{< yaml-table data="self_managed/license_key" >}} --- @@ -65,41 +63,57 @@ cd materialize-terraform-self-managed/gcp/examples/simple This example provisions the following infrastructure: ### Networking -- **VPC Network**: Custom VPC with auto-create subnets disabled -- **Subnet**: 192.168.0.0/20 primary range with private Google access enabled -- **Secondary Ranges**: - - Pods: 192.168.64.0/18 - - Services: 192.168.128.0/20 -- **Cloud Router**: For NAT and routing configuration -- **Cloud NAT**: For outbound internet access from private nodes -- **VPC Peering**: Service networking connection for Cloud SQL private access + +| Resource | Description | +|----------|-------------| +| VPC Network | Custom VPC with auto-create subnets disabled | +| Subnet | 192.168.0.0/20 primary range with private Google access enabled | +| Secondary Ranges | Pods: 192.168.64.0/18, Services: 192.168.128.0/20 | +| Cloud Router | For NAT and routing configuration | +| Cloud NAT | For outbound internet access from private nodes | +| VPC Peering | Service networking connection for Cloud SQL private access | ### Compute -- **GKE Cluster**: Regional cluster with Workload Identity enabled -- **Generic Node Pool**: e2-standard-8 machines, autoscaling 2-5 nodes, 50GB disk, for general workloads -- **Materialize Node Pool**: n2-highmem-8 machines, autoscaling 2-5 nodes, 100GB disk, 1 local SSD, swap enabled, dedicated taints for Materialize workloads -- **Service Account**: GKE service account with workload identity binding + +| Resource | Description | +|----------|-------------| +| GKE Cluster | Regional cluster with Workload Identity enabled | +| Generic Node Pool | e2-standard-8 machines, autoscaling 2-5 nodes, 50GB disk, for general workloads | +| Materialize Node Pool | n2-highmem-8 machines, autoscaling 2-5 nodes, 100GB disk, 1 local SSD, swap enabled, dedicated taints for Materialize workloads | +| Service Account | GKE service account with workload identity binding | ### Database -- **Cloud SQL PostgreSQL**: Private IP only (no public IP) -- **Tier**: db-custom-2-4096 (2 vCPUs, 4GB memory) -- **Database**: `materialize` database with UTF8 charset -- **User**: `materialize` user with auto-generated password -- **Network**: Connected via VPC peering for private access + +| Resource | Description | +|----------|-------------| +| Cloud SQL PostgreSQL | Private IP only (no public IP) | +| Tier | db-custom-2-4096 (2 vCPUs, 4GB memory) | +| Database | `materialize` database with UTF8 charset | +| User | `materialize` user with auto-generated password | +| Network | Connected via VPC peering for private access | ### Storage -- **Cloud Storage Bucket**: Regional bucket for Materialize persistence -- **Access**: HMAC keys for S3-compatible access (Workload Identity service account with storage permissions is configured but not currently used by Materialize for GCS access, in future we will remove HMAC keys and support access to GCS either via Workload Identity Federation or via Kubernetes ServiceAccounts that impersonate 
IAM service accounts) -- **Versioning**: Disabled (for testing; enable in production) + +| Resource | Description | +|----------|-------------| +| Cloud Storage Bucket | Regional bucket for Materialize persistence | +| Access | HMAC keys for S3-compatible access (Workload Identity service account with storage permissions is configured but not currently used by Materialize for GCS access, in future we will remove HMAC keys and support access to GCS either via Workload Identity Federation or via Kubernetes ServiceAccounts that impersonate IAM service accounts) | +| Versioning | Disabled (for testing; enable in production) | ### Kubernetes Add-ons -- **cert-manager**: Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal -- **Self-signed ClusterIssuer**: Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. + +| Resource | Description | +|----------|-------------| +| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | +| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | ### Materialize -- **Operator**: Materialize Kubernetes operator in `materialize` namespace -- **Instance**: Single Materialize instance in `materialize-environment` namespace -- **Load Balancers**: GCP Load Balancers for Materialize access + +| Resource | Description | +|----------|-------------| +| Operator | Materialize Kubernetes operator in `materialize` namespace | +| Instance | Single Materialize instance in `materialize-environment` namespace | +| Load Balancers | GCP Load Balancers for Materialize access | --- @@ -125,7 +139,7 @@ Before running Terraform, create a `terraform.tfvars` file with the following va project_id = "my-gcp-project" name_prefix = "simple-demo" region = "us-central1" -license_key = "your-materialize-license-key" # Optional: Get from https://materialize.com/self-managed/ +license_key = "your-materialize-license-key" labels = { environment = "demo" created_by = "terraform" diff --git a/doc/user/content/installation/install-on-local-kind/_index.md b/doc/user/content/installation/install-on-local-kind/_index.md index bb7bfb41c5af3..18cba888b1bb1 100644 --- a/doc/user/content/installation/install-on-local-kind/_index.md +++ b/doc/user/content/installation/install-on-local-kind/_index.md @@ -1,5 +1,5 @@ --- -title: "Install locally on kind (via Helm)" +title: "Install locally on kind" description: "Deploy Self-managed Materialize to a local kind cluster." aliases: - /self-hosted/install-on-local-kind/ diff --git a/doc/user/content/installation/upgrade-to-swap.md b/doc/user/content/installation/upgrade-to-swap.md index e25c788c3b643..fe6da754dd4b8 100644 --- a/doc/user/content/installation/upgrade-to-swap.md +++ b/doc/user/content/installation/upgrade-to-swap.md @@ -27,7 +27,7 @@ file="shared-content/self-managed/gcp-terraform-v0.6.1-upgrade-notes.md" >}}. - {{< include-md file="shared-content/self-managed/azure-terraform-v0.6.1-upgrade-notes.md" >}}. -See also [General notes for upgrades](/installation/#upgrade). +See also [Upgrade Overview](/installation/upgrading/). 
{{< /annotation >}} diff --git a/doc/user/content/installation/upgrading.md b/doc/user/content/installation/upgrading.md index 424ac916f7d69..e1f6abc55a8db 100644 --- a/doc/user/content/installation/upgrading.md +++ b/doc/user/content/installation/upgrading.md @@ -10,11 +10,10 @@ The following provides a general outline and examples for upgrading Materialize. For a more specific set of steps, please consult the deployment-specific upgrade documentation: - - [Minikube](/installation/install-on-local-minikube/upgrade-on-local-minikube/) - [Kind](/installation/install-on-local-kind/upgrade-on-local-kind/) - - [AWS](/installation/install-on-aws/legacy-terraform-module/upgrade/) - - [GCP](/installation/install-on-gcp/legacy-terraform-module/upgrade/) - - [Azure](/installation/install-on-azure/legacy-terraform-module/upgrade/) + - [AWS (legacy Terraform)](/installation/install-on-aws/legacy-terraform-module/upgrade/) + - [GCP (legacy Terraform)](/installation/install-on-gcp/legacy-terraform-module/upgrade/) + - [Azure (legacy Terraform)](/installation/install-on-azure/legacy-terraform-module/upgrade/) ***When upgrading always***: - Upgrade the operator first and ensure version compatibility between the operator and the Materialize instance you are upgrading to. @@ -164,22 +163,16 @@ kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize ``` ### Version Specific Upgrade Notes -#### Upgrading to `v26.0` -- This is a major version upgrade. In order to upgrade to `v26.0`, you must first upgrade to `v25.2.15`, then upgrade to `v26.0.0`. -- New requirements were introduced for license keys. In order to upgrade, you will - first need to add a license key to the `backendSecret` used in the spec for your - Materialize resource. Please refer to our [instructions on how to get and install a license keys](/installation/faq#how-do-i-get-a-license-key). -- Swap is now enabled by default. Swap reduces the memory required to - operate Materialize and improves cost efficiency. Upgrading to `v26.0` - requires some preparation to ensure kubernetes nodes are labeled - and configured correctly. Please refer to our guides: - - {{< yaml-table data="self_managed/enable_swap_upgrade_guides" >}} - +#### Upgrading to `v26.1` and later versions +{{< include-md file="shared-content/self-managed/upgrade-notes/v26.1.md" >}} #### Upgrading between minor versions less than `v26` - Prior to `v26`, you must upgrade at most one minor version at a time. For example, upgrading from `v25.1.5` to `v25.2.15` is permitted. +#### Upgrading between minor versions less than `v26` + - Prior to `v26`, you must upgrade at most one minor version at a time. For + example, upgrading from `v25.1.5` to `v25.2.16` is permitted. + ## See also - [Configuration](/installation/configuration/) diff --git a/doc/user/content/releases/_index.md b/doc/user/content/releases/_index.md index f5bceeb00d510..3cad2ecc32dba 100644 --- a/doc/user/content/releases/_index.md +++ b/doc/user/content/releases/_index.md @@ -146,7 +146,7 @@ use the new setting `rolloutStrategy` to specify either: - `WaitUntilReady` (*Default*) - `ImmediatelyPromoteCausingDowntime` -For more information, see [`rolloutStrategy`](/installation/#rollout-strategies). +For more information, see [`rolloutStrategy`](/installation/upgrading/#rollout-strategies). ### Terraform helpers @@ -195,7 +195,7 @@ See also Upgrade Notes for release specific notes. 
{{< include-md file="shared-content/self-managed/upgrade-notes/v26.0.md" >}} -See also [General notes for upgrades](/installation/#general-notes-for-upgrades). +See also [Version specific upgrade notes](/installation/upgrading/#version-specific-upgrade-notes). ## See also diff --git a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html index 83c84ec02eca3..50ec7c43e0ff5 100644 --- a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html +++ b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html @@ -6,4 +6,4 @@ For more configuration options, you can use the `main.tf` file at the [root of the repository](https://github.com/MaterializeInc/terraform-aws-materialize/) instead. When running with the root `main.tf`, see [AWS required -configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/). \ No newline at end of file +configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/). diff --git a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html index 85e18a616dae6..f6da453b4ef44 100644 --- a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html +++ b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html @@ -7,4 +7,4 @@ the repository](https://github.com/MaterializeInc/terraform-azurerm-materialize/) instead. When running with the root `main.tf`, see [Azure required -configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/). \ No newline at end of file +configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/). diff --git a/doc/user/shared-content/self-managed/general-rules-for-upgrades.md b/doc/user/shared-content/self-managed/general-rules-for-upgrades.md index bf9b29bbdb4d0..3918b65011f2d 100644 --- a/doc/user/shared-content/self-managed/general-rules-for-upgrades.md +++ b/doc/user/shared-content/self-managed/general-rules-for-upgrades.md @@ -7,4 +7,4 @@ Whe upgrading: ensure compatibility. - **Always check** the [version specific upgrade - notes](/installation/#version-specific-upgrade-notes). + notes](/installation/upgrading/#version-specific-upgrade-notes). diff --git a/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md b/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md index 555c283f3a6a5..946aaeb166d4a 100644 --- a/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md +++ b/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md @@ -15,7 +15,7 @@ Materialize-provided Terraforms. {{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" >}} -See also [General notes for upgrades](/installation/#general-notes-for-upgrades) +See also [Upgrade Overview](/installation/upgrading/) {{< /tip >}} 1. Label existing scratchfs/lgalloc node groups. diff --git a/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md b/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md index 3d6dafa40feb1..bda5cb0245b66 100644 --- a/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md +++ b/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md @@ -8,7 +8,7 @@ - `ImmediatelyPromoteCausingDowntime` For more information, see - [`rolloutStrategy`](/installation/#rollout-strategies). + [`rolloutStrategy`](/installation/upgrading/#rollout-strategies). 
- New requirements were introduced for [license keys](/releases/#license-key). To upgrade, you will first need to add a license key to the `backendSecret` From 589f1f205f6d48c8868073b8c5d12753cedb3910 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Mon, 8 Dec 2025 11:10:40 -0500 Subject: [PATCH 03/11] some reorg + tweaks (part 1: install + broad reorg) --- doc/user/content/get-started/install.md | 12 +- doc/user/content/installation/_index.md | 21 -- .../appendix-aws-production-guide.md | 19 - ...ndix-materialize-crd-field-descriptions.md | 11 - .../installation/appendix-terraforms.md | 60 --- .../installation/install-on-aws/_index.md | 27 -- .../legacy-terraform-module/_index.md | 23 -- .../install-on-aws/terraform-module/_index.md | 161 -------- .../installation/install-on-azure/_index.md | 26 -- .../legacy-terraform-module/_index.md | 23 -- .../terraform-module/_index.md | 172 --------- .../installation/install-on-gcp/_index.md | 27 -- .../legacy-terraform-module/_index.md | 24 -- .../install-on-gcp/terraform-module/_index.md | 178 --------- .../installation/operational-guidelines.md | 60 --- doc/user/content/releases/_index.md | 9 +- .../security/self-managed/authentication.md | 2 +- .../self-managed-deployments/_index.md | 347 ++++++++++++++++++ .../appendix/_index.md | 18 + .../appendix}/appendix-cluster-sizes.md | 5 +- .../appendix}/configuration.md | 6 +- .../appendix/legacy/_index.md | 11 + .../appendix-configuration-legacy-aws.md} | 8 +- .../appendix-configuration-legacy-azure.md} | 8 +- .../appendix-configuration-legacy-gcp.md} | 8 +- .../appendix-legacy-terraform-releases.md | 38 ++ .../materialize-crd-field-descriptions.md | 13 + .../appendix}/upgrade-to-swap.md | 8 +- .../deployment-guidelines/_index.md | 18 + .../aws-deployment-guidelines.md} | 38 +- .../azure-deployment-guidelines.md} | 41 ++- .../gcp-deployment-guidelines.md} | 35 +- .../faq.md | 7 +- .../installation/_index.md | 25 ++ .../installation/install-on-aws.md | 275 ++++++++++++++ .../installation/install-on-azure.md | 274 ++++++++++++++ .../installation/install-on-gcp.md | 288 +++++++++++++++ .../installation/install-on-local-kind.md} | 2 +- .../installation/legacy/_index.md | 12 + .../legacy/install-on-aws-legacy.md} | 29 +- .../legacy/install-on-azure-legacy.md} | 10 +- .../legacy/install-on-gcp-legacy.md} | 15 +- .../release-versions.md | 4 +- .../troubleshooting.md | 10 +- .../upgrading/_index.md} | 132 ++++--- .../upgrading/legacy/_index.md | 12 + .../legacy/upgrade-on-aws-legacy.md} | 8 +- .../legacy/upgrade-on-azure-legacy.md} | 13 +- .../legacy/upgrade-on-gcp-legacy.md} | 20 +- .../upgrading/upgrade-on-aws.md | 240 ++++++++++++ .../upgrading/upgrade-on-kind.md} | 16 +- .../aws_terraform_deployed_components.yml | 6 +- .../azure_terraform_deployed_components.yml | 6 +- doc/user/data/self_managed/default_ports.yml | 11 + .../gcp_terraform_deployed_components.yml | 6 +- doc/user/data/self_managed/installation.yml | 76 ++++ doc/user/data/self_managed/terraform_list.yml | 12 +- ...orm_list.yml => terraform_list_legacy.yml} | 14 +- doc/user/data/self_managed/upgrades.yml | 39 ++ .../self-managed/aws-terraform-configs.html | 2 +- .../self-managed/azure-terraform-configs.html | 2 +- .../self-managed/gcp-terraform-configs.html | 2 +- .../shortcodes/self-managed/next-steps.html | 2 +- ...leshoot-console-mz_catalog_server_blurb.md | 2 +- .../versions/get-latest-version.html | 3 + ...l-helm-version-local-minikube-install.html | 13 - .../shared-content/rbac-sm/enable-rbac.md | 7 +- .../general-rules-for-upgrades.md | 12 
+- .../self-managed/install-landing-page.md | 31 -- .../self-managed/prepare-nodes-and-upgrade.md | 2 +- .../self-managed/upgrade-notes/v26.0.md | 4 +- 71 files changed, 1988 insertions(+), 1113 deletions(-) delete mode 100644 doc/user/content/installation/_index.md delete mode 100644 doc/user/content/installation/appendix-aws-production-guide.md delete mode 100644 doc/user/content/installation/appendix-materialize-crd-field-descriptions.md delete mode 100644 doc/user/content/installation/appendix-terraforms.md delete mode 100644 doc/user/content/installation/install-on-aws/_index.md delete mode 100644 doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md delete mode 100644 doc/user/content/installation/install-on-aws/terraform-module/_index.md delete mode 100644 doc/user/content/installation/install-on-azure/_index.md delete mode 100644 doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md delete mode 100644 doc/user/content/installation/install-on-azure/terraform-module/_index.md delete mode 100644 doc/user/content/installation/install-on-gcp/_index.md delete mode 100644 doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md delete mode 100644 doc/user/content/installation/install-on-gcp/terraform-module/_index.md delete mode 100644 doc/user/content/installation/operational-guidelines.md create mode 100644 doc/user/content/self-managed-deployments/_index.md create mode 100644 doc/user/content/self-managed-deployments/appendix/_index.md rename doc/user/content/{installation => self-managed-deployments/appendix}/appendix-cluster-sizes.md (93%) rename doc/user/content/{installation => self-managed-deployments/appendix}/configuration.md (90%) create mode 100644 doc/user/content/self-managed-deployments/appendix/legacy/_index.md rename doc/user/content/{installation/install-on-aws/legacy-terraform-module/appendix-configuration.md => self-managed-deployments/appendix/legacy/appendix-configuration-legacy-aws.md} (93%) rename doc/user/content/{installation/install-on-azure/legacy-terraform-module/appendix-configuration.md => self-managed-deployments/appendix/legacy/appendix-configuration-legacy-azure.md} (95%) rename doc/user/content/{installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md => self-managed-deployments/appendix/legacy/appendix-configuration-legacy-gcp.md} (94%) create mode 100644 doc/user/content/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases.md create mode 100644 doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md rename doc/user/content/{installation => self-managed-deployments/appendix}/upgrade-to-swap.md (83%) create mode 100644 doc/user/content/self-managed-deployments/deployment-guidelines/_index.md rename doc/user/content/{installation/install-on-aws/appendix-deployment-guidelines.md => self-managed-deployments/deployment-guidelines/aws-deployment-guidelines.md} (63%) rename doc/user/content/{installation/install-on-azure/appendix-deployment-guidelines.md => self-managed-deployments/deployment-guidelines/azure-deployment-guidelines.md} (76%) rename doc/user/content/{installation/install-on-gcp/appendix-deployment-guidelines.md => self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines.md} (87%) rename doc/user/content/{installation => self-managed-deployments}/faq.md (81%) create mode 100644 doc/user/content/self-managed-deployments/installation/_index.md create mode 100644 
doc/user/content/self-managed-deployments/installation/install-on-aws.md create mode 100644 doc/user/content/self-managed-deployments/installation/install-on-azure.md create mode 100644 doc/user/content/self-managed-deployments/installation/install-on-gcp.md rename doc/user/content/{installation/install-on-local-kind/_index.md => self-managed-deployments/installation/install-on-local-kind.md} (99%) create mode 100644 doc/user/content/self-managed-deployments/installation/legacy/_index.md rename doc/user/content/{installation/install-on-aws/legacy-terraform-module/install.md => self-managed-deployments/installation/legacy/install-on-aws-legacy.md} (95%) rename doc/user/content/{installation/install-on-azure/legacy-terraform-module/install.md => self-managed-deployments/installation/legacy/install-on-azure-legacy.md} (98%) rename doc/user/content/{installation/install-on-gcp/legacy-terraform-module/install.md => self-managed-deployments/installation/legacy/install-on-gcp-legacy.md} (97%) rename doc/user/content/{installation => self-managed-deployments}/release-versions.md (71%) rename doc/user/content/{installation => self-managed-deployments}/troubleshooting.md (95%) rename doc/user/content/{installation/upgrading.md => self-managed-deployments/upgrading/_index.md} (50%) create mode 100644 doc/user/content/self-managed-deployments/upgrading/legacy/_index.md rename doc/user/content/{installation/install-on-aws/legacy-terraform-module/upgrade.md => self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy.md} (93%) rename doc/user/content/{installation/install-on-azure/legacy-terraform-module/upgrade.md => self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md} (95%) rename doc/user/content/{installation/install-on-gcp/legacy-terraform-module/upgrade.md => self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md} (93%) create mode 100644 doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md rename doc/user/content/{installation/install-on-local-kind/upgrade-on-local-kind.md => self-managed-deployments/upgrading/upgrade-on-kind.md} (80%) create mode 100644 doc/user/data/self_managed/default_ports.yml create mode 100644 doc/user/data/self_managed/installation.yml rename doc/user/data/self_managed/{legacy_terraform_list.yml => terraform_list_legacy.yml} (50%) create mode 100644 doc/user/data/self_managed/upgrades.yml create mode 100644 doc/user/layouts/shortcodes/self-managed/versions/get-latest-version.html delete mode 100644 doc/user/layouts/shortcodes/self-managed/versions/step-install-helm-version-local-minikube-install.html delete mode 100644 doc/user/shared-content/self-managed/install-landing-page.md diff --git a/doc/user/content/get-started/install.md b/doc/user/content/get-started/install.md index 6622e519a2c9b..8b28c07556d95 100644 --- a/doc/user/content/get-started/install.md +++ b/doc/user/content/get-started/install.md @@ -12,4 +12,14 @@ aliases: - /self-managed/v25.1/get-started/install/ --- -{{< include-md file="shared-content/self-managed/install-landing-page.md" >}} +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-intro" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-helm" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-unified" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-legacy" %}} diff --git a/doc/user/content/installation/_index.md 
b/doc/user/content/installation/_index.md deleted file mode 100644 index 17dc5f3ffa101..0000000000000 --- a/doc/user/content/installation/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Self-Managed Deployments" -description: "Installation and upgrade guides for Self-Managed Materialize." -disable_list: true -menu: - main: - identifier: "installation" - weight: 5 -aliases: - - /self-managed/v25.1/installation/ ---- - -{{< include-md file="shared-content/self-managed/install-landing-page.md" >}} - - -## See also - -- [Materialize Operator Configuration](/installation/configuration/) -- [Upgrade Guide Configuration](/installation/upgrading/) -- [Troubleshooting](/installation/troubleshooting/) -- [FAQ](/installation/faq/) diff --git a/doc/user/content/installation/appendix-aws-production-guide.md b/doc/user/content/installation/appendix-aws-production-guide.md deleted file mode 100644 index 2049efd0a2543..0000000000000 --- a/doc/user/content/installation/appendix-aws-production-guide.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "Production guide" -description: "Production guide for Self-Managed Materialize AWS deployment." -draft: true - ---- - -The sample Materialize on AWS Terraform module are provided as a starting point -for evaluation. The Terraform module: - -- Does not use encryption for object storage. -- Does not use TLS for traffic. -- Does not implement RBAC (Role-Based Access Control). -- Does not include explicit OS version upgrade logic. -- Does not include Kubernetes upgrade logic. -- Relies on port forwarding for access. - - - diff --git a/doc/user/content/installation/appendix-materialize-crd-field-descriptions.md b/doc/user/content/installation/appendix-materialize-crd-field-descriptions.md deleted file mode 100644 index d674786dfbdad..0000000000000 --- a/doc/user/content/installation/appendix-materialize-crd-field-descriptions.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "Appendix: Materialize CRD Field Descriptions" -description: "Reference page on Materialize CRD Fields" -menu: - main: - parent: "installation" - identifier: "appendix-materialize-crd-field-descriptions" - weight: 97 ---- - -{{% self-managed/materialize-crd-descriptions %}} diff --git a/doc/user/content/installation/appendix-terraforms.md b/doc/user/content/installation/appendix-terraforms.md deleted file mode 100644 index de7ea46f42952..0000000000000 --- a/doc/user/content/installation/appendix-terraforms.md +++ /dev/null @@ -1,60 +0,0 @@ - ---- -title: "Terraform Modules" -description: "List of template Terraform modules that are available as a -starting point." -menu: - main: - parent: "installation" - identifier: "appendix-terraforms" - weight: 95 ---- - -To help you get started, Materialize provides Terraform modules. - -{{< important >}} -These modules are intended for evaluation/demonstration purposes and for serving -as a template when building your own production deployment. The modules should -not be directly relied upon for production deployments: **future releases of the -modules will contain breaking changes.** Instead, to use as a starting point for -your own production deployment, either: - -- Fork the repo and pin to a specific version; or - -- Use the code as a reference when developing your own deployment. 
- -{{}} - -### **Terraform Modules** - -Materialize provides a [**unified Terraform module**](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main?tab=readme-ov-file#materialize-self-managed-terraform-modules) in order to provide concrete examples and an opinionated model for deploying materialize. -This module supports deployments for AWS - -{{< yaml-table data="self_managed/terraform_list" >}} - -### *Legacy Terraform Modules* - -{{< yaml-table data="self_managed/legacy_terraform_list" >}} - -#### Materialize on AWS Terraform module - -{{< yaml-table data="self_managed/aws_terraform_versions" >}} - -{{% self-managed/aws-terraform-upgrade-notes %}} - -See also [Upgrade Notes]( -https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#upgrade-notes) -for release-specific upgrade notes. - - -#### Materialize on Azure Terraform module - -{{< yaml-table data="self_managed/azure_terraform_versions" >}} - -{{% self-managed/azure-terraform-upgrade-notes %}} - -### Materialize on GCP Terraform module - -{{< yaml-table data="self_managed/gcp_terraform_versions" >}} - -{{% self-managed/gcp-terraform-upgrade-notes %}} diff --git a/doc/user/content/installation/install-on-aws/_index.md b/doc/user/content/installation/install-on-aws/_index.md deleted file mode 100644 index 0919ee64f2fea..0000000000000 --- a/doc/user/content/installation/install-on-aws/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Install on AWS" -description: "Install and upgrade Materialize on AWS" -aliases: - - /self-hosted/install-on-aws/ - - /self-managed/v25.1/installation/install-on-aws/ -disable_list: true -menu: - main: - parent: "installation" - identifier: "install-on-aws" - weight: 20 ---- - -{{% self-managed/materialize-components-sentence %}} - -| Guide | Description | -|-------|-------------| -| [Terraform Provider](/installation/install-on-aws/terraform-module/) | Install Materialize on AWS using our new unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-aws/legacy-terraform-module/) | Install Materialize on AWS using our legacy Terraform Provider | -| [Appendix: AWS deployment guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) | Additional guidelines for AWS deployments | - -## See also - -- [Materialize Operator Configuration](/installation/configuration/) -- [Troubleshooting](/installation/troubleshooting/) -- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md deleted file mode 100644 index 8c91d1c7a569c..0000000000000 --- a/doc/user/content/installation/install-on-aws/legacy-terraform-module/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Terraform Module (legacy)" -description: "" -disable_list: true -disable_toc: true -menu: - main: - parent: "install-on-aws" - identifier: "install-on-aws-legacy-terraform-module" - weight: 5 - - ---- - -The tutorials in this section show you how to deploy Materialize using the [Materialize on AWS Legacy Terraform -module](https://github.com/MaterializeInc/terraform-aws-materialize). 
- - -| Guide | Description | -|-------|-------------| -| [Install](/installation/install-on-aws/legacy-terraform-module/install/) | Install Materialize on AWS | -| [Upgrade](/installation/install-on-aws/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on AWS | -| [Appendix: AWS configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/) | Configuration for AWS deployments | diff --git a/doc/user/content/installation/install-on-aws/terraform-module/_index.md b/doc/user/content/installation/install-on-aws/terraform-module/_index.md deleted file mode 100644 index f2f09e4007525..0000000000000 --- a/doc/user/content/installation/install-on-aws/terraform-module/_index.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: "Terraform Module" -description: "" -menu: - main: - parent: "install-on-aws" - identifier: "install-aws-terraform" - weight: 5 ---- - -Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for Materialize to run on AWS. -The module is intended to provide a simple set of examples on how to deploy -Materialize. It can be used as is or modules can be taken from the example and -integrated with existing DevOps tooling. - -For details on the module structure and customization, see: -* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) -* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) - -Also check out the [AWS deployment guide](/installation/install-on-aws/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. - -{{< note >}} -{{% self-managed/materialize-components-sentence %}} -{{< /note >}} - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - - -## Prerequisites - -- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) -- [AWS Cli ](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -- [`kubectl`](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) - -#### License key - -{{< yaml-table data="self_managed/license_key" >}} - ---- - -# Example: Simple Materialize Deployment on AWS - -This example demonstrates how to deploy a complete Materialize environment on AWS using the modular Terraform setup from this repository. 
- - -## Setup -```shell -git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git -cd materialize-terraform-self-managed/aws/examples/simple -```` - - -## What Gets Created - -This example provisions the following infrastructure: - -### Networking - -| Resource | Description | -|----------|-------------| -| VPC | 10.0.0.0/16 with DNS hostnames and support enabled | -| Subnets | 3 private subnets (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) and 3 public subnets (10.0.101.0/24, 10.0.102.0/24, 10.0.103.0/24) across availability zones us-east-1a, us-east-1b, us-east-1c | -| NAT Gateway | Single NAT Gateway for all private subnets | -| Internet Gateway | For public subnet connectivity | - -### Compute - -| Resource | Description | -|----------|-------------| -| EKS Cluster | Version 1.32 with CloudWatch logging (API, audit) | -| Base Node Group | 2 nodes (t4g.medium) for Karpenter and CoreDNS | -| Karpenter | Auto-scaling controller with two node classes: Generic nodepool (t4g.xlarge instances for general workloads) and Materialize nodepool (r7gd.2xlarge instances with swap enabled and dedicated taints to run materialize instance workloads) | - -### Database - -| Resource | Description | -|----------|-------------| -| RDS PostgreSQL | Version 15, db.t3.large instance | -| Storage | 50GB allocated, autoscaling up to 100GB | -| Deployment | Single-AZ (non-production configuration) | -| Backups | 7-day retention | -| Security | Dedicated security group with access from EKS cluster and nodes | - -### Storage - -| Resource | Description | -|----------|-------------| -| S3 Bucket | Dedicated bucket for Materialize persistence | -| Encryption | Disabled (for testing; enable in production) | -| Versioning | Disabled (for testing; enable in production) | -| IAM Role | IRSA role for Kubernetes service account access | - -### Kubernetes Add-ons - -| Resource | Description | -|----------|-------------| -| AWS Load Balancer Controller | For managing Network Load Balancers | -| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | -| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | - -### Materialize - -| Resource | Description | -|----------|-------------| -| Operator | Materialize Kubernetes operator | -| Instance | Single Materialize instance in `materialize-environment` namespace | -| Network Load Balancer | Dedicated internal NLB for Materialize access (ports 6875, 6876, 8080) | - ---- - -## Getting Started - -### Step 1: Set Required Variables - -Before running Terraform, create a `terraform.tfvars` file with the following variables: - -```hcl -name_prefix = "simple-demo" -aws_region = "us-east-1" -aws_profile = "your-aws-profile" -license_key = "your-materialize-license-key" -tags = { - environment = "demo" -} -``` - -**Required Variables:** -- `name_prefix`: Prefix for all resource names -- `aws_region`: AWS region for deployment -- `aws_profile`: AWS CLI profile to use -- `tags`: Map of tags to apply to resources -- `license_key`: Materialize license key - ---- - -### Step 2: Deploy Materialize - -Run the usual Terraform workflow: - -```bash -terraform init -terraform apply -``` - ---- - -## Notes - -* You can customize each module independently. -* To reduce cost in your demo environment, you can tweak subnet CIDRs and instance types in `main.tf`. 
- -***Don't forget to destroy resources when finished:*** -```bash -terraform destroy -``` diff --git a/doc/user/content/installation/install-on-azure/_index.md b/doc/user/content/installation/install-on-azure/_index.md deleted file mode 100644 index 937c03e1973eb..0000000000000 --- a/doc/user/content/installation/install-on-azure/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Install on Azure" -description: "Install and upgrade Materialize on Azure" -disable_list: true -menu: - main: - parent: "installation" - identifier: "install-on-azure" - weight: 30 -aliases: - - /self-managed/v25.1/installation/install-on-azure/ ---- - -{{% self-managed/materialize-components-sentence blobstorage="blob storage; specifically **block** blob storage on Azure" %}} - -| Guide | Description | -|-------|-------------| -| [Terraform Provider](/installation/install-on-azure/terraform-module/) | Install Materialize on Azure using our new unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-azure/legacy-terraform-module/) | Install Materialize on Azure using our legacy Terraform Provider | -| [Appendix: Azure deployment guidelines](/installation/install-on-azure/appendix-deployment-guidelines/) | Additional guidelines for Azure deployments | - -## See also - -- [Materialize Operator Configuration](/installation/configuration/) -- [Troubleshooting](/installation/troubleshooting/) -- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md deleted file mode 100644 index 9588822487a23..0000000000000 --- a/doc/user/content/installation/install-on-azure/legacy-terraform-module/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Terraform Module (legacy)" -description: "" -disable_list: true -disable_toc: true -menu: - main: - parent: "install-on-azure" - identifier: "install-on-azure-legacy-terraform-module" - weight: 5 - - ---- - -The tutorials in this section show you how to deploy Materialize using the [Materialize on Azure Legacy Terraform -modules](https://github.com/MaterializeInc/terraform-azurerm-materialize). - - -| Guide | Description | -|-------|-------------| -| [Install](/installation/install-on-azure/legacy-terraform-module/install/) | Install Materialize on Azure | -| [Upgrade](/installation/install-on-azure/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on Azure | -| [Appendix: Azure configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/) | Configuration for Azure deployments | diff --git a/doc/user/content/installation/install-on-azure/terraform-module/_index.md b/doc/user/content/installation/install-on-azure/terraform-module/_index.md deleted file mode 100644 index 8c147f3d73294..0000000000000 --- a/doc/user/content/installation/install-on-azure/terraform-module/_index.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Terraform Module" -description: "" -menu: - main: - parent: "install-on-azure" - identifier: "install-azure-terraform" - weight: 5 ---- - -Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for a Materialize to run on Azure cloud. -The module is intended to provide a simple set of examples on how to deploy -Materialize. It can be used as is or modules can be taken from the example and -integrated with existing DevOps tooling. 
- -The repository can be found at: ***[Materialize Terraform Self-Managed Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)*** - -For details on the module structure and customization, see: -* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) -* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) - -Also check out the [Azure deployment guide](/installation/install-on-azure/appendix-deployment-guidelines/) for details on recommended instance sizing and configuration. - -{{< note >}} -{{% self-managed/materialize-components-sentence %}} -{{< /note >}} - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - - -## Prerequisites - -- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) -- [Azure Cli ](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) -- [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) - -#### License key - -{{< include-md file="shared-content/license-key-required.md" >}} - ---- - -# Example: Simple Materialize Deployment on Azure - -This example demonstrates how to deploy a complete Materialize environment on Azure using the modular Terraform setup from this repository. - - -## Setup -```shell -git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git -cd materialize-terraform-self-managed/azure/examples/simple -```` - -## What Gets Created - -This example provisions the following infrastructure: - -### Resource Group - -| Resource | Description | -|----------|-------------| -| Resource Group | New resource group to contain all resources | - -### Networking - -| Resource | Description | -|----------|-------------| -| Virtual Network | 20.0.0.0/16 address space | -| AKS Subnet | 20.0.0.0/20 with NAT Gateway association and service endpoints for Storage and SQL | -| PostgreSQL Subnet | 20.0.16.0/24 delegated to PostgreSQL Flexible Server | -| NAT Gateway | Standard SKU with static public IP for outbound connectivity | -| Private DNS Zone | For PostgreSQL private endpoint resolution with VNet link | - -### Compute - -| Resource | Description | -|----------|-------------| -| AKS Cluster | Version 1.32 with Cilium networking (network plugin: azure, data plane: cilium, policy: cilium) | -| Default Node Pool | Standard_D4pds_v6 VMs, autoscaling 2-5 nodes, labeled for generic workloads | -| Materialize Node Pool | Standard_E4pds_v6 VMs with 100GB disk, autoscaling 2-5 nodes, swap enabled, dedicated taints for Materialize workloads | -| Managed Identities | AKS cluster identity (used by AKS control plane to provision Azure resources like load balancers and network interfaces) and Workload identity (used by Materialize pods for secure, passwordless authentication to Azure Storage) | - -### Database - -| Resource | Description | -|----------|-------------| -| Azure PostgreSQL Flexible Server | Version 15 | -| SKU | GP_Standard_D2s_v3 (2 vCores, 4GB memory) | -| Storage | 32GB with 7-day backup retention | -| Network Access | Public Network Access is disabled, Private access only (no public endpoint) | -| Database | `materialize` database pre-created | - -### Storage - -| Resource | Description | -|----------|-------------| -| Storage Account | Premium BlockBlobStorage with LRS replication for Materialize persistence | -| Container | `materialize` blob container | -| Access Control 
| Workload Identity federation for Kubernetes service account (passwordless authentication via OIDC) | -| Network Access | Currently allows all traffic (production deployments should restrict to AKS subnet only traffic) | - -### Kubernetes Add-ons - -| Resource | Description | -|----------|-------------| -| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | -| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | - -### Materialize - -| Resource | Description | -|----------|-------------| -| Operator | Materialize Kubernetes operator | -| Instance | Single Materialize instance in `materialize-environment` namespace | -| Load Balancers | Internal Azure Load Balancers for Materialize access | - ---- - -## Getting Started - -### Step 1: Set Required Variables - -Before running Terraform, create a `terraform.tfvars` file with the following variables: - -```hcl -subscription_id = "12345678-1234-1234-1234-123456789012" -resource_group_name = "materialize-demo-rg" -name_prefix = "simple-demo" -location = "westus2" -license_key = "your-materialize-license-key" -tags = { - environment = "demo" -} -``` - -**Required Variables:** -- `subscription_id`: Azure subscription ID -- `resource_group_name`: Name for the resource group (will be created) -- `name_prefix`: Prefix for all resource names -- `location`: Azure region for deployment -- `tags`: Map of tags to apply to resources -- `license_key`: Materialize license key - ---- - -### Step 2: Deploy Materialize - -Run the usual Terraform workflow: - -```bash -terraform init -terraform apply -``` - -## Notes - -*Autoscaling: Uses Azure's native cluster autoscaler that integrates directly with Azure Virtual Machine Scale Sets for automated node scaling. In future we are planning to enhance this by making use of karpenter-provider-azure* - -* You can customize each module independently. -* To reduce cost in your demo environment, you can tweak VM sizes and database tiers in `main.tf`. 
- -***Don't forget to destroy resources when finished:*** - -```bash -terraform destroy -``` diff --git a/doc/user/content/installation/install-on-gcp/_index.md b/doc/user/content/installation/install-on-gcp/_index.md deleted file mode 100644 index 70efeae9f08e6..0000000000000 --- a/doc/user/content/installation/install-on-gcp/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Install on GCP" -description: "Install and upgrade Materialize on GCP" -aliases: - - /self-hosted/install-on-gcp/ - - /self-managed/v25.1/installation/install-on-gcp/ -disable_list: true -menu: - main: - parent: "installation" - identifier: "install-on-gcp" - weight: 40 ---- - -{{% self-managed/materialize-components-sentence %}} - -| Guide | Description | -|-------|-------------| -| [Terraform Provider](/installation/install-on-gcp/terraform-module/) | Install Materialize on GCP using our new unified Terraform Provider | -| [Terraform Provider (legacy)](/installation/install-on-gcp/legacy-terraform-module/) | Install Materialize on GCP using our legacy Terraform Provider | -| [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) | Additional guidelines for Google Cloud deployments | - -## See also - -- [Materialize Operator Configuration](/installation/configuration/) -- [Troubleshooting](/installation/troubleshooting/) -- [Installation](/installation/) diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md b/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md deleted file mode 100644 index 04a23c89bb532..0000000000000 --- a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Terraform Module (legacy)" -description: "" -disable_list: true -disable_toc: true -menu: - main: - parent: "install-on-gcp" - identifier: "install-on-gcp-legacy-terraform-module" - weight: 5 - - ---- - -The tutorials in this section show you how to deploy Materialize using the [Materialize on Google -Cloud Module Legacy Terraform -module](https://github.com/MaterializeInc/terraform-google-materialize). - - -| Guide | Description | -|-------|-------------| -| [Install](/installation/install-on-gcp/legacy-terraform-module/install/) | Install Materialize on GCP | -| [Upgrade](/installation/install-on-gcp/legacy-terraform-module/upgrade/) | Upgrade your Materialize deployment on GCP | -| [Appendix: GCP configuration](/installation/install-on-gcp/legacy-terraform-module/appendix-configuration/) | Configuration for Google Cloud deployments | diff --git a/doc/user/content/installation/install-on-gcp/terraform-module/_index.md b/doc/user/content/installation/install-on-gcp/terraform-module/_index.md deleted file mode 100644 index d4da589a4853b..0000000000000 --- a/doc/user/content/installation/install-on-gcp/terraform-module/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Terraform Module" -description: "" -menu: - main: - parent: "install-on-gcp" - identifier: "install-gcp-terraform" - weight: 5 ---- - -Materialize provides a set of modular Terraform modules that can be used to -deploy all services required for Materialize to run on Google Cloud. -The module is intended to provide a simple set of examples on how to deploy -Materialize. It can be used as is or modules can be taken from the example and -integrated with existing DevOps tooling. 
- -The repository can be found at: ***[Materialize Terraform Self-Managed GCP](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)*** - - -For details on the module structure and customization, see: -* [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) -* [cloud specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp) - -{{< note >}} -{{% self-managed/materialize-components-sentence %}} -{{< /note >}} - -{{< warning >}} - -{{< self-managed/terraform-disclaimer >}} - -{{< /warning >}} - - -## Prerequisites - -- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) -- [GCloud Cli](https://cloud.google.com/sdk/docs/install) -- [`kubectl`](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) -- [kubectl gke plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin) -- [a Google Cloud service account](https://cloud.google.com/iam/docs/service-accounts-create#creating) - -#### License key - -{{< yaml-table data="self_managed/license_key" >}} - ---- - -# Example: Simple Materialize Deployment on GCP - -This example demonstrates how to deploy a complete Materialize environment on GCP using the modular Terraform setup from this repository. - - -## Setup -```shell -git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git -cd materialize-terraform-self-managed/gcp/examples/simple -```` - -## What Gets Created - -This example provisions the following infrastructure: - -### Networking - -| Resource | Description | -|----------|-------------| -| VPC Network | Custom VPC with auto-create subnets disabled | -| Subnet | 192.168.0.0/20 primary range with private Google access enabled | -| Secondary Ranges | Pods: 192.168.64.0/18, Services: 192.168.128.0/20 | -| Cloud Router | For NAT and routing configuration | -| Cloud NAT | For outbound internet access from private nodes | -| VPC Peering | Service networking connection for Cloud SQL private access | - -### Compute - -| Resource | Description | -|----------|-------------| -| GKE Cluster | Regional cluster with Workload Identity enabled | -| Generic Node Pool | e2-standard-8 machines, autoscaling 2-5 nodes, 50GB disk, for general workloads | -| Materialize Node Pool | n2-highmem-8 machines, autoscaling 2-5 nodes, 100GB disk, 1 local SSD, swap enabled, dedicated taints for Materialize workloads | -| Service Account | GKE service account with workload identity binding | - -### Database - -| Resource | Description | -|----------|-------------| -| Cloud SQL PostgreSQL | Private IP only (no public IP) | -| Tier | db-custom-2-4096 (2 vCPUs, 4GB memory) | -| Database | `materialize` database with UTF8 charset | -| User | `materialize` user with auto-generated password | -| Network | Connected via VPC peering for private access | - -### Storage - -| Resource | Description | -|----------|-------------| -| Cloud Storage Bucket | Regional bucket for Materialize persistence | -| Access | HMAC keys for S3-compatible access (Workload Identity service account with storage permissions is configured but not currently used by Materialize for GCS access, in future we will remove HMAC keys and support access to GCS either via Workload Identity Federation or via Kubernetes ServiceAccounts that impersonate IAM service accounts) | -| Versioning | Disabled (for testing; enable in production) | - 
-### Kubernetes Add-ons - -| Resource | Description | -|----------|-------------| -| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | -| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. | - -### Materialize - -| Resource | Description | -|----------|-------------| -| Operator | Materialize Kubernetes operator in `materialize` namespace | -| Instance | Single Materialize instance in `materialize-environment` namespace | -| Load Balancers | GCP Load Balancers for Materialize access | - ---- - -## Required APIs -Your GCP project needs several APIs enabled. Here's what each API does in simple terms: - -```bash -# Enable these APIs in your project -gcloud services enable container.googleapis.com # For creating Kubernetes clusters -gcloud services enable sqladmin.googleapis.com # For creating databases -gcloud services enable cloudresourcemanager.googleapis.com # For managing GCP resources -gcloud services enable servicenetworking.googleapis.com # For private network connections -gcloud services enable iamcredentials.googleapis.com # For security and authentication -``` - -## Getting Started - -### Step 1: Set Required Variables - -Before running Terraform, create a `terraform.tfvars` file with the following variables: - -```hcl -project_id = "my-gcp-project" -name_prefix = "simple-demo" -region = "us-central1" -license_key = "your-materialize-license-key" -labels = { - environment = "demo" - created_by = "terraform" -} -``` - -**Required Variables:** -- `project_id`: GCP project ID -- `name_prefix`: Prefix for all resource names -- `region`: GCP region for deployment -- `labels`: Map of labels to apply to resources -- `license_key`: Materialize license key (required for production use) - ---- - -### Step 2: Deploy Materialize - -Run the usual Terraform workflow: - -```bash -terraform init -terraform apply -``` - -## Notes - -* ***GCP Storage Authentication Limitation:*** Materialize currently only supports HMAC key authentication for GCS access (S3-compatible API). - Current State: The modules configure both HMAC keys and Workload Identity, but Materialize uses HMAC keys for actual storage access. - Future: Native GCS access via Workload Identity Federation or Kubernetes service account impersonation will be supported in a future release, eliminating the need for static credentials. -* You can customize each module independently. -* To reduce cost in your demo environment, you can tweak machine types and database tiers in `main.tf`. - -***Don't forget to destroy resources when finished:*** -```bash -terraform destroy -``` diff --git a/doc/user/content/installation/operational-guidelines.md b/doc/user/content/installation/operational-guidelines.md deleted file mode 100644 index 0ad4a3baf4ae2..0000000000000 --- a/doc/user/content/installation/operational-guidelines.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "Operational guidelines" -description: "" -aliases: - - /self-hosted/operational-guidelines/ -menu: - main: - parent: "installation" - weight: 80 - identifier: "sm-operational-guidelines" ---- - -## Recommended instance types - -As a general guideline, we recommend: - -- ARM-based CPU -- A 1:8 ratio of vCPU to GiB memory is recommend. 
-- When using swap, it is recommend to use a 1:16 ratio of vCPU to GiB local instance storage - -See also the specific cloud provider guidance: - -- [AWS Deployment - guidelines](/installation/install-on-aws/appendix-deployment-guidelines/#recommended-instance-types) - -- [GCP Deployment - guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/#recommended-instance-types) - -- [Azure Deployment - guidelines](/installation/install-on-azure/appendix-deployment-guidelines/#recommended-instance-types) - -## TLS - -When running with TLS in production, run with certificates from an official -Certificate Authority (CA) rather than self-signed certificates. - -## Locally-attached NVMe storage - -Configuring swap on nodes to using locally-attached NVMe storage allows -Materialize to spill to disk when operating on datasets larger than main memory. -This setup can provide significant cost savings and provides a more graceful -degradation rather than OOMing. Network-attached storage (like EBS volumes) can -significantly degrade performance and is not supported. - -Refer to the specific cloud provider guidelines: - -- [AWS Deployment - guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) - -- [GCP Deployment - guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) - -- [Azure Deployment - guidelines](/installation/install-on-azure/appendix-deployment-guidelines/) - -## See also - -- [Configuration](/installation/configuration/) -- [Installation](/installation/) -- [Troubleshooting](/installation/troubleshooting/) diff --git a/doc/user/content/releases/_index.md b/doc/user/content/releases/_index.md index 3cad2ecc32dba..77aa3b140a06b 100644 --- a/doc/user/content/releases/_index.md +++ b/doc/user/content/releases/_index.md @@ -77,7 +77,7 @@ file="shared-content/self-managed/gcp-terraform-v0.6.1-upgrade-notes.md" >}}. - To upgrade if **not** using a Materialize-provided Terraforms, you must prepare your nodes by adding the required labels. For detailed instructions, see [Prepare for swap and upgrade to -v26.0](/installation/upgrade-to-swap/). +v26.0](/self-managed-deployments/appendix/upgrade-to-swap/). ### SASL/SCRAM-SHA-256 support @@ -103,7 +103,7 @@ Starting in v26.0.0, Self-Managed Materialize requires a license key. For new deployments, you configure your license key in the Kubernetes Secret resource during the installation process. For details, see the [installation -guides](/installation/). For existing deployments, you can configure your +guides](/self-managed-deployments/installation/). For existing deployments, you can configure your license key via: ```bash @@ -146,7 +146,7 @@ use the new setting `rolloutStrategy` to specify either: - `WaitUntilReady` (*Default*) - `ImmediatelyPromoteCausingDowntime` -For more information, see [`rolloutStrategy`](/installation/upgrading/#rollout-strategies). +For more information, see [`rolloutStrategy`](/self-managed-deployments/upgrading/#rollout-strategies). ### Terraform helpers @@ -195,7 +195,8 @@ See also Upgrade Notes for release specific notes. {{< include-md file="shared-content/self-managed/upgrade-notes/v26.0.md" >}} -See also [Version specific upgrade notes](/installation/upgrading/#version-specific-upgrade-notes). +See also [Version specific upgrade +notes](/self-managed-deployments/upgrading/#version-specific-upgrade-notes). 
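+
+Before applying any version-specific upgrade notes, it can help to confirm the
+version you are currently running. The following is a minimal sketch, assuming
+the SQL endpoint is reachable locally (for example, via `kubectl port-forward`):
+
+```bash
+# Returns the version string of the running Materialize instance.
+psql "postgres://materialize@localhost:6875/materialize" -c "SELECT mz_version();"
+```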
## See also diff --git a/doc/user/content/security/self-managed/authentication.md b/doc/user/content/security/self-managed/authentication.md index 5c06c58c16798..9884f134d65fd 100644 --- a/doc/user/content/security/self-managed/authentication.md +++ b/doc/user/content/security/self-managed/authentication.md @@ -162,7 +162,7 @@ spec: forceRollout: # Rollout without requiring a version change ``` -For more information on rollout configuration, view our [installation overview](/installation/#rollout-configuration). +For more information on rollout configuration, view our [upgrade overview](/self-managed-deployments/upgrading/#rollout-configuration). {{< include-md file="shared-content/auth-kind-warning.md" >}} diff --git a/doc/user/content/self-managed-deployments/_index.md b/doc/user/content/self-managed-deployments/_index.md new file mode 100644 index 0000000000000..6dd2790e881d9 --- /dev/null +++ b/doc/user/content/self-managed-deployments/_index.md @@ -0,0 +1,347 @@ +--- +title: "Self-Managed Deployments" +description: "Learn about the key components and architecture of self-managed Materialize deployments." +disable_list: true +aliases: + - /self-hosted/concepts/ + - /self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/ +menu: + main: + identifier: "sm-deployments" + weight: 5 +--- + +## Overview + +Self-Managed Materialize deployments on Kubernetes consist of several layers of +components that work together to provide a fully functional database +environment. Understanding these components and how they interact is essential +for deploying, managing, and troubleshooting your Self-Managed Materialize. + +This page provides an overview of the core architectural components in a Self-Managed deployment, from the infrastructure level (Helm chart) down to the application level (clusters and replicas). + +## Architecture layers + +A Self-Managed Materialize deployment is organized into the following layers: + +Layer | Component | Description +------|-----------|------------ +**Infrastructure** | [Helm Chart](#helm-chart) | Package manager component that bootstraps the Kubernetes deployment +**Orchestration** | [Materialize Operator](#materialize-operator) | Kubernetes operator that manages Materialize instances +**Database** | [Materialize Instance](#materialize-instance) | The Materialize database instance itself +**Compute** | [Clusters and Replicas](#clusters-and-replicas) | Isolated compute resources for workloads + +## Helm chart + +The Helm chart is the entry point for deploying Materialize in a self-managed Kubernetes environment. It serves as a package manager component that defines and deploys the Materialize Operator. + +### Working with the Helm chart + +You interact with the Helm chart through standard Helm commands. 
For example:
+
+- To add the Materialize Helm chart repository:
+
+  ```bash
+  helm repo add materialize https://materializeinc.github.io/materialize
+  ```
+
+- To update the repository index:
+
+  ```bash
+  helm repo update materialize
+  ```
+
+- To install the Materialize Helm chart and deploy the Materialize Operator and
+  other resources:
+
+  ```bash
+  helm install materialize materialize/materialize-operator
+  ```
+
+- To upgrade the Materialize Helm chart (and the Materialize Operator and
+  other resources):
+
+  ```bash
+  helm upgrade materialize materialize/materialize-operator
+  ```
+
+- To uninstall the Helm chart (and the Materialize Operator and other
+  resources):
+
+  ```bash
+  helm uninstall materialize
+  ```
+
+### What gets installed
+
+```bash
+helm install materialize materialize/materialize-operator
+```
+
+When you install the Materialize Helm chart, it:
+
+- Deploys the **Materialize Operator** as a Kubernetes deployment.
+- Creates necessary cluster-wide resources (CRDs, RBAC roles, service accounts).
+- Configures operator settings and permissions.
+
+Once installed, the **Materialize Operator** handles the deployment and
+management of Materialize instances.
+
+## Materialize Operator
+
+The Materialize Operator (implemented as `orchestratord`) is a Kubernetes operator that automates the deployment and lifecycle management of Materialize instances. It implements the [Kubernetes operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) to extend Kubernetes with domain-specific knowledge about Materialize.
+
+### Managed resources
+
+The operator watches for Materialize custom resources and creates/manages all the Kubernetes resources required to run a Materialize instance, including:
+
+- **Namespaces**: Isolated Kubernetes namespaces for each instance
+- **Services**: Network services for connecting to Materialize
+- **Network Policies**: Network isolation and security rules
+- **Certificates**: TLS certificates for secure connections
+- **ConfigMaps and Secrets**: Configuration and sensitive data
+- **Deployments**: These support the `balancerd` and `console` pods used as the ingress layer for Materialize.
+- **StatefulSets**: `environmentd` and `clusterd`, which are the database control plane and compute resources, respectively.
+
+### Configuration
+
+For configuration options for the Materialize Operator, see
+the [Materialize Operator Configuration
+page](/self-managed-deployments/appendix/configuration/).
+
+## Materialize Instance
+
+A Materialize instance is the actual database that you connect to and interact with. Each instance is an isolated Materialize deployment with its own data, configuration, and compute resources.
+
+### Components
+
+When you create a Materialize instance, the operator deploys three core components as Kubernetes resources:
+
+- **`environmentd`**: The main database control plane, deployed as a
+  StatefulSet.
+
+  **`environmentd`** runs as a Kubernetes pod and is the primary component of a
+  Materialize instance. It houses the control plane and contains:
+
+  - **Adapter**: The SQL interface that handles client connections, query parsing, and planning
+  - **Storage Controller**: Maintains durable metadata for storage
+  - **Compute Controller**: Orchestrates compute resources and manages system state
+
+  On startup, `environmentd` will create several built-in clusters.
+
+  When you connect to Materialize with a SQL client (e.g., `psql`), you're connecting to `environmentd`.
+ +- **balancerd**: A pgwire and http proxy used to connect to environmentd, + deployed as a Deployment. +- **console**: Web-based administration interface, deployed as a Deployment. + +### Instance responsibilities + +A Materialize instance manages: + +- **SQL objects**: Sources, views, materialized views, indexes, sinks +- **Schemas and databases**: Logical organization of objects +- **User connections**: SQL client connections and authentication +- **Catalog metadata**: System information about all objects and configuration +- **Compute orchestration**: Coordination of work across clusters and replicas + + +### Deploying with the operator + +To deploy Materialize instances with the operator, create and apply Materialize +custom resources definitions(CRDs). For a full list of fields available for the +Materialize CR, see [Materialize CRD Field +Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/). + +```yaml +apiVersion: materialize.cloud/v1alpha1 +kind: Materialize +metadata: + name: 12345678-1234-1234-1234-123456789012 + namespace: materialize-environment +spec: + environmentdImageRef: materialize/environmentd:{{< self-managed/versions/get-latest-version >}} +# ... additional fields omitted for brevity +``` + +When you first apply the Materialize custom resource, the operator automatically +creates all required Kubernetes resources. + +### Modifying the custom resource + +To modify a custom resource, update the CRD with your changes, including the +`requestRollout` field with a new UUID value. When you apply the CRD, the +operator will roll out the changes. + +{{< note >}} If you do not specify a new `requestRollout` UUID, the operator +watches for updates but does not roll out the changes. +{{< /note >}} + +For a full list of fields available for the Materialize CR, see [Materialize CRD +Field +Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/). + +See also: + +- [Upgrade Overview](/self-managed-deployments/upgrading/) + +### Connecting to an instance + +Once deployed, you interact with a Materialize instance through standard +PostgreSQL-compatible tools and drivers: + +```bash +# Connect with psql +psql "postgres://materialize@:6875/materialize" +``` + +Once connected, you can issue SQL commands to create sources, define views, run queries, and manage the database: + +```sql +-- Create a source +CREATE SOURCE my_source FROM KAFKA ...; + +-- Create a materialized view +CREATE MATERIALIZED VIEW my_view AS + SELECT ... FROM my_source ...; + +-- Query the view +SELECT * FROM my_view; +``` + +## Clusters and Replicas + +Clusters are isolated pools of compute resources that execute workloads in Materialize. They provide resource isolation and fault tolerance for your data processing pipelines. + +For a comprehensive overview of clusters in Materialize, see the [Clusters concept page](/concepts/clusters/). + +### Cluster architecture + +- **Clusters**: Logical groupings of compute resources dedicated to specific workloads (sources, sinks, indexes, materialized views, queries) +- **Replicas**: Physical instantiations of a cluster's compute resources, deployed as Kubernetes StatefulSets + +Each replica contains identical compute resources and processes the same data independently, providing fault tolerance and high availability. 
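+
+As a quick sketch of how replicas surface in Kubernetes (assuming the default
+`materialize-environment` namespace used in these examples; the exact resource
+names are generated by the operator), you can list the StatefulSets and pods
+that back them:
+
+```bash
+# Each cluster replica is backed by a StatefulSet created by the operator.
+kubectl get statefulsets -n materialize-environment
+
+# The pods in those StatefulSets run the clusterd compute processes.
+kubectl get pods -n materialize-environment
+```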
+ +### Kubernetes resources + +When you create a cluster with one or more replicas in Materialize, the instance coordinates with the operator to create: + +- One or more **StatefulSet** resources (one per replica) +- **Pods** within each StatefulSet that execute the actual compute workload +- **Persistent volumes** (if configured) for scratch disk space + +For example: + +```sql +-- Create a cluster with 2 replicas +CREATE CLUSTER my_cluster SIZE = '100cc', REPLICATION FACTOR = 2; +``` + +This creates two separate StatefulSets in Kubernetes, each running compute processes. + +### Managing clusters + +You interact with clusters primarily through SQL: + +```sql +-- Create a cluster +CREATE CLUSTER ingest_cluster SIZE = '50cc', REPLICATION FACTOR = 1; + +-- Use the previous cluster for a source +CREATE SOURCE my_source + IN CLUSTER ingest_cluster + FROM KAFKA ...; + +-- Create a cluster for materialized views +CREATE CLUSTER compute_cluster SIZE = '100cc', REPLICATION FACTOR = 2; + +-- Use the previous cluster for a materialized view +CREATE MATERIALIZED VIEW my_view + IN CLUSTER compute_cluster AS + SELECT ... FROM my_source ...; + +-- Resize a cluster +ALTER CLUSTER compute_cluster SET (SIZE = '200cc'); + +``` + +Materialize handles the underlying Kubernetes resource creation and management automatically. + +## Workflow + +The following outlines the workflow process, summarizing how the various +components work together: + +1. **Install the Helm chart**: This deploys the Materialize Operator to your + Kubernetes cluster. + +1. **Create a Materialize instance**: Apply a Materialize custom resource. The + operator detects this and creates all necessary Kubernetes resources, + including the `environmentd`, `balancerd`, and `console` pods. + +1. **Connect to the instance**: Use a SQL client to connect to the + `environmentd` service endpoint. + +1. **Create clusters**: Issue SQL commands to create clusters. Materialize + coordinates with the operator to provision StatefulSets for replicas. + +1. **Run your workloads**: Create sources, materialized views, indexes, and + sinks on your clusters. + +## Terraform Modules + +To help you get started, Materialize provides Terraform modules. + +{{< important >}} +These modules are intended for evaluation/demonstration purposes and for serving +as a template when building your own production deployment. The modules should +not be directly relied upon for production deployments: **future releases of the +modules will contain breaking changes.** Instead, to use as a starting point for +your own production deployment, either: + +- Fork the repo and pin to a specific version; or + +- Use the code as a reference when developing your own deployment. + +{{}} + +{{< tabs >}} +{{< tab "Unified Terraform Modules (New!)" >}} +### Unified Terraform Modules + +Materialize provides a [**unified Terraform +module**](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main?tab=readme-ov-file#materialize-self-managed-terraform-modules), +which provides concrete examples and an opinionated model for deploying Materialize. 
+ +{{< yaml-table data="self_managed/terraform_list" >}} + +{{< /tab >}} +{{< tab "Legacy Terraform Modules" >}} +### Legacy Terraform Modules + +{{< yaml-table data="self_managed/terraform_list_legacy" >}} +{{< /tab >}} +{{< /tabs >}} + +## Relationship to Materialize concepts + +Self-managed deployments implement the same core Materialize concepts as the Cloud offering: + +- [**Clusters**](/concepts/clusters/): Identical behavior, but backed by Kubernetes StatefulSets +- [**Sources**](/concepts/sources/): Same functionality for ingesting data +- [**Views**](/concepts/views/): Same query semantics and incremental maintenance +- [**Indexes**](/concepts/indexes/): Same in-memory query acceleration +- [**Sinks**](/concepts/sinks/): Same data egress capabilities + +The Self-Managed deployment model adds the Kubernetes infrastructure layer (Helm +chart and operator) but does not change how you interact with Materialize at the +SQL level. + +## Related pages + +- [Installation guides](/self-managed-deployments/installation/) +- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) +- [Materialize CRD Field Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) +- [Operational guidelines](/self-managed-deployments/deployment-guidelines/) +- [Clusters concept page](/concepts/clusters/) +- [Materialize architecture overview](/concepts/) diff --git a/doc/user/content/self-managed-deployments/appendix/_index.md b/doc/user/content/self-managed-deployments/appendix/_index.md new file mode 100644 index 0000000000000..fe6aadb199778 --- /dev/null +++ b/doc/user/content/self-managed-deployments/appendix/_index.md @@ -0,0 +1,18 @@ +--- +title: "Appendix" +description: "" +menu: + main: + parent: "sm-deployments" + identifier: "sm-deployments-appendix" + weight: 99 +disable_list: true +--- + +## Table of contents + +- [Appendix: Materialize Operator Configuration Parameters](./configuration/) +- [Appendix: Materialize CRD Field Descriptions](./materialize-crd-field-descriptions/) +- [Appendix: Cluster sizes](./appendix-cluster-sizes/) +- [Appendix: Prepare for swap and upgrade to v26.0](./upgrade-to-swap/) +- [Appendix: Self-managed release versions](./release-versions/) diff --git a/doc/user/content/installation/appendix-cluster-sizes.md b/doc/user/content/self-managed-deployments/appendix/appendix-cluster-sizes.md similarity index 93% rename from doc/user/content/installation/appendix-cluster-sizes.md rename to doc/user/content/self-managed-deployments/appendix/appendix-cluster-sizes.md index 848609a04cb93..ddf2804e33a10 100644 --- a/doc/user/content/installation/appendix-cluster-sizes.md +++ b/doc/user/content/self-managed-deployments/appendix/appendix-cluster-sizes.md @@ -1,13 +1,14 @@ --- -title: "Appendix: Cluster sizes" +title: "Cluster sizes" description: "Reference page on self-managed cluster sizes" menu: main: - parent: "installation" + parent: "sm-deployments-appendix" identifier: "appendix-cluster-sizes" weight: 96 aliases: - /self-managed/v25.1/sql/appendix-cluster-sizes/ + - /installation/appendix-cluster-sizes/ --- ## Default Cluster Sizes diff --git a/doc/user/content/installation/configuration.md b/doc/user/content/self-managed-deployments/appendix/configuration.md similarity index 90% rename from doc/user/content/installation/configuration.md rename to doc/user/content/self-managed-deployments/appendix/configuration.md index 181e3ee7ebfc1..da618b25448e5 100644 --- a/doc/user/content/installation/configuration.md +++ 
b/doc/user/content/self-managed-deployments/appendix/configuration.md @@ -4,10 +4,12 @@ description: "Configuration reference for the Materialize Operator Helm chart" aliases: - /self-hosted/configuration/ - /self-managed/v25.1/installation/configuration/ + - /self-managed/nstallation/configuration/ + - /installation/configuration/ menu: main: - parent: "installation" - weight: 70 + parent: "sm-deployments-appendix" + weight: 10 --- ## Configure the Materialize operator diff --git a/doc/user/content/self-managed-deployments/appendix/legacy/_index.md b/doc/user/content/self-managed-deployments/appendix/legacy/_index.md new file mode 100644 index 0000000000000..3c535a60cb270 --- /dev/null +++ b/doc/user/content/self-managed-deployments/appendix/legacy/_index.md @@ -0,0 +1,11 @@ +--- +title: "Legacy Terraform: Releases and configurations" +description: "" +menu: + main: + parent: "sm-deployments-appendix" + identifier: "sm-deployments-appendix-legacy" + weight: 96 +--- + +## Table of contents diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-aws.md similarity index 93% rename from doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md rename to doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-aws.md index 70db19b5f3048..90007290e71d8 100644 --- a/doc/user/content/installation/install-on-aws/legacy-terraform-module/appendix-configuration.md +++ b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-aws.md @@ -1,11 +1,11 @@ --- -title: "Appendix: Required configuration" +title: "Required configuration (Legacy AWS Terraform)" description: "Required configuration for Materialize on AWS Terraform." menu: main: - parent: "install-on-aws-legacy-terraform-module" - identifier: "appendix-aws-provider-config" - weight: 50 + parent: "sm-deployments-appendix-legacy" + identifier: "appendix-legacy-aws-provider-config" + weight: 20 aliases: - /installation/install-on-aws/appendix-aws-provider-configuration --- diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-azure.md similarity index 95% rename from doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md rename to doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-azure.md index 2146660c94f74..f4157b0b0da3d 100644 --- a/doc/user/content/installation/install-on-azure/legacy-terraform-module/appendix-configuration.md +++ b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-azure.md @@ -1,11 +1,11 @@ --- -title: "Appendix: Required configuration" +title: "Required configuration (Legacy Azure Terraform)" description: "Required configuration for Materialize on Azure Terraform." 
menu: main: - parent: "install-on-azure-legacy-terraform-module" - identifier: "appendix-azure-config" - weight: 50 + parent: "sm-deployments-appendix-legacy" + identifier: "appendix-legacy-azure-config" + weight: 30 aliases: - /installation/install-on-azure/appendix-azure-provider-configuration --- diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-gcp.md similarity index 94% rename from doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md rename to doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-gcp.md index 288ac69839a61..6cd67c573681d 100644 --- a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/appendix-configuration.md +++ b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-gcp.md @@ -1,11 +1,11 @@ --- -title: "Appendix: Configuration" +title: "Required configuration (Legacy GCP Terraform)" description: "Required configuration for Materialize on GCP Terraform (legacy)." menu: main: - parent: "install-on-gcp-legacy-terraform-module" - identifier: "legacy-terraform-module-appendix-configuration" - weight: 50 + parent: "sm-deployments-appendix-legacy" + identifier: "appendix-legacy-gcp-configuration" + weight: 40 aliases: - /installation/install-on-gcp/appendix-gcp-configuration/ --- diff --git a/doc/user/content/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases.md b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases.md new file mode 100644 index 0000000000000..08e65a9299eaa --- /dev/null +++ b/doc/user/content/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases.md @@ -0,0 +1,38 @@ +--- +title: "Legacy Terraform Releases" +description: +menu: + main: + parent: "sm-deployments-appendix-legacy" + weight: 10 +aliases: + - /installation/appendix-terraforms/ +--- + + +## Legacy Terraform Modules + +{{< yaml-table data="self_managed/terraform_list_legacy" >}} + +## Materialize on AWS Terraform module (Legacy) {#materialize-on-aws-terraform-module} + +{{< yaml-table data="self_managed/aws_terraform_versions" >}} + +{{% self-managed/aws-terraform-upgrade-notes %}} + +See also [Upgrade Notes]( +https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#upgrade-notes) +for release-specific upgrade notes. 
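+
+If you depend on one of these legacy modules, prefer pinning to a specific
+release rather than tracking `main`. A minimal sketch, using `v0.6.1` purely as
+an example tag (substitute the release you have validated):
+
+```bash
+# Clone the legacy AWS module at a pinned release tag for local use or forking.
+git clone --branch v0.6.1 --depth 1 \
+  https://github.com/MaterializeInc/terraform-aws-materialize.git
+```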
+ + +## Materialize on Azure Terraform module (Legacy){#materialize-on-azure-terraform-module} + +{{< yaml-table data="self_managed/azure_terraform_versions" >}} + +{{% self-managed/azure-terraform-upgrade-notes %}} + +## Materialize on GCP Terraform module (Legacy) {#materialize-on-gcp-terraform-module} + +{{< yaml-table data="self_managed/gcp_terraform_versions" >}} + +{{% self-managed/gcp-terraform-upgrade-notes %}} diff --git a/doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md b/doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md new file mode 100644 index 0000000000000..480f04cba30ed --- /dev/null +++ b/doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md @@ -0,0 +1,13 @@ +--- +title: "Materialize CRD Field Descriptions" +description: "Reference page on Materialize CRD Fields" +menu: + main: + parent: "sm-deployments-appendix" + identifier: "materialize-crd-field-descriptions" + weight: 15 +aliases: + - /installation/appendix-materialize-crd-field-descriptions/ +--- + +{{% self-managed/materialize-crd-descriptions %}} diff --git a/doc/user/content/installation/upgrade-to-swap.md b/doc/user/content/self-managed-deployments/appendix/upgrade-to-swap.md similarity index 83% rename from doc/user/content/installation/upgrade-to-swap.md rename to doc/user/content/self-managed-deployments/appendix/upgrade-to-swap.md index fe6da754dd4b8..0caffaab8e34d 100644 --- a/doc/user/content/installation/upgrade-to-swap.md +++ b/doc/user/content/self-managed-deployments/appendix/upgrade-to-swap.md @@ -1,11 +1,13 @@ --- -title: "Appendix: Prepare for swap and upgrade to v26.0" +title: "Prepare for swap and upgrade to v26.0" description: "Upgrade procedure for v26.0 if not using Materialize Terraform." menu: main: - parent: "installation" + parent: "sm-deployments-appendix" weight: 95 identifier: "helm-upgrade-from-v25.2.12-aws" +aliases: + - /installation/upgrade-to-swap/ --- {{< annotation type="Disambiguation" >}} @@ -27,7 +29,7 @@ file="shared-content/self-managed/gcp-terraform-v0.6.1-upgrade-notes.md" >}}. - {{< include-md file="shared-content/self-managed/azure-terraform-v0.6.1-upgrade-notes.md" >}}. -See also [Upgrade Overview](/installation/upgrading/). +See also [Upgrade Overview](/self-managed-deployments/upgrading/). 
{{< /annotation >}} diff --git a/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md b/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md new file mode 100644 index 0000000000000..9dc3c6dbed28e --- /dev/null +++ b/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md @@ -0,0 +1,18 @@ +--- +title: "Deployment guidelines" +description: "" +disable_list: false +menu: + main: + parent: "sm-deployments" + identifier: "deployment-guidelines" + weight: 60 +aliases: + - /installation/operational-guidelines/ +--- + +{{% self-managed/materialize-components-sentence %}} + +## Available deployment guidelines + +The following provides guidelines for cloud-specific deployments: diff --git a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md b/doc/user/content/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines.md similarity index 63% rename from doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md rename to doc/user/content/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines.md index 248f169d4b7b4..c8e54a83baed2 100644 --- a/doc/user/content/installation/install-on-aws/appendix-deployment-guidelines.md +++ b/doc/user/content/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines.md @@ -1,22 +1,25 @@ --- -title: "Appendix: AWS deployment guidelines" -description: "AWS environment setup/deployment guidelines" -aliases: - - /self-hosted/operational-guidelines/ +title: "AWS deployment guidelines" +description: "General guidelines when deploying Self-Managed Materialize on AWS." +disable_list: true menu: main: - parent: "install-on-aws" + parent: "deployment-guidelines" identifier: "aws-deployment-guidelines" - weight: 40 + weight: 10 +aliases: + - /installation/install-on-aws/appendix-deployment-guidelines/ --- +{{% self-managed/materialize-components-sentence %}} + ## Recommended instance types As a general guideline, we recommend: - ARM-based CPU - A 1:8 ratio of vCPU to GiB memory. -- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. +- A 8:1 ratio of GiB local instance storage to GiB memory when using swap. {{% self-managed/aws-recommended-instances %}} @@ -30,13 +33,18 @@ significantly degrade performance and is not supported. ### Swap support -***New Unified Terraform*** +{{< tabs >}} +{{< tab "New Unified Terraform" >}} + +#### New Unified Terraform The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws/examples/simple) supports configuring swap out of the box. -***Legacy Terraform*** +{{< /tab >}} +{{< tab "Legacy Terraform" >}} +#### Legacy Terraform -The Legacy Terraform provider, adds preliminary swap support in v0.6.1, via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_swap_enabled) variable. +The Legacy Terraform provider adds preliminary swap support in v0.6.1, via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-aws-materialize?tab=readme-ov-file#input_swap_enabled) variable. With this change, the Terraform: - Creates a node group for Materialize. - Configures NVMe instance store volumes as swap using a daemonset. @@ -48,13 +56,15 @@ See [Upgrade Notes](https://github.com/MaterializeInc/terraform-aws-materialize? 
If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. {{< /note >}} +{{< /tab >}} +{{< /tabs >}} + ## TLS When running with TLS in production, run with certificates from an official Certificate Authority (CA) rather than self-signed certificates. -## See also +## Upgrading guideline -- [Configuration](/installation/configuration/) -- [Installation](/installation/) -- [Troubleshooting](/installation/troubleshooting/) +{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" +>}} diff --git a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md b/doc/user/content/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines.md similarity index 76% rename from doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md rename to doc/user/content/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines.md index b31df4552a7c0..aee6fa39ec978 100644 --- a/doc/user/content/installation/install-on-azure/appendix-deployment-guidelines.md +++ b/doc/user/content/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines.md @@ -1,20 +1,23 @@ --- -title: "Appendix: Azure deployment guidelines" -description: "Azure environment setup/deployment guidelines" +title: "Azure deployment guidelines" +description: "General guidelines when deploying Self-Managed Materialize on Azure." +disable_list: true menu: main: - parent: "install-on-azure" + parent: "deployment-guidelines" identifier: "azure-deployment-guidelines" - weight: 40 + weight: 20 +aliases: + - /installation/install-on-azure/appendix-deployment-guidelines/ --- ## Recommended instance types As a general guideline, we recommend: -- ARM-based CPU +- ARM-based CPU. - A 1:8 ratio of vCPU to GiB memory. -- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. +- An 8:1 ratio of GiB local instance storage to GiB memory when using swap. ### Recommended Azure VM Types with Local NVMe Disks @@ -30,12 +33,12 @@ Azure VM Types with Local NVMe Disk: | Standard_E16pds_v6 | 16 | 128 GiB | 600 GiB | ~4.7:1 | | Standard_E32pds_v6 | 32 | 256 GiB | 1,200 GiB | ~4.7:1 | -{{< important >}} +{{< warning >}} -These VM types provide **ephemeral** local NVMe SSD disks. Data is lost -when the VM is stopped or deleted. +These VM types provide **ephemeral** local NVMe SSD disks. Data is +**lost** when the VM is stopped or deleted. -{{}} +{{}} ## Locally-attached NVMe storage @@ -47,11 +50,15 @@ significantly degrade performance and is not supported. ### Swap support -***New Unified Terraform*** +{{< tabs >}} +{{< tab "New Unified Terraform" >}} +#### New Unified Terraform The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure/examples/simple) supports configuring swap out of the box. -***Legacy Terraform*** +{{< /tab >}} +{{< tab "Legacy Terraform" >}} +#### Legacy Terraform The Legacy Terraform provider, adds preliminary swap support in v0.6.1, via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-azurerm-materialize?tab=readme-ov-file#input_swap_enabled) variable. 
With this change, the Terraform: @@ -65,6 +72,9 @@ See [Upgrade Notes](https://github.com/MaterializeInc/terraform-azurerm-material If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. {{< /note >}} +{{< /tab >}} +{{< /tabs >}} + ## Recommended Azure Blob Storage Materialize writes **block** blobs on Azure. As a general guideline, we @@ -75,8 +85,7 @@ recommend **Premium block blob** storage accounts. When running with TLS in production, run with certificates from an official Certificate Authority (CA) rather than self-signed certificates. -## See also +## Upgrading guideline -- [Configuration](/installation/configuration/) -- [Installation](/installation/) -- [Troubleshooting](/installation/troubleshooting/) +{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" +>}} diff --git a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md b/doc/user/content/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines.md similarity index 87% rename from doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md rename to doc/user/content/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines.md index 87cc1c9e2e783..9dad9976dd44e 100644 --- a/doc/user/content/installation/install-on-gcp/appendix-deployment-guidelines.md +++ b/doc/user/content/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines.md @@ -1,20 +1,23 @@ --- -title: "Appendix: GCP deployment guidelines" -description: "GCP environment setup/deployment guidelines" +title: "GCP deployment guidelines" +description: "General guidelines when deploying Self-Managed Materialize on GCP." +disable_list: true menu: main: - parent: "install-on-gcp" + parent: "deployment-guidelines" identifier: "gcp-deployment-guidelines" - weight: 40 + weight: 30 +aliases: + - /installation/install-on-gcp/appendix-deployment-guidelines/ --- ## Recommended instance types As a general guideline, we recommend: -- ARM-based CPU +- ARM-based CPU. - A 1:8 ratio of vCPU to GiB memory. -- When using swap, use a 8:1 ratio of GiB local instance storage to GiB memory. +- An 8:1 ratio of GiB local instance storage to GiB memory when using swap. When operating on GCP in production, we recommend the following machine types that support local SSD attachment: @@ -72,11 +75,16 @@ significantly degrade performance and is not supported. ### Swap support -***New Unified Terraform*** +{{< tabs >}} +{{< tab "New Unified Terraform" >}} + +#### New Unified Terraform The unified Materialize [Terraform module](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp/examples/simple) supports configuring swap out of the box. -***Legacy Terraform*** +{{< /tab >}} +{{< tab "Legacy Terraform" >}} +#### Legacy Terraform The Legacy Terraform provider, adds preliminary swap support in v0.6.1, via the [`swap_enabled`](https://github.com/MaterializeInc/terraform-google-materialize?tab=readme-ov-file#input_swap_enabled) variable. With this change, the Terraform: @@ -89,7 +97,8 @@ See [Upgrade Notes](https://github.com/MaterializeInc/terraform-google-materiali {{< note >}} If deploying `v25.2`, Materialize clusters will not automatically use swap unless they are configured with a `memory_request` less than their `memory_limit`. In `v26`, this will be handled automatically. 
{{< /note >}} - +{{< /tab >}} +{{< /tabs >}} ## CPU affinity @@ -102,9 +111,7 @@ to substantially improve the performance of compute-bound workloads. When running with TLS in production, run with certificates from an official Certificate Authority (CA) rather than self-signed certificates. +## Upgrading guideline -## See also - -- [Configuration](/installation/configuration/) -- [Installation](/installation/) -- [Troubleshooting](/installation/troubleshooting/) +{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" +>}} diff --git a/doc/user/content/installation/faq.md b/doc/user/content/self-managed-deployments/faq.md similarity index 81% rename from doc/user/content/installation/faq.md rename to doc/user/content/self-managed-deployments/faq.md index 4107064488008..710a25a4d0f5f 100644 --- a/doc/user/content/installation/faq.md +++ b/doc/user/content/self-managed-deployments/faq.md @@ -1,11 +1,12 @@ --- -title: "FAQ: Self-managed installation" -description: "Frequently asked questions about self-managed installations." +title: "FAQ: Self-managed deployments" +description: "Frequently asked questions about self-managed deployments." aliases: - /self-hosted/faq/ + - /installation/faq/ menu: main: - parent: "installation" + parent: "sm-deployments" weight: 92 --- diff --git a/doc/user/content/self-managed-deployments/installation/_index.md b/doc/user/content/self-managed-deployments/installation/_index.md new file mode 100644 index 0000000000000..0305bdc8ddbf4 --- /dev/null +++ b/doc/user/content/self-managed-deployments/installation/_index.md @@ -0,0 +1,25 @@ +--- +title: "Installation" +description: "Installation guides for Self-Managed Materialize." +disable_list: true +menu: + main: + parent: "sm-deployments" + identifier: "installation" + weight: 5 +aliases: + - /self-managed/v25.1/installation/ + - /installation/ +--- + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-intro" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-helm" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-unified" %}} + +{{% include-from-yaml data="self_managed/installation" +name="installation-landing-guides-legacy" %}} diff --git a/doc/user/content/self-managed-deployments/installation/install-on-aws.md b/doc/user/content/self-managed-deployments/installation/install-on-aws.md new file mode 100644 index 0000000000000..2a3143031959b --- /dev/null +++ b/doc/user/content/self-managed-deployments/installation/install-on-aws.md @@ -0,0 +1,275 @@ +--- +title: "Install on AWS" +description: "Install Materialize on AWS using the Unified Terraform module." +aliases: + - /self-hosted/install-on-aws/ + - /self-managed/v25.1/installation/install-on-aws/ + - /installation/install-on-aws/ +disable_list: true +menu: + main: + parent: "installation" + identifier: "install-on-aws" + weight: 20 +--- + +Materialize provides a set of modular [Terraform +modules](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +that can be used to deploy all services required for Materialize to run on AWS. +The module is intended to provide a simple set of examples on how to deploy +Materialize. It can be used as is or modules can be taken from the example and +integrated with existing DevOps tooling. 
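Before working through the example below, you may want to confirm that the command-line tools listed later under Prerequisites are installed and on your `PATH`. A minimal sanity check (the exact supported versions are listed in the Prerequisites section):

```bash
# Quick check that the CLIs used in this guide are available locally.
terraform version
aws --version
kubectl version --client
helm version
```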
+ +{{% self-managed/materialize-components-sentence %}} The example on this page +deploys a complete Materialize environment on AWS using the modular Terraform +setup from this repository. + + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + + +## What Gets Created + +This example provisions the following infrastructure: + +### Networking + +| Resource | Description | +|----------|-------------| +| VPC | 10.0.0.0/16 with DNS hostnames and support enabled | +| Subnets | 3 private subnets (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) and 3 public subnets (10.0.101.0/24, 10.0.102.0/24, 10.0.103.0/24) across availability zones us-east-1a, us-east-1b, us-east-1c | +| NAT Gateway | Single NAT Gateway for all private subnets | +| Internet Gateway | For public subnet connectivity | + +### Compute + +| Resource | Description | +|----------|-------------| +| EKS Cluster | Version 1.32 with CloudWatch logging (API, audit) | +| Base Node Group | 2 nodes (t4g.medium) for Karpenter and CoreDNS | +| Karpenter | Auto-scaling controller with two node classes: Generic nodepool (t4g.xlarge instances for general workloads) and Materialize nodepool (r7gd.2xlarge instances with swap enabled and dedicated taints to run materialize instance workloads) | + +### Database + +| Resource | Description | +|----------|-------------| +| RDS PostgreSQL | Version 15, db.t3.large instance | +| Storage | 50GB allocated, autoscaling up to 100GB | +| Deployment | Single-AZ (non-production configuration) | +| Backups | 7-day retention | +| Security | Dedicated security group with access from EKS cluster and nodes | + +### Storage + +| Resource | Description | +|----------|-------------| +| S3 Bucket | Dedicated bucket for Materialize persistence | +| Encryption | Disabled (for testing; enable in production) | +| Versioning | Disabled (for testing; enable in production) | +| IAM Role | IRSA role for Kubernetes service account access | + +### Kubernetes Add-ons + +| Resource | Description | +|----------|-------------| +| AWS Load Balancer Controller | For managing Network Load Balancers | +| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | +| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. 
|
+
+### Materialize
+
+| Resource | Description |
+|----------|-------------|
+| Operator | Materialize Kubernetes operator |
+| Instance | Single Materialize instance in `materialize-environment` namespace |
+| Network Load Balancer | Dedicated internal NLB for Materialize access {{< yaml-table data="self_managed/default_ports" >}} |
+
+
+## Prerequisites
+
+### AWS Account Requirements
+
+An active AWS account with appropriate permissions to create:
+- EKS clusters
+- RDS instances
+- S3 buckets
+- VPCs and networking resources
+- IAM roles and policies
+
+### Required Tools
+
+- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform)
+- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
+- [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+- [Helm 3.2.0+](https://helm.sh/docs/intro/install/)
+
+
+### License Key
+
+{{< yaml-table data="self_managed/license_key" >}}
+
+## Getting started: Simple example
+
+{{< warning >}}
+
+{{< self-managed/terraform-disclaimer >}}
+
+{{< /warning >}}
+
+### Step 1: Set Up the Environment
+
+1. Open a terminal window.
+
+1. Clone the Materialize Terraform repository and go to the
+   `aws/examples/simple` directory.
+
+    ```bash
+    git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git
+    cd materialize-terraform-self-managed/aws/examples/simple
+    ```
+
+### Step 2: Configure Terraform Variables
+
+1. Create a `terraform.tfvars` file with the following variables:
+
+   - `name_prefix`: Prefix for all resource names (e.g., `simple-demo`)
+   - `aws_region`: AWS region for deployment (e.g., `us-east-1`)
+   - `aws_profile`: AWS CLI profile to use
+   - `license_key`: Materialize license key
+   - `tags`: Map of tags to apply to resources
+
+    ```hcl
+    name_prefix = "simple-demo"
+    aws_region  = "us-east-1"
+    aws_profile = "your-aws-profile"
+    license_key = "your-materialize-license-key"
+    tags = {
+      environment = "demo"
+    }
+    ```
+
+### Step 3: Apply the Terraform
+
+1. Initialize the Terraform directory to download the required providers
+   and modules:
+
+    ```bash
+    terraform init
+    ```
+
+1. Apply the Terraform configuration to create the infrastructure.
+
+   - To deploy with the default **internal NLB** for Materialize access:
+
+     ```bash
+     terraform apply
+     ```
+
+   - To deploy with **public NLB** for Materialize access:
+
+     ```bash
+     terraform apply -var="internal=false"
+     ```
+
+   If you are satisfied with the planned changes, type `yes` when prompted
+   to proceed.
+
+
+1. From the output, you will need the following fields to connect:
+   - `dns_name`
+   - `external_login_password_mz_system`
+
+   {{< tip >}}
+
+   To get the unredacted value for `external_login_password_mz_system`,
+   you can run `terraform output -json external_login_password_mz_system`.
+
+   {{< /tip >}}
+
+### Step 4. Optional. Verify the deployment.
+
+1. Configure `kubectl` to connect to your cluster:
+
+    ```bash
+    aws eks update-kubeconfig --name <cluster-name> --region <region>
+    ```
+
+1. Check the status of your deployment:
+   {{% include-from-yaml data="self_managed/installation"
+   name="installation-verify-status" %}}
+
+### Step 5: Connect to Materialize
+
+Using the `dns_name` and `external_login_password_mz_system` from the Terraform
+output, you can connect to Materialize via the Materialize Console to create
+your users.
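If you no longer have the `terraform apply` output on hand, a quick way to re-print both values is to query the Terraform outputs again from the `aws/examples/simple` directory. This is a minimal sketch; it assumes `dns_name` is a plain string output, while the password output must be requested explicitly because it is sensitive:

```bash
# Re-read the connection details from the Terraform state.
terraform output -raw dns_name
terraform output -json external_login_password_mz_system
```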
+
+
+{{< note >}}
+
+If using an **internal Network Load Balancer (NLB)** for your Materialize
+access, you can connect from inside the same VPC or from networks that are
+privately connected to it.
+
+{{< /note >}}
+
+1. To connect to the Materialize Console, open a browser to
+   `https://<dns_name>:8080`, substituting your `<dns_name>`.
+
+   {{< tip >}}
+
+   {{% include-from-yaml data="self_managed/installation"
+   name="install-uses-self-signed-cluster-issuer" %}}
+
+   {{< /tip >}}
+
+1. Log in as `mz_system`, using `external_login_password_mz_system` as the
+   password, and create new users.
+
+   In general, other than the initial login to create new users for new
+   deployments, avoid using `mz_system` since `mz_system` is also used by the
+   Materialize Operator for upgrades and maintenance tasks.
+
+1. Once new users are created, log out as `mz_system` and log in as one of the
+   created users.
+
+   For non-`mz_system` users, you can connect using the Materialize Console
+   or PostgreSQL-compatible tools and drivers using the following ports:
+
+   {{< yaml-table data="self_managed/default_ports" >}}
+
+For more information on authentication and authorization for Self-Managed
+Materialize, see:
+
+- [Authentication](/security/self-managed/authentication/)
+- [Access Control](/security/self-managed/access-control/)
+
+
+## Customizing Your Deployment
+
+For more information on the Terraform modules, see both the [top
+level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main)
+and [AWS
+specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws)
+details.
+
+{{< tip >}}
+You can customize each module independently. To reduce cost in your demo environment, you can tweak subnet CIDRs and instance types in `main.tf`.
+{{< /tip >}}
+
+For details on recommended instance sizing and configuration, see the [AWS
+deployment
+guide](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines/).
+
+## Cleanup
+
+{{% self-managed/cleanup-cloud %}}
+
+
+## See Also
+
+- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/)
+- [Troubleshooting](/self-managed-deployments/troubleshooting/)
diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md
new file mode 100644
index 0000000000000..f2135765fd440
--- /dev/null
+++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md
@@ -0,0 +1,274 @@
+---
+title: "Install on Azure"
+description: "Install Materialize on Azure using the Unified Terraform module."
+menu:
+  main:
+    parent: "installation"
+    identifier: "install-on-azure"
+    weight: 20
+---
+
+Materialize provides a set of modular [Terraform
+modules](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main)
+that can be used to deploy all services required for Materialize to run on Azure.
+The module is intended to provide a simple set of examples on how to deploy
+Materialize. It can be used as is or modules can be taken from the example and
+integrated with existing DevOps tooling.
+
+{{% self-managed/materialize-components-sentence %}} The example on this page
+deploys a complete Materialize environment on Azure using the modular Terraform
+setup from this repository.
+ +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +## What Gets Created + +This example provisions the following infrastructure: + +### Resource Group + +| Resource | Description | +|----------|-------------| +| Resource Group | New resource group to contain all resources | + +### Networking + +| Resource | Description | +|----------|-------------| +| Virtual Network | 20.0.0.0/16 address space | +| AKS Subnet | 20.0.0.0/20 with NAT Gateway association and service endpoints for Storage and SQL | +| PostgreSQL Subnet | 20.0.16.0/24 delegated to PostgreSQL Flexible Server | +| NAT Gateway | Standard SKU with static public IP for outbound connectivity | +| Private DNS Zone | For PostgreSQL private endpoint resolution with VNet link | + +### Compute + +| Resource | Description | +|----------|-------------| +| AKS Cluster | Version 1.32 with Cilium networking (network plugin: azure, data plane: cilium, policy: cilium) | +| Default Node Pool | Standard_D4pds_v6 VMs, autoscaling 2-5 nodes, labeled for generic workloads | +| Materialize Node Pool | Standard_E4pds_v6 VMs with 100GB disk, autoscaling 2-5 nodes, swap enabled, dedicated taints for Materialize workloads | +| Managed Identities | AKS cluster identity (used by AKS control plane to provision Azure resources like load balancers and network interfaces) and Workload identity (used by Materialize pods for secure, passwordless authentication to Azure Storage) | + +### Database + +| Resource | Description | +|----------|-------------| +| Azure PostgreSQL Flexible Server | Version 15 | +| SKU | GP_Standard_D2s_v3 (2 vCores, 4GB memory) | +| Storage | 32GB with 7-day backup retention | +| Network Access | Public Network Access is disabled, Private access only (no public endpoint) | +| Database | `materialize` database pre-created | + +### Storage + +| Resource | Description | +|----------|-------------| +| Storage Account | Premium BlockBlobStorage with LRS replication for Materialize persistence | +| Container | `materialize` blob container | +| Access Control | Workload Identity federation for Kubernetes service account (passwordless authentication via OIDC) | +| Network Access | Currently allows **all traffic**(production deployments should restrict to AKS subnet only traffic) | + +### Kubernetes Add-ons + +| Resource | Description | +|----------|-------------| +| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal | +| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. 
| + +### Materialize + +| Resource | Description | +|----------|-------------| +| Operator | Materialize Kubernetes operator | +| Instance | Single Materialize instance in `materialize-environment` namespace | +| Load Balancers | Internal Azure Load Balancers for Materialize access | + +## Prerequisites + +### Azure Account Requirements + +An active Azure subscription with appropriate permissions to create: +- AKS clusters +- Azure PostgreSQL Flexible Server instances +- Storage accounts +- Virtual networks and networking resources +- Managed identities and role assignments + +### Required Tools + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +### License Key + +{{< yaml-table data="self_managed/license_key" >}} + +## Getting started: Simple example + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +### Step 1: Set Up the Environment + +1. Open a terminal window. + +1. Clone the Materialize Terraform repository and go to the + `azure/examples/simple` directory. + + ```bash + git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git + cd materialize-terraform-self-managed/azure/examples/simple + ``` + +1. Authenticate with Azure. + + ```bash + az login + ``` + + The command opens a browser window to sign in to Azure. Sign in. + +1. Select the subscription and tenant to use. After you have signed in, back in + the terminal, your tenant and subscription information is displayed. + + ```none + Retrieving tenants and subscriptions for the selection... + + [Tenant and subscription selection] + + No Subscription name Subscription ID Tenant + ----- ------------------- ------------------------------------ ---------------- + [1]* ... ... ... + + The default is marked with an *; the default tenant is '' and + subscription is '' (). + ``` + + Select the subscription and tenant. + +### Step 2: Configure Terraform Variables + +1. Create a `terraform.tfvars` file with the following variables: + + - `subscription_id`: Azure subscription ID + - `resource_group_name`: Name for the resource group to create (e.g. + `mz-demo-rg`) + - `name_prefix`: Prefix for all resource names (e.g., `simple-demo`) + - `location`: Azure region for deployment (e.g., `westus2`) + - `license_key`: Materialize license key + - `tags`: Map of tags to apply to resources + + ```hcl + subscription_id = "your-subscription-id" + resource_group_name = "mz-demo-rg" + name_prefix = "simple-demo" + location = "westus2" + license_key = "your-materialize-license-key" + tags = { + environment = "demo" + } + ``` + +### Step 3: Apply the Terraform + +1. Initialize the Terraform directory to download the required providers + and modules: + + ```bash + terraform init + ``` + +1. Apply the Terraform configuration to create the infrastructure. + + - To deploy with the default **internal NLB** for Materialize access: + + ```bash + terraform apply + ``` + + - To deploy with **public NLB** for Materialize access: + + ```bash + terraform apply -var="internal=false" + ``` + + If you are satisfied with the planned changes, type `yes` when prompted + to proceed. + +1. From the output, you will need the following field(s) to connect: + - `console_load_balancer_ip` + +### Step 4. Optional. Verify the deployment. + +1. 
Configure `kubectl` to connect to your cluster:
+
+    ```bash
+    az aks get-credentials --resource-group <resource-group-name> --name <aks-cluster-name>
+    ```
+
+1. Check the status of your deployment:
+   {{% include-from-yaml data="self_managed/installation"
+   name="installation-verify-status" %}}
+
+### Step 5: Connect to Materialize
+
+Using the `console_load_balancer_ip` from the Terraform output, you can connect
+to Materialize via the Materialize Console.
+
+{{< note >}}
+
+If using an **internal Network Load Balancer (NLB)** for your Materialize
+access, you can connect from inside the same VPC or from networks that are
+privately connected to it.
+
+{{< /note >}}
+
+1. To connect to the Materialize Console, open a browser to
+   `https://<console_load_balancer_ip>:8080`, substituting your
+   `<console_load_balancer_ip>`.
+
+   {{< tip >}}
+
+   {{% include-from-yaml data="self_managed/installation"
+   name="install-uses-self-signed-cluster-issuer" %}}
+
+   {{< /tip >}}
+
+## Customizing Your Deployment
+
+For more information on the Terraform modules, see both the [top
+level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main)
+and [Azure
+specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)
+details.
+
+{{< tip >}}
+You can customize each module independently. To reduce cost in your demo environment, you can tweak VM sizes and database tiers in `main.tf`.
+{{< /tip >}}
+
+{{< note >}}
+Autoscaling: Uses Azure's native cluster autoscaler that integrates directly with Azure Virtual Machine Scale Sets for automated node scaling. In the future, we plan to enhance this by using karpenter-provider-azure.
+{{< /note >}}
+
+For details on recommended instance sizing and configuration, see the [Azure
+deployment
+guide](/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines/).
+
+## Cleanup
+
+{{% self-managed/cleanup-cloud %}}
+
+## See Also
+
+- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/)
+- [Troubleshooting](/self-managed-deployments/troubleshooting/)
diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md
new file mode 100644
index 0000000000000..1d09caf4494d2
--- /dev/null
+++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md
@@ -0,0 +1,288 @@
+---
+title: "Install on GCP"
+description: ""
+menu:
+  main:
+    parent: "installation"
+    identifier: "install-gcp-terraform"
+    weight: 30
+
+---
+
+Materialize provides a set of modular [Terraform
+modules](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main)
+that can be used to deploy all services required for Materialize to run on Google Cloud.
+The module is intended to provide a simple set of examples on how to deploy
+Materialize. It can be used as is or modules can be taken from the example and
+integrated with existing DevOps tooling.
+
+{{% self-managed/materialize-components-sentence %}} The example on this page
+deploys a complete Materialize environment on GCP using the modular Terraform
+setup from this repository.
+
+{{< warning >}}
+
+{{< self-managed/terraform-disclaimer >}}
+
+{{< /warning >}}
+
+## What Gets Created
+
+This example provisions the following infrastructure:
+
+### Networking
+
+| Resource | Description |
+|----------|-------------|
+| VPC Network | Custom VPC with auto-create subnets disabled |
+| Subnet | 192.168.0.0/20 primary range with private Google access enabled |
+| Secondary Ranges | Pods: 192.168.64.0/18, Services: 192.168.128.0/20 |
+| Cloud Router | For NAT and routing configuration |
+| Cloud NAT | For outbound internet access from private nodes |
+| VPC Peering | Service networking connection for Cloud SQL private access |
+
+### Compute
+
+| Resource | Description |
+|----------|-------------|
+| GKE Cluster | Regional cluster with Workload Identity enabled |
+| Generic Node Pool | e2-standard-8 machines, autoscaling 2-5 nodes, 50GB disk, for general workloads |
+| Materialize Node Pool | n2-highmem-8 machines, autoscaling 2-5 nodes, 100GB disk, 1 local SSD, swap enabled, dedicated taints for Materialize workloads |
+| Service Account | GKE service account with workload identity binding |
+
+### Database
+
+| Resource | Description |
+|----------|-------------|
+| Cloud SQL PostgreSQL | Private IP only (no public IP) |
+| Tier | db-custom-2-4096 (2 vCPUs, 4GB memory) |
+| Database | `materialize` database with UTF8 charset |
+| User | `materialize` user with auto-generated password |
+| Network | Connected via VPC peering for private access |
+
+### Storage
+
+| Resource | Description |
+|----------|-------------|
+| Cloud Storage Bucket | Regional bucket for Materialize persistence |
+| Access | HMAC keys for S3-compatible access (a Workload Identity service account with storage permissions is configured but not currently used by Materialize for GCS access; in the future, we will remove HMAC keys and support access to GCS either via Workload Identity Federation or via Kubernetes ServiceAccounts that impersonate IAM service accounts) |
+| Versioning | Disabled (for testing; enable in production) |
+
+### Kubernetes Add-ons
+
+| Resource | Description |
+|----------|-------------|
+| cert-manager | Certificate management controller for Kubernetes that automates TLS certificate provisioning and renewal |
+| Self-signed ClusterIssuer | Provides self-signed TLS certificates for Materialize instance internal communication (balancerd, console). Used by the Materialize instance for secure inter-component communication. |
+
+### Materialize
+
+| Resource | Description |
+|----------|-------------|
+| Operator | Materialize Kubernetes operator in `materialize` namespace |
+| Instance | Single Materialize instance in `materialize-environment` namespace |
+| Load Balancers | GCP Load Balancers for Materialize access {{< yaml-table data="self_managed/default_ports" >}} |
+
+## Prerequisites
+
+### GCP Account Requirements
+
+- A Google account with permission to enable Google Cloud APIs/services for
+your project.
+- A [Google service +account](https://docs.cloud.google.com/iam/docs/service-accounts-create#creating) +with appropriate permissions to create: + - GKE clusters + - Cloud SQL instances + - Cloud Storage buckets + - VPC networks and networking resources + - Service accounts and IAM bindings + +### Required Tools + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [gcloud CLI](https://cloud.google.com/sdk/docs/install) +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) +- [kubectl gke plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin) + +### License Key + +{{< yaml-table data="self_managed/license_key" >}} + +## Getting started: Simple example + +{{< warning >}} + +{{< self-managed/terraform-disclaimer >}} + +{{< /warning >}} + +### Step 1: Set Up the Environment + +1. Open a terminal window. + +1. Clone the Materialize Terraform repository and go to the + `gcp/examples/simple` directory. + + ```bash + git clone https://github.com/MaterializeInc/materialize-terraform-self-managed.git + cd materialize-terraform-self-managed/gcp/examples/simple + ``` + +1. Initialize the gcloud CLI (`gcloud init`) to specify the GCP project you want + to use. For details, see the [Initializing the gcloud CLI + documentation](https://cloud.google.com/sdk/docs/initializing#initialize_the). + + {{< tip >}} + You do not need to configure a default Compute Region and Zone as you will + specify the region. + {{}} + +1. Enable the following APIs for your project: + + ```bash + gcloud services enable container.googleapis.com # For creating Kubernetes clusters + gcloud services enable compute.googleapis.com # For creating GKE nodes and other compute resources + gcloud services enable sqladmin.googleapis.com # For creating databases + gcloud services enable cloudresourcemanager.googleapis.com # For managing GCP resources + gcloud services enable servicenetworking.googleapis.com # For private network connections + gcloud services enable iamcredentials.googleapis.com # For security and authentication + gcloud services enable iam.googleapis.com # For managing IAM service accounts and policies + gcloud services enable storage.googleapis.com # For Cloud Storage buckets + ``` + +1. For the service account, authenticate to allow Terraform to + interact with your GCP project. For details, see [Terraform: Google Cloud + Provider Configuration + reference](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#authentication). + + For example, if using [User Application Default + Credentials](https://cloud.google.com/sdk/gcloud/reference/auth/application-default), + you can run the following command: + + ```bash + gcloud auth application-default login + ``` + + {{< tip >}} + If using `GOOGLE_APPLICATION_CREDENTIALS`, use absolute path to your key file. + {{}} + + +### Step 2: Configure Terraform Variables + +1. 
Create a `terraform.tfvars` file with the following variables:
+
+   - `project_id`: GCP project ID
+   - `name_prefix`: Prefix for all resource names (e.g., `simple-demo`)
+   - `region`: GCP region for deployment (e.g., `us-central1`)
+   - `license_key`: Materialize license key
+   - `labels`: Map of labels to apply to resources
+
+    ```hcl
+    project_id  = "my-gcp-project"
+    name_prefix = "simple-demo"
+    region      = "us-central1"
+    license_key = "your-materialize-license-key"
+    labels = {
+      environment = "demo"
+      created_by  = "terraform"
+    }
+    ```
+
+### Step 3: Apply the Terraform
+
+1. Initialize the Terraform directory to download the required providers
+   and modules:
+
+    ```bash
+    terraform init
+    ```
+
+1. Apply the Terraform configuration to create the infrastructure.
+
+   - To deploy with the default **internal NLB** for Materialize access:
+
+     ```bash
+     terraform apply
+     ```
+
+   - To deploy with **public NLB** for Materialize access:
+
+     ```bash
+     terraform apply -var="internal=false"
+     ```
+
+   If you are satisfied with the planned changes, type `yes` when prompted
+   to proceed.
+
+1. From the output, you will need the following field(s) to connect:
+   - `console_load_balancer_ip`
+
+### Step 4. Optional. Verify the deployment.
+
+1. Configure `kubectl` to connect to your cluster:
+
+    ```bash
+    gcloud container clusters get-credentials <cluster-name> \
+        --region <region> \
+        --project <project-id>
+    ```
+
+1. Check the status of your deployment:
+   {{% include-from-yaml data="self_managed/installation"
+   name="installation-verify-status" %}}
+
+### Step 5: Connect to Materialize
+
+Using the `console_load_balancer_ip` from the Terraform output, you can connect
+to Materialize via the Materialize Console.
+
+{{< note >}}
+
+If using an **internal Network Load Balancer (NLB)** for your Materialize
+access, you can connect from inside the same VPC or from networks that are
+privately connected to it.
+
+{{< /note >}}
+
+1. To connect to the Materialize Console, open a browser to
+   `https://<console_load_balancer_ip>:8080`, substituting your
+   `<console_load_balancer_ip>`.
+
+   {{< tip >}}
+
+   {{% include-from-yaml data="self_managed/installation"
+   name="install-uses-self-signed-cluster-issuer" %}}
+
+   {{< /tip >}}
+
+## Customizing Your Deployment
+
+For more information on the Terraform modules, see both the [top
+level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main)
+and [GCP
+specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)
+details.
+
+{{< tip >}}
+You can customize each module independently. To reduce cost in your demo environment, you can tweak machine types and database tiers in `main.tf`.
+{{< /tip >}}
+
+{{< note >}}
+**GCP Storage Authentication Limitation:** Materialize currently only supports HMAC key authentication for GCS access (S3-compatible API).
+Current State: The modules configure both HMAC keys and Workload Identity, but Materialize uses HMAC keys for actual storage access.
+Future: Native GCS access via Workload Identity Federation or Kubernetes service account impersonation will be supported in a future release, eliminating the need for static credentials.
+{{< /note >}}
+
+For details on recommended instance sizing and configuration, see the [GCP
+deployment
+guide](/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines/).
+
+## Cleanup
+
+{{% self-managed/cleanup-cloud %}}
+
+## See Also
+
+- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/)
+- [Troubleshooting](/self-managed-deployments/troubleshooting/)
diff --git a/doc/user/content/installation/install-on-local-kind/_index.md b/doc/user/content/self-managed-deployments/installation/install-on-local-kind.md
similarity index 99%
rename from doc/user/content/installation/install-on-local-kind/_index.md
rename to doc/user/content/self-managed-deployments/installation/install-on-local-kind.md
index 18cba888b1bb1..88f380e3ef98b 100644
--- a/doc/user/content/installation/install-on-local-kind/_index.md
+++ b/doc/user/content/self-managed-deployments/installation/install-on-local-kind.md
@@ -6,12 +6,12 @@ aliases:
  - /installation/install-on-local-minikube/
  - /self-managed/v25.1/installation/install-on-local-kind/
  - /self-managed/v25.1/installation/install-on-local-minikube/
+  - /installation/install-on-local-kind/
menu:
  main:
    parent: "installation"
    identifier: "install-on-local-kind"
    weight: 10
-disable_list: true
---

{{% self-managed/materialize-components-sentence %}}
diff --git a/doc/user/content/self-managed-deployments/installation/legacy/_index.md b/doc/user/content/self-managed-deployments/installation/legacy/_index.md
new file mode 100644
index 0000000000000..d64b5b7c6fb01
--- /dev/null
+++ b/doc/user/content/self-managed-deployments/installation/legacy/_index.md
@@ -0,0 +1,12 @@
+---
+title: "Install Guides (Legacy)"
+description: "Install Self-Managed Materialize using legacy Terraform modules"
+menu:
+  main:
+    parent: "installation"
+    weight: 50
+    identifier: "installation-legacy"
+---
+
+For deployments that are using the legacy Terraform modules, the following
+install guides are available:
diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md b/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md
similarity index 95%
rename from doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md
rename to doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md
index dc04c39636c1e..10a8ad023265a 100644
--- a/doc/user/content/installation/install-on-aws/legacy-terraform-module/install.md
+++ b/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md
@@ -1,20 +1,21 @@
---
-title: "Install"
-description: ""
-aliases:
-  - /self-hosted/install-on-aws/
+title: "Install on AWS (Legacy Terraform)"
+description: "Install on AWS using the legacy Terraform module."
+disable_list: true
+disable_toc: true
menu:
  main:
-    parent: "install-on-aws-legacy-terraform-module"
-    identifier: "install-aws"
+    parent: "installation-legacy"
+    identifier: "install-aws-terraform-legacy"
    weight: 5
---
+
{{% self-managed/materialize-components-sentence %}} The tutorial deploys
Materialize to AWS Elastic Kubernetes Service (EKS) with a PostgreSQL RDS
database as the metadata database and AWS S3 for blob storage.

-The tutorial uses [Materialize on AWS Terraform
+The tutorial uses the [Legacy Materialize on AWS Terraform
module](https://github.com/MaterializeInc/terraform-aws-materialize) to:

- Set up the AWS Kubernetes environment.
@@ -31,8 +32,7 @@ module](https://github.com/MaterializeInc/terraform-aws-materialize) to: {{% self-managed/aws-recommended-instances %}} -See [Appendix: AWS Deployment -guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) for +See [AWS Deployment guidelines](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines/) for more information. ## Prerequisites @@ -308,7 +308,7 @@ components: {{< tip >}} {{% self-managed/aws-terraform-upgrade-notes %}} - See [Materialize on AWS releases](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) for notable changes. + See [Materialize on AWS releases](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-aws-terraform-module) for notable changes. {{}} 1. Run `terraform plan` with both `.tfvars` files and review the changes to be @@ -409,7 +409,7 @@ components: ``` If you run into an error during deployment, refer to the - [Troubleshooting](/installation/troubleshooting/). + [Troubleshooting](/self-managed-deployments/troubleshooting/). 1. Open the Materialize Console in your browser: @@ -474,8 +474,5 @@ components: ## See also -- [Materialize Operator Configuration](/installation/configuration/) -- [Troubleshooting](/installation/troubleshooting/) -- [Appendix: AWS Deployment -guidelines](/installation/install-on-aws/appendix-deployment-guidelines/) -- [Installation](/installation/) +- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md b/doc/user/content/self-managed-deployments/installation/legacy/install-on-azure-legacy.md similarity index 98% rename from doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md rename to doc/user/content/self-managed-deployments/installation/legacy/install-on-azure-legacy.md index 4d340e0ee08fc..d31c46f28857d 100644 --- a/doc/user/content/installation/install-on-azure/legacy-terraform-module/install.md +++ b/doc/user/content/self-managed-deployments/installation/legacy/install-on-azure-legacy.md @@ -1,11 +1,11 @@ --- -title: "Install" +title: "Install on Azure (Legacy Terraform)" description: "Install Materialize on Azure Kubernetes Service (AKS) using Terraform" menu: main: - parent: "install-on-azure-legacy-terraform-module" - identifier: "install-azure" - weight: 5 + parent: "installation-legacy" + identifier: "install-azure-terraform-legacy" + weight: 20 --- @@ -361,7 +361,7 @@ deploys a sample infrastructure on Azure with the following components: {{< tip >}} {{% self-managed/azure-terraform-upgrade-notes %}} - See [Materialize on Azure releases](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) for notable changes. + See [Materialize on Azure releases](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-azure-terraform-module) for notable changes. {{}} 1. 
Run `terraform plan` with both `.tfvars` files and review the changes to be diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md b/doc/user/content/self-managed-deployments/installation/legacy/install-on-gcp-legacy.md similarity index 97% rename from doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md rename to doc/user/content/self-managed-deployments/installation/legacy/install-on-gcp-legacy.md index 0eec79daba6ab..b91fdb8f99eef 100644 --- a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/install.md +++ b/doc/user/content/self-managed-deployments/installation/legacy/install-on-gcp-legacy.md @@ -1,14 +1,13 @@ --- -title: "Install" +title: "Install on GCP (Legacy Terraform)" description: "" aliases: - /self-hosted/install-on-gcp/ - - /installation/install-on-gcp/ menu: main: - parent: "install-on-gcp-legacy-terraform-module" - identifier: "legacy-terraform-module-install" - weight: 5 + parent: "installation-legacy" + identifier: "install-gcp-terraform-legacy" + weight: 30 --- {{% self-managed/materialize-components-sentence %}} @@ -424,7 +423,7 @@ components: {{< tip >}} {{% self-managed/gcp-terraform-upgrade-notes %}} - See [Materialize on GCP releases](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) for notable changes. + See [Materialize on GCP releases](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-gcp-terraform-module) for notable changes. {{}} 1. Run `terraform plan` with both `.tfvars` files and review the changes to be @@ -577,7 +576,5 @@ components: ## See also -- [Troubleshooting](/installation/troubleshooting/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) - [Materialize Operator Configuration](/installation/configuration/) -- [Appendix: Google deployment guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/) -- [Installation](/installation/) diff --git a/doc/user/content/installation/release-versions.md b/doc/user/content/self-managed-deployments/release-versions.md similarity index 71% rename from doc/user/content/installation/release-versions.md rename to doc/user/content/self-managed-deployments/release-versions.md index bd4ef9565b2e9..af435a4ec2e81 100644 --- a/doc/user/content/installation/release-versions.md +++ b/doc/user/content/self-managed-deployments/release-versions.md @@ -3,8 +3,10 @@ title: "Self-managed release versions" description: "" menu: main: - parent: "installation" + parent: "sm-deployments" weight: 85 +aliases: + - /installation/release-versions/ --- ## V26 releases diff --git a/doc/user/content/installation/troubleshooting.md b/doc/user/content/self-managed-deployments/troubleshooting.md similarity index 95% rename from doc/user/content/installation/troubleshooting.md rename to doc/user/content/self-managed-deployments/troubleshooting.md index a7fdd20674421..5be696e1d8d7b 100644 --- a/doc/user/content/installation/troubleshooting.md +++ b/doc/user/content/self-managed-deployments/troubleshooting.md @@ -4,16 +4,15 @@ description: "" aliases: - /self-hosted/troubleshooting/ - /self-managed/v25.1/installation/troubleshooting/ + - /installation/troubleshooting/ menu: main: - parent: "installation" + parent: "sm-deployments" weight: 90 --- ## Troubleshooting Kubernetes -### Materialize operator - To check the status of the Materialize operator: ```shell @@ -127,8 +126,3 @@ To increase the cluster's size, you can follow the following steps: quickstart | r1 (25cc) | 
(6 rows) ``` - -## See also - -- [Configuration](/installation/configuration/) -- [Installation](/installation/) diff --git a/doc/user/content/installation/upgrading.md b/doc/user/content/self-managed-deployments/upgrading/_index.md similarity index 50% rename from doc/user/content/installation/upgrading.md rename to doc/user/content/self-managed-deployments/upgrading/_index.md index e1f6abc55a8db..bee4ce077abb9 100644 --- a/doc/user/content/installation/upgrading.md +++ b/doc/user/content/self-managed-deployments/upgrading/_index.md @@ -1,34 +1,38 @@ --- -title: "Upgrade Overview" +title: "Upgrading" description: "Upgrading Self-Managed Materialize." +disable_list: true menu: main: - parent: "installation" + parent: "sm-deployments" + weight: 30 + identifier: "upgrading" --- -The following provides a general outline and examples for upgrading Materialize. +## Upgrading guidelines -For a more specific set of steps, please consult the deployment-specific upgrade -documentation: - - [Kind](/installation/install-on-local-kind/upgrade-on-local-kind/) - - [AWS (legacy Terraform)](/installation/install-on-aws/legacy-terraform-module/upgrade/) - - [GCP (legacy Terraform)](/installation/install-on-gcp/legacy-terraform-module/upgrade/) - - [Azure (legacy Terraform)](/installation/install-on-azure/legacy-terraform-module/upgrade/) +{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" >}} -***When upgrading always***: -- Upgrade the operator first and ensure version compatibility between the operator and the Materialize instance you are upgrading to. -- Upgrade your Materialize instances after upgrading the operator to ensure compatibility. -- Check the [version specific upgrade notes](#version-specific-upgrade-notes). +## Upgrade guides -### Upgrading the Helm Chart and Kubernetes Operator +The following upgrade guides are available: + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-landing-guides-helm" %}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-landing-guides-legacy" %}} + + +## Upgrading the Helm Chart and Kubernetes Operator {{< important >}} -When upgrading Materialize, always upgrade the operator first. +When upgrading Materialize, always upgrade the Operator first. {{}} -The Materialize Kubernetes operator is deployed via Helm and can be updated through standard Helm upgrade commands. +The Materialize Kubernetes Operator is deployed via Helm and can be updated through standard Helm upgrade commands. ```shell helm upgrade my-materialize-operator materialize/misc/helm-charts/operator @@ -40,29 +44,33 @@ If you have custom values, make sure to include your values file: helm upgrade my-materialize-operator materialize/misc/helm-charts/operator -f my-values.yaml ``` -### Upgrading Materialize Instances +## Upgrading Materialize Instances + +To minimize unexpected downtime and avoid connection drops at critical +periods for your application, the upgrade process involves two steps: + +- First, stage the changes (`environmentdImageRef` with the new version) to the + Materialize custom resource. The Operator watches for changes but does not + automatically roll out the changes. -In order to minimize unexpected downtime and avoid connection drops at critical -periods for your application, changes are not immediately and automatically -rolled out by the Operator. Instead, the upgrade process involves two steps: -- First, staging spec changes to the Materialize custom resource. 
-- Second, applying the changes via a `rolloutRequest`. +- Second, roll out the changes by specifying a new UUID for `requestRollout`. -When upgrading your Materialize instances, you'll first want to update the `environmentdImageRef` field in the Materialize custom resource spec. -#### Updating the `environmentdImageRef` -To find a compatible version with your currently deployed Materialize operator, check the `appVersion` in the Helm repository. +### Updating the `environmentdImageRef` + +When upgrading your Materialize instances, you'll first want to update the +`environmentdImageRef` field in the Materialize custom resource spec. + +To find a compatible version with your currently deployed Materialize Operator, check the `appVersion` in the Helm repository. ```shell helm list -n materialize ``` Using the returned version, we can construct an image ref. -We always recommend using the official Materialize image repository -`docker.io/materialize/environmentd`. ``` -environmentdImageRef: docker.io/materialize/environmentd:v26.0.0 +environmentdImageRef: docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}} ``` The following is an example of how to patch the version. @@ -71,10 +79,16 @@ The following is an example of how to patch the version. kubectl patch materialize \ -n \ --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\"}}" + -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" ``` -#### Applying the changes via `rolloutRequest` +{{< note >}} +Until you specify a new `requestRollout`, the Operator +watches for updates but does not roll out the changes. +{{< /note >}} + + +### Applying the changes via `requestRollout` To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID. Be sure to consult the [Rollout Configurations](#rollout-configuration) to ensure you've selected the correct rollout behavior. @@ -90,13 +104,13 @@ kubectl patch materialize \ It is possible to combine both operations in a single command if preferred: ```shell -kubectl patch materialize 12345678-1234-1234-1234-123456789012 \ +kubectl patch materialize \ -n materialize-environment \ --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:v26.0.0\", \"requestRollout\": \"$(uuidgen)\"}}" + -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" ``` -#### Using YAML Definition +### Using YAML Definition Alternatively, you can update your Materialize custom resource definition directly: @@ -107,8 +121,8 @@ metadata: name: 12345678-1234-1234-1234-123456789012 namespace: materialize-environment spec: - environmentdImageRef: materialize/environmentd:v26.0.0 # Update version as needed - requestRollout: 22222222-2222-2222-2222-222222222222 # Generate new UUID + environmentdImageRef: materialize/environmentd:{{< self-managed/versions/get-latest-version >}} # Update version as needed + requestRollout: 22222222-2222-2222-2222-222222222222 # Use a new UUID forceRollout: 33333333-3333-3333-3333-333333333333 # Optional: for forced rollouts inPlaceRollout: false # In Place rollout is deprecated and ignored. Please use rolloutStrategy rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. 
Can be WaitUntilReady or ImmediatelyPromoteCausingDowntime @@ -121,9 +135,9 @@ Apply the updated definition: kubectl apply -f materialize.yaml ``` -### Rollout Configuration +## Rollout Configuration -#### Forced Rollouts +### Forced Rollouts If you need to force a rollout even when there are no changes to the instance: @@ -134,23 +148,17 @@ kubectl patch materialize \ -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\", \"forceRollout\": \"$(uuidgen)\"}}" ``` -#### Rollout Strategies -The behavior of the new version rollout follows your `rolloutStrategy` setting: - -`WaitUntilReady` (default): - -New instances are created and all dataflows are determined to be ready before cutover and terminating the old version, temporarily requiring twice the resources during the transition. - -`ImmediatelyPromoteCausingDowntime`: - -Tears down the prior version before creating and promoting the new version. This causes downtime equal to the duration it takes for dataflows to hydrate, but does not require additional resources. - -#### In Place Rollout +### Rollout strategies -`inPlaceRollout` has been deprecated and will be ignored. +The behavior of the new version rollout follows your `rolloutStrategy` setting: +| `rolloutStrategy` | Description | +| ----------------- | -----------------------------------| +| `WaitUntilReady` | *Default*. New instances are created and all dataflows are determined to be ready before cutover and terminating the old version, temporarily requiring twice the resources during the transition. | +| `ImmediatelyPromoteCausingDowntime`| Tears down the prior version before creating and promoting the new version. This causes downtime equal to the duration it takes for dataflows to hydrate, but does not require additional resources. | +| `inPlaceRollout`| *Deprecated*. The setting is ignored. | -### Verifying the Upgrade +## Verifying the Upgrade After initiating the rollout, you can monitor the status field of the Materialize custom resource to check on the upgrade. @@ -161,20 +169,26 @@ kubectl get materialize -n materialize-environment -w # Check the logs of the operator kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize ``` -### Version Specific Upgrade Notes +## Version Specific Upgrade Notes + +### Upgrading to `v26.1` and later versions -#### Upgrading to `v26.1` and later versions {{< include-md file="shared-content/self-managed/upgrade-notes/v26.1.md" >}} -#### Upgrading between minor versions less than `v26` - - Prior to `v26`, you must upgrade at most one minor version at a time. For example, upgrading from `v25.1.5` to `v25.2.15` is permitted. +### Upgrading to `v26.0` + +{{< include-md file="shared-content/self-managed/upgrade-notes/v26.0.md" >}} -#### Upgrading between minor versions less than `v26` +### Upgrading between minor versions less than `v26` - Prior to `v26`, you must upgrade at most one minor version at a time. For example, upgrading from `v25.1.5` to `v25.2.16` is permitted. 
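When stepping through versions one at a time, it can help to confirm which image each pod is actually running before moving on to the next step. A minimal sketch, assuming the instance lives in the `materialize-environment` namespace used in the examples above:

```bash
# List each pod in the Materialize environment together with the image it is running.
kubectl get pods -n materialize-environment \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
```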
## See also -- [Configuration](/installation/configuration/) -- [Installation](/installation/) -- [Troubleshooting](/installation/troubleshooting/) +- [Materialize Operator + Configuration](/self-managed-deployments/appendix/configuration/) + +- [Materialize CRD Field + Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) + +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md b/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md new file mode 100644 index 0000000000000..d79598e75f0a5 --- /dev/null +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md @@ -0,0 +1,12 @@ +--- +title: "Upgrade Guides (Legacy)" +description: "Upgrading Self-Managed Materialize using legacy Terraform modules" +menu: + main: + parent: "upgrading" + weight: 30 + identifier: "upgrading-legacy" +--- + +For deployments that are using the legacy Terraform modules, the following +upgrade guides are available: diff --git a/doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy.md similarity index 93% rename from doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md rename to doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy.md index de77568fc4085..aadbcace8f3f4 100644 --- a/doc/user/content/installation/install-on-aws/legacy-terraform-module/upgrade.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy.md @@ -1,10 +1,10 @@ --- -title: "Upgrade" +title: "Upgrade on AWS (Legacy Terraform)" description: "Procedure to upgrade your Materialize operator and instances running on AWS" menu: main: - parent: "install-on-aws-legacy-terraform-module" - identifier: "upgrade-on-aws" + parent: "upgrading-legacy" + identifier: "upgrade-on-aws-legacy" weight: 10 --- @@ -102,7 +102,7 @@ documentation](https://helm.sh/docs/intro/install/). {{< tip >}} {{% self-managed/aws-terraform-upgrade-notes %}} - See [Materialize on AWS releases](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) for notable changes. + See [Materialize on AWS releases](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-aws-terraform-module) for notable changes. 
{{}} diff --git a/doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md similarity index 95% rename from doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md rename to doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md index c11593bfe4014..3e89d7d341a78 100644 --- a/doc/user/content/installation/install-on-azure/legacy-terraform-module/upgrade.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md @@ -1,11 +1,14 @@ --- -title: "Upgrade" +title: "Upgrade on Azure (Legacy Terraform)" description: "Procedure to upgrade your Materialize operator and instances running on Azure" +disable_list: true +disable_toc: true menu: main: - parent: "install-on-azure-legacy-terraform-module" - identifier: "upgrade-on-azure" - weight: 10 + parent: "upgrading-legacy" + identifier: "upgrade-on-azure-legacy" + weight: 20 + --- {{< annotation type="Disambiguation" >}} @@ -25,7 +28,7 @@ Materialize deployment running on Azure Kubernetes Service (AKS). The tutorial assumes you have installed Materialize on Azure Kubernetes Service (AKS) using the instructions on [Install on -Azure](/installation/install-on-azure/) (either from the examples/simple +Azure](/self-managed-deployments/installation/legacy/install-on-azure-legacy/) (either from the examples/simple directory or the root). ## Version compatibility diff --git a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md similarity index 93% rename from doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md rename to doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md index 92f9f2d0b93d2..9d2eac02df589 100644 --- a/doc/user/content/installation/install-on-gcp/legacy-terraform-module/upgrade.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md @@ -1,13 +1,13 @@ --- -title: "Upgrade" -description: "Procedure to upgrade your Materialize operator and instances running on GCP" +title: "Upgrade on GCP (Legacy Terraform)" +description: "" +disable_list: true +disable_toc: true menu: main: - parent: "install-on-gcp-legacy-terraform-module" - identifier: "legacy-terraform-module-upgrade" - weight: 10 -aliases: - - /installation/install-on-gcp/upgrade-on-gcp + parent: "upgrading-legacy" + identifier: "upgrade-on-gcp-legacy-terraform-module" + weight: 30 --- {{< annotation type="Disambiguation" >}} @@ -19,7 +19,7 @@ file="shared-content/self-managed/gcp-terraform-v0.6.1-upgrade-notes.md" >}}. - To upgrade to `v26.0` if **not** using a Materialize-provided Terraforms, you must prepare your nodes by adding the required labels. For detailed instructions, see [Prepare for swap and upgrade to -v26.0](/installation/upgrade-to-swap/). +v26.0](/self-managed-deployments/appendix/upgrade-to-swap/). {{< /annotation >}} @@ -28,7 +28,7 @@ Materialize deployment running on GCP Google Kubernetes Engine (GKE). The tutorial assumes you have installed Materialize on GCP Google Kubernetes Engine (GKE) using the instructions on [Install on -GCP](/installation/install-on-gcp/) (either from the examples/simple directory +GCP](/self-managed-deployments/installation/legacy/install-on-gcp-legacy/) (either from the examples/simple directory or the root). 
## Version compatibility @@ -202,7 +202,7 @@ If you want to use `jq` and do not have `jq` installed, install. {{< tip >}} {{% self-managed/gcp-terraform-upgrade-notes %}} - See [Materialize on GCP releases](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) for notable changes. + See [Materialize on GCP releases](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-gcp-terraform-module) for notable changes. {{}} diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md new file mode 100644 index 0000000000000..7b132ec3e2aac --- /dev/null +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md @@ -0,0 +1,240 @@ +--- +title: "Upgrade on AWS" +description: "Upgrade Materialize on AWS using the Unified Terraform module." +menu: + main: + parent: "upgrading" + weight: 20 +--- + +The following tutorial upgrades your Materialize deployment running on AWS +Elastic Kubernetes Service (EKS). The tutorial assumes you have installed +Materialize on AWS using the instructions on [Install on +AWS](/self-managed-deployments/installation/install-on-aws/). + +## Upgrade guidelines + +{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" +>}} + +{{< include-md file="shared-content/self-managed/version-compatibility-upgrade-banner.md" >}} + +## Prerequisites + +### Required Tools + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +## Procedure + +{{< important >}} + +The following procedure performs a rolling upgrade, where both the old and new Materialize instances are running before the old instances are removed. When performing a rolling upgrade, ensure you have enough resources to support having both the old and new Materialize instances running. + +{{}} + +### Step 1: Set up + +1. Open a Terminal window. + +1. Configure AWS CLI with your AWS credentials. For details, see the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). + +1. Go to the Terraform directory for your Materialize deployment. For example, + if you deployed from the `aws/examples/simple` directory: + + ```bash + cd materialize-terraform-self-managed/aws/examples/simple + ``` + +1. Configure `kubectl` to connect to your EKS cluster, replacing: + + - `` with the name of your EKS cluster. Your cluster name can be found in the Terraform output or AWS console. + + - `` with the region of your EKS cluster. + + ```bash + aws eks update-kubeconfig --name --region + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl get nodes + ``` + + For help with `kubectl` commands, see [kubectl Quick reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +### Step 2: Upgrading the Materialize Operator + +{{< important >}} + +When upgrading Materialize, always upgrade the operator first. + +{{}} + +The Materialize Kubernetes operator is deployed via Helm and can be updated through standard Helm upgrade commands. + + +1. Check the current operator version: + + ```bash + helm list -n materialize + ``` + +1. 
Upgrade the Materialize operator using Helm: + + - If your deployment has not specified custom settings: + + ```bash + helm upgrade materialize-operator materialize/materialize-operator \ + -n materialize \ + --version + ``` + + Replace `` with the desired operator version. + + - If your deployment has specified custom settings, make sure to include your + values file: + + ```bash + helm upgrade materialize-operator materialize/materialize-operator \ + -n materialize \ + --version \ + -f my-values.yaml + ``` + +1. Verify that the operator is running: + + ```bash + kubectl -n materialize get all + ``` + + Verify the operator upgrade by checking its events: + + ```bash + kubectl -n materialize describe pod -l app.kubernetes.io/name=materialize-operator + ``` + + - The **Containers** section should show the `--helm-chart-version` argument set to the new version. + - The **Events** section should list that the new version of the orchestratord has been pulled. + +## Step 3: Upgrading Materialize Instances + +In order to minimize unexpected downtime and avoid connection drops at critical periods for your application, changes are not immediately and automatically rolled out by the Operator. Instead, the upgrade process involves two steps: +- First, staging spec changes to the Materialize custom resource. +- Second, applying the changes via a `requestRollout`. + +### Updating the `environmentdImageRef` + +To find a compatible version with your currently deployed Materialize operator, check the `appVersion` in the Helm repository: + +```bash +helm list -n materialize +``` + +Using the returned version, we can construct an image ref. We always recommend using the official Materialize image repository `docker.io/materialize/environmentd`. + +The following is an example of how to patch the version: + +```bash +# For version updates, first update the image reference +kubectl patch materialize \ + -n \ + --type='merge' \ + -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" +``` + +Replace: +- `` with your Materialize instance name (typically a UUID). +- `` with your instance namespace (typically `materialize-environment`). + +### Applying the changes via `requestRollout` + +To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID. + +Be sure to consult the [Rollout Configurations](/self-managed-deployments/upgrading/#rollout-strategies) to ensure you've selected the correct rollout behavior. 
+ +```bash +# Then trigger the rollout with a new UUID +kubectl patch materialize \ + -n \ + --type='merge' \ + -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" +``` + +It is possible to combine both operations in a single command if preferred: + +```bash +kubectl patch materialize \ + -n materialize-environment \ + --type='merge' \ + -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" +``` + +### Using YAML Definition + +Alternatively, you can update your Materialize custom resource definition directly: + +```yaml +apiVersion: materialize.cloud/v1alpha1 +kind: Materialize +metadata: + name: + namespace: materialize-environment +spec: + environmentdImageRef: docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}} # Update version as needed + requestRollout: # Generate new UUID + rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. Can be WaitUntilReady or ImmediatelyPromoteCausingDowntime + backendSecretName: materialize-backend +``` + +Apply the updated definition: + +```bash +kubectl apply -f materialize.yaml +``` + +## Verifying the Upgrade + +After initiating the rollout, you can monitor the status field of the Materialize custom resource to check on the upgrade. + +```bash +# Watch the status of your Materialize environment +kubectl get materialize -n materialize-environment -w + +# Check the logs of the operator +kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize +``` + +Verify that the components are running after the upgrade: + +```bash +kubectl -n materialize-environment get all +``` + +Verify upgrade by checking the `balancerd` events: + +```bash +kubectl -n materialize-environment describe pod -l app=balancerd +``` + +The **Events** section should list that the new version of the `balancerd` has been pulled. + +Verify the upgrade by checking the `environmentd` events: + +```bash +kubectl -n materialize-environment describe pod -l app=environmentd +``` + +The **Events** section should list that the new version of the `environmentd` has been pulled. + +Open the Materialize Console. The Console should display the new version. + +## See also + +- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) +- [Materialize CRD Field Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/installation/install-on-local-kind/upgrade-on-local-kind.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md similarity index 80% rename from doc/user/content/installation/install-on-local-kind/upgrade-on-local-kind.md rename to doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md index 286b9b970f65a..599275c83632b 100644 --- a/doc/user/content/installation/install-on-local-kind/upgrade-on-local-kind.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md @@ -3,19 +3,19 @@ title: "Upgrade on kind" description: "Upgrade Materialize running locally on a kind cluster." 
menu: main: - parent: "install-on-local-kind" - identifier: "upgrade-on-local-kind" -weight: 10 + parent: "upgrading" + weight: 10 aliases: - - /installation/install-on-local-minikube/upgrade-on-local-minikube/ - /self-managed/v25.1/installation/install-on-local-kind/upgrade-on-local-kind/ + - /installation/install-on-local-kind/upgrade-on-local-kind/ --- To upgrade your Materialize instances, first choose a new operator version and upgrade the Materialize operator. Then, upgrade your Materialize instances to the same version. The following tutorial upgrades your Materialize deployment running locally on a [`kind`](https://kind.sigs.k8s.io/) cluster. The tutorial assumes you have installed Materialize on `kind` using the -instructions on [Install locally on kind](/installation/install-on-local-kind/). +instructions on [Install locally on +kind](/self-managed-deployments/installation/install-on-local-kind/). {{< include-md file="shared-content/self-managed/version-compatibility-upgrade-banner.md" >}} @@ -53,6 +53,6 @@ having both the old and new Materialize instances running. ## See also -- [Materialize Operator Configuration](/installation/configuration/) -- [Troubleshooting](/installation/troubleshooting/) -- [Installation](/installation/) +- [Materialize Operator + Configuration](/self-managed-deployments/appendix/configuration/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/data/self_managed/aws_terraform_deployed_components.yml b/doc/user/data/self_managed/aws_terraform_deployed_components.yml index aa25f746aa432..b8ce5adc576fb 100644 --- a/doc/user/data/self_managed/aws_terraform_deployed_components.yml +++ b/doc/user/data/self_managed/aws_terraform_deployed_components.yml @@ -20,14 +20,14 @@ rows: AWS Load Balancer Controller and Network Load Balancers for each Materialize instance Version: | - [v0.3.0+](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) + [v0.3.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-aws-terraform-module) - Component: | OpenEBS and NVMe instance storage to enable spill-to-disk Version: | - [v0.3.1+](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) + [v0.3.1+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-aws-terraform-module) - Component: | `cert-manager` and a self-signed `ClusterIssuer`. `ClusterIssuer` is deployed on subsequent runs after the `cert-manager` is running. Version: | - [v0.4.0+](/installation/appendix-terraforms/#materialize-on-aws-terraform-module) + [v0.4.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-aws-terraform-module) diff --git a/doc/user/data/self_managed/azure_terraform_deployed_components.yml b/doc/user/data/self_managed/azure_terraform_deployed_components.yml index b293fe8fe0a30..f30a2382efc4d 100644 --- a/doc/user/data/self_managed/azure_terraform_deployed_components.yml +++ b/doc/user/data/self_managed/azure_terraform_deployed_components.yml @@ -23,12 +23,12 @@ rows: `ClusterIssuer` is deployed on subsequent runs after the `cert-manager` is running. 
Version: | - [v0.3.0+](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) + [v0.3.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-azure-terraform-module) - Component: | Load balancers for each Materialize instance Version: | - [v0.3.1+](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) + [v0.3.1+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-azure-terraform-module) - Component: | OpenEBS and NVMe instance storage to enable spill-to-disk Version: | - [v0.4.0+](/installation/appendix-terraforms/#materialize-on-azure-terraform-module) + [v0.4.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-azure-terraform-module) diff --git a/doc/user/data/self_managed/default_ports.yml b/doc/user/data/self_managed/default_ports.yml new file mode 100644 index 0000000000000..92b7ed134446e --- /dev/null +++ b/doc/user/data/self_managed/default_ports.yml @@ -0,0 +1,11 @@ +columns: + - column: "Port" + - column: "Description" + +rows: + - "Port": "6875" + "Description": "For SQL connections to the database" + - "Port": "6876" + "Description": "For HTTP(S) connections to the database" + - "Port": "8080" + "Description": "For HTTP(S) connections to Materialize Console" diff --git a/doc/user/data/self_managed/gcp_terraform_deployed_components.yml b/doc/user/data/self_managed/gcp_terraform_deployed_components.yml index a094584f31f06..1cacbd46ce7bb 100644 --- a/doc/user/data/self_managed/gcp_terraform_deployed_components.yml +++ b/doc/user/data/self_managed/gcp_terraform_deployed_components.yml @@ -22,16 +22,16 @@ rows: - Component: | Load balancers for each Materialize instance Version: | - [v0.3.0+](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) + [v0.3.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-gcp-terraform-module) - Component: | `cert-manager` and a self-signed `ClusterIssuer`. `ClusterIssuer` is deployed on subsequent runs after the `cert-manager` is running. Version: | - [v0.3.0+](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) + [v0.3.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-gcp-terraform-module) - Component: | OpenEBS and NVMe instance storage to enable spill-to-disk Version: | - [v0.4.0+](/installation/appendix-terraforms/#materialize-on-gcp-terraform-module) + [v0.4.0+](/self-managed-deployments/appendix/legacy/appendix-legacy-terraform-releases/#materialize-on-gcp-terraform-module) diff --git a/doc/user/data/self_managed/installation.yml b/doc/user/data/self_managed/installation.yml new file mode 100644 index 0000000000000..e440ee31146c8 --- /dev/null +++ b/doc/user/data/self_managed/installation.yml @@ -0,0 +1,76 @@ +- name: installation-landing-intro + content: | + You can install Self-Managed Materialize on a Kubernetes cluster running + locally or on a cloud provider. Self-Managed Materialize requires: + + {{% self-managed/materialize-components-list %}} + + ## License key + + Starting in v26.0, Materialize requires a license key. 
+
+    {{< yaml-table data="self_managed/license_key" >}}
+
+    ## Installation guides
+
+    The following installation guides are available to help you get started:
+
+- name: installation-landing-guides-helm
+  content: |
+
+    ### Install using Helm Commands
+
+    | Guide | Description |
+    | ------------- | -------|
+    | [Install locally on Kind](/self-managed-deployments/installation/install-on-local-kind/) | Uses standard Helm commands to deploy Materialize to a Kind cluster in Docker.
+
+
+- name: installation-landing-guides-unified
+  content: |
+    ### Install using Unified Terraform Modules
+
+
+    | Guide | Description |
+    | ------------- | -------|
+    | [Install on AWS](/self-managed-deployments/installation/install-on-aws/) | Uses Terraform module to deploy Materialize to AWS Elastic Kubernetes Service (EKS).
+    | [Install on Azure](/self-managed-deployments/installation/install-on-azure/) | Uses Terraform module to deploy Materialize to Azure Kubernetes Service (AKS).
+    | [Install on GCP](/self-managed-deployments/installation/install-on-gcp/) | Uses Terraform module to deploy Materialize to Google Kubernetes Engine (GKE).
+
+
+- name: installation-landing-guides-legacy
+  content: |
+
+    ### Install using Legacy Terraform Modules
+
+    | Guide | Description |
+    | ------------- | ----------- |
+    | [Install on AWS (Legacy Terraform)](/self-managed-deployments/installation/legacy/install-on-aws-legacy/) | Uses legacy Terraform module to deploy Materialize to AWS Elastic Kubernetes Service (EKS).
+    | [Install on Azure (Legacy Terraform)](/self-managed-deployments/installation/legacy/install-on-azure-legacy/) | Uses legacy Terraform module to deploy Materialize to Azure Kubernetes Service (AKS).
+    | [Install on GCP (Legacy Terraform)](/self-managed-deployments/installation/legacy/install-on-gcp-legacy/) | Uses legacy Terraform module to deploy Materialize to Google Kubernetes Engine (GKE).
+
+- name: install-uses-self-signed-cluster-issuer
+  content: |
+    The example uses a self-signed ClusterIssuer. As such, you may encounter a
+    warning with regards to the certificate. In production, run with
+    certificates from an official Certificate Authority (CA) rather than
+    self-signed certificates.
+
+- name: installation-verify-status
+  content: |
+    {{< tabs >}}
+    {{< tab "Operator" >}}
+    To check the status of the Materialize operator, which runs in the `materialize` namespace:
+    ```bash
+    kubectl -n materialize get all
+    ```
+    {{< /tab >}}
+    {{< tab "Materialize instance" >}}
+    To check the status of the Materialize instance, which runs in the `materialize-environment` namespace:
+    ```bash
+    kubectl -n materialize-environment get all
+    ```
+    {{< /tab >}}
+    {{< /tabs >}}
+
+    If you run into an error during deployment, refer to the
+    [Troubleshooting](/self-managed-deployments/troubleshooting/) guide.
diff --git a/doc/user/data/self_managed/terraform_list.yml b/doc/user/data/self_managed/terraform_list.yml
index 94c2481bafd32..94f6f99e7fcf3 100644
--- a/doc/user/data/self_managed/terraform_list.yml
+++ b/doc/user/data/self_managed/terraform_list.yml
@@ -6,17 +6,17 @@ rows:
  - "Module": |
      [Amazon Web Services (AWS)](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws)
    "Description": |
-      An example Terraform module for deploying Materialize on AWS
-      See [Install on Azure](/installation/install-on-gcp/) for detailed instructions usage.
+      An example Terraform module for deploying Materialize on AWS.
+      See [Install on AWS](/self-managed-deployments/installation/install-on-aws/) for detailed usage instructions.
  - "Module": |
      [Azure](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure)
    "Description": |
-      An example Terraform module for deploying Materialize on Azure
-      See [Install on Azure](/installation/install-on-azure/) for detailed instructions usage.
+      An example Terraform module for deploying Materialize on Azure.
+      See [Install on Azure](/self-managed-deployments/installation/install-on-azure/) for detailed usage instructions.
  - "Module": |
      [Google Cloud Platform (GCP)](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp)
    "Description": |
-      An example Terraform module for deploying Materialize on GCP
-      See [Install on Azure](/installation/install-on-gcp/) for detailed instructions usage.
+      An example Terraform module for deploying Materialize on GCP.
+      See [Install on GCP](/self-managed-deployments/installation/install-on-gcp/) for detailed usage instructions.
diff --git a/doc/user/data/self_managed/legacy_terraform_list.yml b/doc/user/data/self_managed/terraform_list_legacy.yml
similarity index 50%
rename from doc/user/data/self_managed/legacy_terraform_list.yml
rename to doc/user/data/self_managed/terraform_list_legacy.yml
index 8170eb218d06f..d430f5c42e867 100644
--- a/doc/user/data/self_managed/legacy_terraform_list.yml
+++ b/doc/user/data/self_managed/terraform_list_legacy.yml
@@ -4,26 +4,26 @@ columns:
 rows:
  - "Sample Module": |
-      [terraform-helm-materialize](https://github.com/MaterializeInc/terraform-helm-materialize)
+      [terraform-helm-materialize (Legacy)](https://github.com/MaterializeInc/terraform-helm-materialize)
    "Description": |
      A sample Terraform module for installing the Materialize Helm chart into a Kubernetes cluster.
  - "Sample Module": |
-      [Materialize on AWS](https://github.com/MaterializeInc/terraform-aws-materialize)
+      [Materialize on AWS (Legacy)](https://github.com/MaterializeInc/terraform-aws-materialize)
    "Description": |
      A sample Terraform module for deploying Materialize on AWS Cloud Platform with all required infrastructure components.
-      See [Install on AWS](/installation/install-on-aws/) for an example usage.
+      See [Install on AWS (Legacy)](/self-managed-deployments/installation/legacy/install-on-aws-legacy/) for an example usage.
  - "Sample Module": |
-      [Materialize on Azure](https://github.com/MaterializeInc/terraform-azurerm-materialize)
+      [Materialize on Azure (Legacy)](https://github.com/MaterializeInc/terraform-azurerm-materialize)
    "Description": |
      A sample Terraform module for deploying Materialize on Azure with all required infrastructure components. See [Install on
-      Azure](/installation/install-on-azure/) for an example usage.
+      Azure](/self-managed-deployments/installation/legacy/install-on-azure-legacy/) for an example usage.
  - "Sample Module": |
-      [Materialize on Google Cloud Platform (GCP)](https://github.com/MaterializeInc/terraform-google-materialize)
+      [Materialize on GCP (Legacy)](https://github.com/MaterializeInc/terraform-google-materialize)
    "Description": |
      A sample Terraform module for deploying Materialize on Google Cloud Platform
-      (GCP) with all required infrastructure components. See [Install on GCP](/installation/install-on-gcp/) for an example usage.
+      (GCP) with all required infrastructure components. See [Install on GCP](/self-managed-deployments/installation/legacy/install-on-gcp-legacy/) for an example usage.
diff --git a/doc/user/data/self_managed/upgrades.yml b/doc/user/data/self_managed/upgrades.yml
new file mode 100644
index 0000000000000..0d6102bbde202
--- /dev/null
+++ b/doc/user/data/self_managed/upgrades.yml
@@ -0,0 +1,39 @@
+- name: upgrades-general-rules
+  content: |
+    When upgrading:
+
+    - **Always** check the [version specific upgrade
+      notes](/self-managed-deployments/upgrading/#version-specific-upgrade-notes).
+
+    - **Always** upgrade the operator **first** and ensure version compatibility
+      between the operator and the Materialize instance you are upgrading to.
+
+    - **Always** upgrade your Materialize instances **after** upgrading the operator
+      to ensure compatibility.
+
+- name: upgrade-landing-guides-helm
+  content: |
+
+    ### Upgrade using Helm Commands
+
+    | Guide | Description |
+    | ------------- | -------|
+    | [Upgrade on Kind](/self-managed-deployments/upgrading/upgrade-on-kind/) | Uses standard Helm commands to upgrade Materialize on a Kind cluster in Docker.
+
+
+- name: upgrade-landing-guides-unified
+  content: |
+    ### Upgrade using Unified Terraform Modules
+
+
+
+- name: upgrade-landing-guides-legacy
+  content: |
+
+    ### Upgrade using Legacy Terraform Modules
+
+    | Guide | Description |
+    | ------------- | ----------- |
+    | [Upgrade on AWS (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy/) | Uses legacy Terraform module to upgrade Materialize on AWS Elastic Kubernetes Service (EKS).
+    | [Upgrade on Azure (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy/) | Uses legacy Terraform module to upgrade Materialize on Azure Kubernetes Service (AKS).
+    | [Upgrade on GCP (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy/) | Uses legacy Terraform module to upgrade Materialize on Google Kubernetes Engine (GKE).
diff --git a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
index 50ec7c43e0ff5..811158c414722 100644
--- a/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
+++ b/doc/user/layouts/shortcodes/self-managed/aws-terraform-configs.html
@@ -6,4 +6,4 @@
 For more configuration options, you can use the `main.tf` file at the [root of the repository](https://github.com/MaterializeInc/terraform-aws-materialize/) instead. When running with the root `main.tf`, see [AWS required
-configuration](/installation/install-on-aws/legacy-terraform-module/appendix-configuration/).
+configuration](/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-aws/).
diff --git a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html
index f6da453b4ef44..cd042d70bfe90 100644
--- a/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html
+++ b/doc/user/layouts/shortcodes/self-managed/azure-terraform-configs.html
@@ -7,4 +7,4 @@
 the repository](https://github.com/MaterializeInc/terraform-azurerm-materialize/) instead. When running with the root `main.tf`, see [Azure required
-configuration](/installation/install-on-azure/legacy-terraform-module/appendix-configuration/).
+configuration](/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-azure/).
diff --git a/doc/user/layouts/shortcodes/self-managed/gcp-terraform-configs.html b/doc/user/layouts/shortcodes/self-managed/gcp-terraform-configs.html index c60a45ed552e1..54cb717e27239 100644 --- a/doc/user/layouts/shortcodes/self-managed/gcp-terraform-configs.html +++ b/doc/user/layouts/shortcodes/self-managed/gcp-terraform-configs.html @@ -6,4 +6,4 @@ For more configuration options, you can use the `main.tf` file at the [root of the repository](https://github.com/MaterializeInc/terraform-google-materialize/) instead. When running with the root `main.tf`, see [GCP required -configuration](/installation/install-on-gcp/appendix-gcp-configuration/). +configuration](/self-managed-deployments/appendix/legacy/appendix-configuration-legacy-gcp/). diff --git a/doc/user/layouts/shortcodes/self-managed/next-steps.html b/doc/user/layouts/shortcodes/self-managed/next-steps.html index 11312eb350be8..73e25e0ca7a49 100644 --- a/doc/user/layouts/shortcodes/self-managed/next-steps.html +++ b/doc/user/layouts/shortcodes/self-managed/next-steps.html @@ -3,4 +3,4 @@ [Quickstart](/get-started/quickstart/). - To start ingesting your own data from an external system like Kafka, MySQL or - PostgreSQL, check the documentation for [sources](/sql/create-source/). + PostgreSQL, see [Ingest data](/ingest-data/). diff --git a/doc/user/layouts/shortcodes/self-managed/troubleshoot-console-mz_catalog_server_blurb.md b/doc/user/layouts/shortcodes/self-managed/troubleshoot-console-mz_catalog_server_blurb.md index ebd012ee58fdf..6e3c8431e46e8 100644 --- a/doc/user/layouts/shortcodes/self-managed/troubleshoot-console-mz_catalog_server_blurb.md +++ b/doc/user/layouts/shortcodes/self-managed/troubleshoot-console-mz_catalog_server_blurb.md @@ -1,5 +1,5 @@ If you experience long loading screens or unresponsiveness in the Materialize Console, we recommend increasing the size of the `mz_catalog_server` cluster. Refer to the [Troubleshooting Console -Unresponsiveness](/installation/troubleshooting/#troubleshooting-console-unresponsiveness) +Unresponsiveness](/self-managed-deployments/troubleshooting/#troubleshooting-console-unresponsiveness) guide. diff --git a/doc/user/layouts/shortcodes/self-managed/versions/get-latest-version.html b/doc/user/layouts/shortcodes/self-managed/versions/get-latest-version.html new file mode 100644 index 0000000000000..b8cdd679e4bde --- /dev/null +++ b/doc/user/layouts/shortcodes/self-managed/versions/get-latest-version.html @@ -0,0 +1,3 @@ +{{ $environmentd_version := +site.Data.self_managed.latest_versions.environmentd_version }} +{{- $environmentd_version -}} diff --git a/doc/user/layouts/shortcodes/self-managed/versions/step-install-helm-version-local-minikube-install.html b/doc/user/layouts/shortcodes/self-managed/versions/step-install-helm-version-local-minikube-install.html deleted file mode 100644 index ff81308fcdebe..0000000000000 --- a/doc/user/layouts/shortcodes/self-managed/versions/step-install-helm-version-local-minikube-install.html +++ /dev/null @@ -1,13 +0,0 @@ -{{ $operator_version := site.Data.self_managed.latest_versions.operator_helm_chart_version }} - -1. Install the Materialize Operator. The operator will be installed in the -`materialize` namespace. 
- - ```shell - helm install my-materialize-operator materialize/materialize-operator \ - --namespace=materialize --create-namespace \ - --version {{ $operator_version }} \ - --set operator.cloudProvider.region=minikube \ - --set observability.podMetrics.enabled=true \ - -f sample-values.yaml - ``` diff --git a/doc/user/shared-content/rbac-sm/enable-rbac.md b/doc/user/shared-content/rbac-sm/enable-rbac.md index 44afc438b1930..b9160b76575f9 100644 --- a/doc/user/shared-content/rbac-sm/enable-rbac.md +++ b/doc/user/shared-content/rbac-sm/enable-rbac.md @@ -8,12 +8,13 @@ enable RBAC, set the system parameter `enable_rbac_checks` to `'on'` or `True`. You can enable the parameter in one of the following ways: - For [local installations using - Kind/Minikube](/installation/#installation-guides), set `spec.enableRbac: + Kind/Minikube](/self-managed-deployments/installation/#installation-guides), set `spec.enableRbac: true` option when instantiating the Materialize object. - For [Cloud deployments using Materialize's - Terraforms](/installation/#installation-guides), set `enable_rbac_checks` in - the environment CR via the `environmentdExtraArgs` flag option. + Terraforms](/self-managed-deployments/installation/#installation-guides), set + `enable_rbac_checks` in the environment CR via the `environmentdExtraArgs` + flag option. - After the Materialize instance is running, run the following command as `mz_system` user: diff --git a/doc/user/shared-content/self-managed/general-rules-for-upgrades.md b/doc/user/shared-content/self-managed/general-rules-for-upgrades.md index 3918b65011f2d..86f661ddcde23 100644 --- a/doc/user/shared-content/self-managed/general-rules-for-upgrades.md +++ b/doc/user/shared-content/self-managed/general-rules-for-upgrades.md @@ -1,10 +1,10 @@ Whe upgrading: -- **Always** upgrade the operator first and ensure version compatibility between - the operator and the Materialize instance you are upgrading to. +- **Always** check the [version specific upgrade + notes](/self-managed-deployments/upgrading/#version-specific-upgrade-notes). -- **Always** upgrade your Materialize instances after upgrading the operator to - ensure compatibility. +- **Always** upgrade the operator **first** and ensure version compatibility + between the operator and the Materialize instance you are upgrading to. -- **Always check** the [version specific upgrade - notes](/installation/upgrading/#version-specific-upgrade-notes). +- **Always** upgrade your Materialize instances **after** upgrading the operator + to ensure compatibility. diff --git a/doc/user/shared-content/self-managed/install-landing-page.md b/doc/user/shared-content/self-managed/install-landing-page.md deleted file mode 100644 index decb48f54fd2d..0000000000000 --- a/doc/user/shared-content/self-managed/install-landing-page.md +++ /dev/null @@ -1,31 +0,0 @@ -You can install Self-Managed Materialize on a Kubernetes cluster running locally -or on a cloud provider. Self-Managed Materialize requires: - -{{% self-managed/materialize-components-list %}} - -## License key - -Starting in v26.0, Materialize requires a license key. 
- -{{< yaml-table data="self_managed/license_key" >}} - -## Install - -### Installation guides - -The following installation guides are available: - -| | Notes | -| ------------- | -------| -| [Install locally on kind](/installation/install-on-local-kind/) | -| [Deploy Materialize to AWS Elastic Kubernetes Service (EKS)](/installation/install-on-aws/) | Uses Materialize provided Terraform | -| [Deploy Materialize to Azure Kubernetes Service (AKS)](/installation/install-on-azure/) | Uses Materialize provided Terraform | -| [Deploy Materialize to Google Kubernetes Engine (GKE)](/installation/install-on-gcp/) | Uses Materialize provided Terraform | - -See also: -- [AWS Deployment - guidelines](/installation/install-on-aws/appendix-deployment-guidelines/#recommended-instance-types) -- [GCP Deployment - guidelines](/installation/install-on-gcp/appendix-deployment-guidelines/#recommended-instance-types) -- [Azure Deployment - guidelines](/installation/install-on-azure/appendix-deployment-guidelines/#recommended-instance-types) diff --git a/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md b/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md index 946aaeb166d4a..b51a9ec66812f 100644 --- a/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md +++ b/doc/user/shared-content/self-managed/prepare-nodes-and-upgrade.md @@ -15,7 +15,7 @@ Materialize-provided Terraforms. {{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" >}} -See also [Upgrade Overview](/installation/upgrading/) +See also [General notes for upgrades](/self-managed-deployments/upgrading/) {{< /tip >}} 1. Label existing scratchfs/lgalloc node groups. diff --git a/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md b/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md index bda5cb0245b66..a59c43a810a08 100644 --- a/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md +++ b/doc/user/shared-content/self-managed/upgrade-notes/v26.0.md @@ -8,7 +8,7 @@ - `ImmediatelyPromoteCausingDowntime` For more information, see - [`rolloutStrategy`](/installation/upgrading/#rollout-strategies). + [`rolloutStrategy`](/self-managed-deployments/upgrading/#rollout-strategies). - New requirements were introduced for [license keys](/releases/#license-key). To upgrade, you will first need to add a license key to the `backendSecret` @@ -26,4 +26,4 @@ `v0.6.1` of the Terraform. - If you are **not** using a Materialize-provided Terraform, refer - to [Prepare for swap and upgrade to v26.0](/installation/upgrade-to-swap/). + to [Prepare for swap and upgrade to v26.0](/self-managed-deployments/appendix/upgrade-to-swap/). 
From d35656f06c95c19082e95527669db98371d2e221 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Mon, 8 Dec 2025 11:19:09 -0500 Subject: [PATCH 04/11] fix change in release file location --- doc/user/content/self-managed-deployments/appendix/_index.md | 1 - .../self-managed-deployments/upgrading/upgrade-on-aws.md | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/user/content/self-managed-deployments/appendix/_index.md b/doc/user/content/self-managed-deployments/appendix/_index.md index fe6aadb199778..098b4a41ff641 100644 --- a/doc/user/content/self-managed-deployments/appendix/_index.md +++ b/doc/user/content/self-managed-deployments/appendix/_index.md @@ -15,4 +15,3 @@ disable_list: true - [Appendix: Materialize CRD Field Descriptions](./materialize-crd-field-descriptions/) - [Appendix: Cluster sizes](./appendix-cluster-sizes/) - [Appendix: Prepare for swap and upgrade to v26.0](./upgrade-to-swap/) -- [Appendix: Self-managed release versions](./release-versions/) diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md index 7b132ec3e2aac..a708bfbd62c96 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md @@ -87,7 +87,7 @@ The Materialize Kubernetes operator is deployed via Helm and can be updated thro 1. Upgrade the Materialize operator using Helm: - If your deployment has not specified custom settings: - + ```bash helm upgrade materialize-operator materialize/materialize-operator \ -n materialize \ From d6bb986a73eb8b4d6e0caeb5ce398747f4aefb69 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Mon, 8 Dec 2025 12:51:20 -0500 Subject: [PATCH 05/11] Update upgrade + move crd + operator configs --- .../self-managed-deployments/_index.md | 22 +- .../appendix/_index.md | 2 - .../deployment-guidelines/_index.md | 9 +- .../content/self-managed-deployments/faq.md | 7 +- .../installation/install-on-aws.md | 7 +- .../installation/install-on-azure.md | 4 +- .../installation/install-on-gcp.md | 97 +++++---- .../legacy/install-on-aws-legacy.md | 3 +- .../materialize-crd-field-descriptions.md | 4 +- ...iguration.md => operator-configuration.md} | 4 +- .../release-versions.md | 2 +- .../upgrading/_index.md | 126 ++++++++---- .../upgrading/legacy/_index.md | 2 +- .../legacy/upgrade-on-azure-legacy.md | 2 +- .../upgrading/legacy/upgrade-on-gcp-legacy.md | 2 +- .../upgrading/upgrade-on-aws.md | 189 +++--------------- .../upgrading/upgrade-on-azure.md | 113 +++++++++++ .../upgrading/upgrade-on-gcp.md | 121 +++++++++++ .../upgrading/upgrade-on-kind.md | 2 +- doc/user/data/self_managed/upgrades.yml | 159 ++++++++++++++- 20 files changed, 606 insertions(+), 271 deletions(-) rename doc/user/content/self-managed-deployments/{appendix => }/materialize-crd-field-descriptions.md (84%) rename doc/user/content/self-managed-deployments/{appendix/configuration.md => operator-configuration.md} (96%) create mode 100644 doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md create mode 100644 doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md diff --git a/doc/user/content/self-managed-deployments/_index.md b/doc/user/content/self-managed-deployments/_index.md index 6dd2790e881d9..0b33e376d80f3 100644 --- a/doc/user/content/self-managed-deployments/_index.md +++ b/doc/user/content/self-managed-deployments/_index.md @@ -107,7 +107,7 @@ The operator watches for Materialize 
custom resources and creates/manages all th For configuration options for the Materialize Operator, see the [Materialize Operator Configuration -page](/self-managed-deployments/appendix/configuration/). +page](/self-managed-deployments/operator-configuration/). ## Materialize Instance @@ -151,7 +151,7 @@ A Materialize instance manages: To deploy Materialize instances with the operator, create and apply Materialize custom resources definitions(CRDs). For a full list of fields available for the Materialize CR, see [Materialize CRD Field -Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/). +Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/). ```yaml apiVersion: materialize.cloud/v1alpha1 @@ -179,7 +179,7 @@ watches for updates but does not roll out the changes. For a full list of fields available for the Materialize CR, see [Materialize CRD Field -Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/). +Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/). See also: @@ -187,8 +187,8 @@ See also: ### Connecting to an instance -Once deployed, you interact with a Materialize instance through standard -PostgreSQL-compatible tools and drivers: +Once deployed, you interact with a Materialize instance through the Materialize +Console or standard PostgreSQL-compatible tools and drivers: ```bash # Connect with psql @@ -279,8 +279,10 @@ components work together: operator detects this and creates all necessary Kubernetes resources, including the `environmentd`, `balancerd`, and `console` pods. -1. **Connect to the instance**: Use a SQL client to connect to the - `environmentd` service endpoint. +1. **Connect to the instance**: Use the Materialize Console on port 8080 (or SQL + client on port 6875) to connect to the `environmentd` service endpoint. + + If authentication is enabled, you must set up users. 1. **Create clusters**: Issue SQL commands to create clusters. Materialize coordinates with the operator to provision StatefulSets for replicas. @@ -340,8 +342,10 @@ SQL level. 
## Related pages - [Installation guides](/self-managed-deployments/installation/) -- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) -- [Materialize CRD Field Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) - [Operational guidelines](/self-managed-deployments/deployment-guidelines/) - [Clusters concept page](/concepts/clusters/) - [Materialize architecture overview](/concepts/) diff --git a/doc/user/content/self-managed-deployments/appendix/_index.md b/doc/user/content/self-managed-deployments/appendix/_index.md index 098b4a41ff641..65419c0b88596 100644 --- a/doc/user/content/self-managed-deployments/appendix/_index.md +++ b/doc/user/content/self-managed-deployments/appendix/_index.md @@ -11,7 +11,5 @@ disable_list: true ## Table of contents -- [Appendix: Materialize Operator Configuration Parameters](./configuration/) -- [Appendix: Materialize CRD Field Descriptions](./materialize-crd-field-descriptions/) - [Appendix: Cluster sizes](./appendix-cluster-sizes/) - [Appendix: Prepare for swap and upgrade to v26.0](./upgrade-to-swap/) diff --git a/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md b/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md index 9dc3c6dbed28e..62fba97a2ef90 100644 --- a/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md +++ b/doc/user/content/self-managed-deployments/deployment-guidelines/_index.md @@ -1,7 +1,7 @@ --- title: "Deployment guidelines" description: "" -disable_list: false +disable_list: true menu: main: parent: "sm-deployments" @@ -16,3 +16,10 @@ aliases: ## Available deployment guidelines The following provides guidelines for cloud-specific deployments: + +- [AWS Deployment + Guidelines](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines/) +- [Azure Deployment + Guidelines](/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines/) +- [GCP Deployment + Guidelines](/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines/) diff --git a/doc/user/content/self-managed-deployments/faq.md b/doc/user/content/self-managed-deployments/faq.md index 710a25a4d0f5f..a355d5fbf547d 100644 --- a/doc/user/content/self-managed-deployments/faq.md +++ b/doc/user/content/self-managed-deployments/faq.md @@ -1,5 +1,5 @@ --- -title: "FAQ: Self-managed deployments" +title: "FAQ" description: "Frequently asked questions about self-managed deployments." aliases: - /self-hosted/faq/ @@ -28,3 +28,8 @@ existing installation, run: ```bash kubectl -n materialize-environment patch secret materialize-backend -p '{"stringData":{"license_key":""}}' --type=merge ``` + +## How can I downgrade Self-Managed Materialize? 
+ +{{< include-from-yaml data="self_managed/upgrades" +name="downgrade-restriction" >}} diff --git a/doc/user/content/self-managed-deployments/installation/install-on-aws.md b/doc/user/content/self-managed-deployments/installation/install-on-aws.md index 2a3143031959b..f65d9846fdc93 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-aws.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-aws.md @@ -84,8 +84,8 @@ This example provisions the following infrastructure: | Resource | Description | |----------|-------------| -| Operator | Materialize Kubernetes operator | -| Instance | Single Materialize instance in `materialize-environment` namespace | +| Operator | Materialize Kubernetes operator in the `materialize` namespace | +| Instance | Single Materialize instance in the `materialize-environment` namespace | | Network Load Balancer | Dedicated internal NLB for Materialize access {{< yaml-table data="self_managed/default_ports" >}} | @@ -271,5 +271,6 @@ guide](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines ## See Also -- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index f2135765fd440..f668199174f0c 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -84,8 +84,8 @@ This example provisions the following infrastructure: | Resource | Description | |----------|-------------| -| Operator | Materialize Kubernetes operator | -| Instance | Single Materialize instance in `materialize-environment` namespace | +| Operator | Materialize Kubernetes operator in the `materialize` namespace | +| Instance | Single Materialize instance in the `materialize-environment` namespace | | Load Balancers | Internal Azure Load Balancers for Materialize access | ## Prerequisites diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md index 1d09caf4494d2..3abe3f7fbb19a 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md @@ -79,24 +79,22 @@ This example provisions the following infrastructure: | Resource | Description | |----------|-------------| -| Operator | Materialize Kubernetes operator in `materialize` namespace | -| Instance | Single Materialize instance in `materialize-environment` namespace | +| Operator | Materialize Kubernetes operator in the `materialize` namespace | +| Instance | Single Materialize instance in the `materialize-environment` namespace | | Load Balancers | GCP Load Balancers for Materialize access {{< yaml-table data="self_managed/default_ports" >}} | ## Prerequisites ### GCP Account Requirements -- A Google account with permission to enable Google Cloud APIs/services on for -your project. 
-- A [Google service -account](https://docs.cloud.google.com/iam/docs/service-accounts-create#creating) -with appropriate permissions to create: - - GKE clusters - - Cloud SQL instances - - Cloud Storage buckets - - VPC networks and networking resources - - Service accounts and IAM bindings +A Google account with permission to: +- Enable Google Cloud APIs/services on for your project. +- Create: + - GKE clusters + - Cloud SQL instances + - Cloud Storage buckets + - VPC networks and networking resources + - Service accounts and IAM bindings ### Required Tools @@ -130,14 +128,23 @@ with appropriate permissions to create: cd materialize-terraform-self-managed/gcp/examples/simple ``` -1. Initialize the gcloud CLI (`gcloud init`) to specify the GCP project you want - to use. For details, see the [Initializing the gcloud CLI - documentation](https://cloud.google.com/sdk/docs/initializing#initialize_the). +1. Authenticate to GCP with your user account. - {{< tip >}} - You do not need to configure a default Compute Region and Zone as you will - specify the region. - {{}} + ```bash + gcloud auth login + ``` + +1. Find the list of GCP projects: + + ```bash + gcloud projects list + ``` + +1. Set your active GCP project, substitute with your ``. + + ```bash + gcloud config set project + ``` 1. Enable the following APIs for your project: @@ -152,35 +159,25 @@ with appropriate permissions to create: gcloud services enable storage.googleapis.com # For Cloud Storage buckets ``` -1. For the service account, authenticate to allow Terraform to - interact with your GCP project. For details, see [Terraform: Google Cloud - Provider Configuration - reference](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#authentication). - - For example, if using [User Application Default - Credentials](https://cloud.google.com/sdk/gcloud/reference/auth/application-default), - you can run the following command: +1. Authenticate application default credentials for Terraform ```bash gcloud auth application-default login ``` - {{< tip >}} - If using `GOOGLE_APPLICATION_CREDENTIALS`, use absolute path to your key file. - {{}} - - ### Step 2: Configure Terraform Variables -1. Create a `terraform.tfvars` file with the following variables: +1. Create a `terraform.tfvars` file and specify the following variables: - - `project_id`: GCP project ID - - `name_prefix`: Prefix for all resource names (e.g., `simple-demo`) - - `region`: GCP region for deployment (e.g., `us-central1`) - - `license_key`: Materialize license key - - `labels`: Map of labels to apply to resources + | Variable | Description | + | ----------- | ----------------------------| + | `project_id` | Set to your GCP project ID. | + | `name_prefix` | Set a prefix for all resource names (e.g., `simple-demo`) as well as your release name for the Operator | + | `region` | Set the GCP region for the deployment (e.g., `us-central1`). | + | `license_key` | Set to your Materialize license key. | + | `labels` | Set to the labels to apply to resources. | - ```hcl + ```bash project_id = "my-gcp-project" name_prefix = "simple-demo" region = "us-central1" @@ -218,16 +215,28 @@ with appropriate permissions to create: to proceed. 1. From the output, you will need the following field(s) to connect: + - `` + +1. Configure `kubectl` to connect to your GKE cluster, replacing: + + - `` with the name of your GKE cluster. Your cluster name + can be found in the Terraform output. For the sample example, the cluster + name is `-eks`. -### Step 4. 
Optional. Verify the deployment. + - `` with the region of your GKE cluster. Your region can be + found in the Terraform output `gke_cluster_location`, corresponds to the + `region` value in your `terraform.tfvars`. -1. Configure `kubectl` to connect to your cluster: + - `` with your GCP project ID. ```bash - gcloud container clusters get-credentials \ - --region \ - --project + gcloud container clusters get-credentials \ + --region \ + --project ``` + +### Step 4. Optional. Verify the status of your deployment + 1. Check the status of your deployment: {{% include-from-yaml data="self_managed/installation" name="installation-verify-status" %}} diff --git a/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md b/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md index 10a8ad023265a..a06b61d473ef0 100644 --- a/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md +++ b/doc/user/content/self-managed-deployments/installation/legacy/install-on-aws-legacy.md @@ -474,5 +474,6 @@ components: ## See also -- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md b/doc/user/content/self-managed-deployments/materialize-crd-field-descriptions.md similarity index 84% rename from doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md rename to doc/user/content/self-managed-deployments/materialize-crd-field-descriptions.md index 480f04cba30ed..bca28ab7a50d5 100644 --- a/doc/user/content/self-managed-deployments/appendix/materialize-crd-field-descriptions.md +++ b/doc/user/content/self-managed-deployments/materialize-crd-field-descriptions.md @@ -3,9 +3,9 @@ title: "Materialize CRD Field Descriptions" description: "Reference page on Materialize CRD Fields" menu: main: - parent: "sm-deployments-appendix" + parent: "sm-deployments" identifier: "materialize-crd-field-descriptions" - weight: 15 + weight: 66 aliases: - /installation/appendix-materialize-crd-field-descriptions/ --- diff --git a/doc/user/content/self-managed-deployments/appendix/configuration.md b/doc/user/content/self-managed-deployments/operator-configuration.md similarity index 96% rename from doc/user/content/self-managed-deployments/appendix/configuration.md rename to doc/user/content/self-managed-deployments/operator-configuration.md index da618b25448e5..8ce9313942a04 100644 --- a/doc/user/content/self-managed-deployments/appendix/configuration.md +++ b/doc/user/content/self-managed-deployments/operator-configuration.md @@ -8,8 +8,8 @@ aliases: - /installation/configuration/ menu: main: - parent: "sm-deployments-appendix" - weight: 10 + parent: "sm-deployments" + weight: 65 --- ## Configure the Materialize operator diff --git a/doc/user/content/self-managed-deployments/release-versions.md b/doc/user/content/self-managed-deployments/release-versions.md index af435a4ec2e81..a200882295102 100644 --- a/doc/user/content/self-managed-deployments/release-versions.md +++ b/doc/user/content/self-managed-deployments/release-versions.md @@ -4,7 +4,7 @@ description: "" menu: main: parent: "sm-deployments" - weight: 85 + weight: 95 aliases: - /installation/release-versions/ --- diff --git 
a/doc/user/content/self-managed-deployments/upgrading/_index.md b/doc/user/content/self-managed-deployments/upgrading/_index.md index bee4ce077abb9..7b3814db1984a 100644 --- a/doc/user/content/self-managed-deployments/upgrading/_index.md +++ b/doc/user/content/self-managed-deployments/upgrading/_index.md @@ -9,73 +9,114 @@ menu: identifier: "upgrading" --- -## Upgrading guidelines +Materialize releases new Self-Managed versions per the schedule outlined in [Release schedule](/releases/schedule/#self-managed-release-schedule). -{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" >}} +## General rules for upgrading -## Upgrade guides +{{< include-from-yaml data="self_managed/upgrades" +name="upgrades-general-rules" >}} -The following upgrade guides are available: +{{< note >}} -{{% include-from-yaml data="self_managed/upgrades" -name="upgrade-landing-guides-helm" %}} - -{{% include-from-yaml data="self_managed/upgrades" -name="upgrade-landing-guides-legacy" %}} +{{< include-from-yaml data="self_managed/upgrades" +name="upgrade-major-version-restriction" >}} +{{< /note >}} -## Upgrading the Helm Chart and Kubernetes Operator +## Upgrading the Helm Chart and Materialize Operator {{< important >}} -When upgrading Materialize, always upgrade the Operator first. +When upgrading Materialize, always upgrade the Helm Chart and Materialize +Operator first. {{}} -The Materialize Kubernetes Operator is deployed via Helm and can be updated through standard Helm upgrade commands. +### Update the Helm Chart repository + +To update your Materialize Helm Chart repository: ```shell -helm upgrade my-materialize-operator materialize/misc/helm-charts/operator +helm repo update materialize ``` -If you have custom values, make sure to include your values file: +View the available chart versions: ```shell -helm upgrade my-materialize-operator materialize/misc/helm-charts/operator -f my-values.yaml +helm search repo materialize/materialize-operator --versions ``` -## Upgrading Materialize Instances +### Upgrade your Materialize Operator -To minimize unexpected downtime and avoid connection drops at critical -periods for your application, the upgrade process involves two steps: +The Materialize Kubernetes Operator is deployed via Helm and can be updated +through standard `helm upgrade` command: -- First, stage the changes (`environmentdImageRef` with the new version) to the - Materialize custom resource. The Operator watches for changes but does not - automatically roll out the changes. +{{% include-syntax file="self_managed/upgrades" +example="syntax-helm-upgrade-operator" %}} -- Second, roll out the changes by specifying a new UUID for `requestRollout`. +You can use `helm list` to find your release name. 
For example, if your Operator
+is running in the namespace `materialize`, run `helm list`:
+
+```shell
+helm list -n materialize
+```
+
+Retrieve the name associated with the `materialize-operator` **CHART**; for
+example, `my-demo` in the following helm list:
+```none
+NAME      NAMESPACE    REVISION  UPDATED                                STATUS    CHART                          APP VERSION
+my-demo   materialize  1         2025-12-08 11:39:50.185976 -0500 EST  deployed  materialize-operator-v26.1.0   v26.1.0
+```
+
+Then, to upgrade:
 
-### Updating the `environmentdImageRef`
+```shell
+helm upgrade -n materialize my-demo materialize/misc/helm-charts/operator \
+   -f my-values.yaml \
+   --version {{< self-managed/versions/get-latest-version >}}
+```
 
-When upgrading your Materialize instances, you'll first want to update the
-`environmentdImageRef` field in the Materialize custom resource spec.
+## Upgrading Materialize Instances
 
-To find a compatible version with your currently deployed Materialize Operator, check the `appVersion` in the Helm repository.
+**After** you have upgraded your Materialize Operator, upgrade your Materialize
+instance(s) to the **APP Version** of the Operator. To find the version of your
+currently deployed Materialize Operator:
 
 ```shell
 helm list -n materialize
 ```
 
-Using the returned version, we can construct an image ref.
+You will use the returned **App Version** for the updated `environmentdImageRef`
+value. Specifically, for your Materialize instance(s), set the
+`environmentdImageRef` value to the new version:
 
 ```
-environmentdImageRef: docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}
+spec:
+  environmentdImageRef: docker.io/materialize/environmentd:
 ```
 
-The following is an example of how to patch the version.
+
+To minimize unexpected downtime and avoid connection drops at critical
+periods for your application, the upgrade process involves two steps:
+
+- First, stage the changes (update the `environmentdImageRef` with the new
+  version) to the Materialize custom resource. The Operator watches for changes
+  but does not automatically roll out the changes.
+
+- Second, roll out the changes by specifying a new UUID for `requestRollout`.
+
+
+### Stage the Materialize instance version change
+
+To stage the Materialize instance version upgrade, update the
+`environmentdImageRef` field in the Materialize custom resource spec to the
+compatible version of your currently deployed Materialize Operator.
+
+To stage, but **not** roll out, the Materialize instance version upgrade, you can
+use the `kubectl patch` command; for example, if the **App Version** is {{< self-managed/versions/get-latest-version >}}:
+
 ```shell
-# For version updates, first update the image reference
 kubectl patch materialize \
   -n \
   --type='merge' \
@@ -83,14 +124,14 @@ kubectl patch materialize \
 ```
 
 {{< note >}}
-Until you specify a new `requestRollout`, the Operator
-watches for updates but does not roll out the changes.
+Until you specify a new `requestRollout`, the Operator watches for updates but
+does not roll out the changes.
 {{< /note >}}
 
 ### Applying the changes via `requestRollout`
 
-To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID.
+To apply the staged changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID.
 
 Be sure to consult the [Rollout Configurations](#rollout-configuration) to ensure you've selected the correct rollout behavior. 
```shell # Then trigger the rollout with a new UUID @@ -100,7 +141,6 @@ kubectl patch materialize \ -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" ``` - It is possible to combine both operations in a single command if preferred: ```shell @@ -169,6 +209,20 @@ kubectl get materialize -n materialize-environment -w # Check the logs of the operator kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize ``` + +## Upgrade guides + +The following upgrade guides are available: + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-landing-guides-helm" %}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-landing-guides-unified" %}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-landing-guides-legacy" %}} + ## Version Specific Upgrade Notes ### Upgrading to `v26.1` and later versions @@ -186,9 +240,9 @@ kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize ## See also - [Materialize Operator - Configuration](/self-managed-deployments/appendix/configuration/) + Configuration](/self-managed-deployments/operator-configuration/) - [Materialize CRD Field - Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md b/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md index d79598e75f0a5..f576539376766 100644 --- a/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/_index.md @@ -4,7 +4,7 @@ description: "Upgrading Self-Managed Materialize using legacy Terraform modules" menu: main: parent: "upgrading" - weight: 30 + weight: 60 identifier: "upgrading-legacy" --- diff --git a/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md index 3e89d7d341a78..8ce8412780c05 100644 --- a/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy.md @@ -7,7 +7,7 @@ menu: main: parent: "upgrading-legacy" identifier: "upgrade-on-azure-legacy" - weight: 20 + weight: 30 --- diff --git a/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md index 9d2eac02df589..4694f4d1f75f5 100644 --- a/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md +++ b/doc/user/content/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy.md @@ -7,7 +7,7 @@ menu: main: parent: "upgrading-legacy" identifier: "upgrade-on-gcp-legacy-terraform-module" - weight: 30 + weight: 40 --- {{< annotation type="Disambiguation" >}} diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md index a708bfbd62c96..9bca5e9ee9aba 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md @@ -8,16 +8,19 @@ menu: --- The following tutorial upgrades your Materialize deployment running on AWS -Elastic Kubernetes Service (EKS). 
The tutorial assumes you have installed -Materialize on AWS using the instructions on [Install on +Elastic Kubernetes Service (EKS). The tutorial assumes you have installed the +example on [Install on AWS](/self-managed-deployments/installation/install-on-aws/). ## Upgrade guidelines -{{< include-md file="shared-content/self-managed/general-rules-for-upgrades.md" ->}} +{{% include-from-yaml data="self_managed/upgrades" +name="upgrades-general-rules" %}} -{{< include-md file="shared-content/self-managed/version-compatibility-upgrade-banner.md" >}} +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="upgrade-major-version-restriction" >}} +{{< /note >}} ## Prerequisites @@ -28,7 +31,7 @@ AWS](/self-managed-deployments/installation/install-on-aws/). - [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) - [Helm 3.2.0+](https://helm.sh/docs/intro/install/) -## Procedure +## Upgrade process {{< important >}} @@ -40,7 +43,8 @@ The following procedure performs a rolling upgrade, where both the old and new M 1. Open a Terminal window. -1. Configure AWS CLI with your AWS credentials. For details, see the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). +1. Configure AWS CLI with your AWS credentials. For details, see the [AWS + documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). 1. Go to the Terraform directory for your Materialize deployment. For example, if you deployed from the `aws/examples/simple` directory: @@ -67,174 +71,43 @@ The following procedure performs a rolling upgrade, where both the old and new M For help with `kubectl` commands, see [kubectl Quick reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). -### Step 2: Upgrading the Materialize Operator +### Step 2: Update the Helm Chart {{< important >}} -When upgrading Materialize, always upgrade the operator first. +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} {{}} -The Materialize Kubernetes operator is deployed via Helm and can be updated through standard Helm upgrade commands. +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-update-helm-chart" %}} +### Step 3: Upgrade the Materialize Operator -1. Check the current operator version: - - ```bash - helm list -n materialize - ``` - -1. Upgrade the Materialize operator using Helm: - - - If your deployment has not specified custom settings: - - ```bash - helm upgrade materialize-operator materialize/materialize-operator \ - -n materialize \ - --version - ``` - - Replace `` with the desired operator version. - - - If your deployment has specified custom settings, make sure to include your - values file: - - ```bash - helm upgrade materialize-operator materialize/materialize-operator \ - -n materialize \ - --version \ - -f my-values.yaml - ``` - -1. Verify that the operator is running: - - ```bash - kubectl -n materialize get all - ``` - - Verify the operator upgrade by checking its events: - - ```bash - kubectl -n materialize describe pod -l app.kubernetes.io/name=materialize-operator - ``` - - - The **Containers** section should show the `--helm-chart-version` argument set to the new version. - - The **Events** section should list that the new version of the orchestratord has been pulled. 
- -## Step 3: Upgrading Materialize Instances - -In order to minimize unexpected downtime and avoid connection drops at critical periods for your application, changes are not immediately and automatically rolled out by the Operator. Instead, the upgrade process involves two steps: -- First, staging spec changes to the Materialize custom resource. -- Second, applying the changes via a `requestRollout`. - -### Updating the `environmentdImageRef` - -To find a compatible version with your currently deployed Materialize operator, check the `appVersion` in the Helm repository: - -```bash -helm list -n materialize -``` - -Using the returned version, we can construct an image ref. We always recommend using the official Materialize image repository `docker.io/materialize/environmentd`. - -The following is an example of how to patch the version: - -```bash -# For version updates, first update the image reference -kubectl patch materialize \ - -n \ - --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" -``` - -Replace: -- `` with your Materialize instance name (typically a UUID). -- `` with your instance namespace (typically `materialize-environment`). - -### Applying the changes via `requestRollout` - -To apply changes and kick off the Materialize instance upgrade, you must update the `requestRollout` field in the Materialize custom resource spec to a new UUID. - -Be sure to consult the [Rollout Configurations](/self-managed-deployments/upgrading/#rollout-strategies) to ensure you've selected the correct rollout behavior. - -```bash -# Then trigger the rollout with a new UUID -kubectl patch materialize \ - -n \ - --type='merge' \ - -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" -``` - -It is possible to combine both operations in a single command if preferred: - -```bash -kubectl patch materialize \ - -n materialize-environment \ - --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" -``` - -### Using YAML Definition - -Alternatively, you can update your Materialize custom resource definition directly: - -```yaml -apiVersion: materialize.cloud/v1alpha1 -kind: Materialize -metadata: - name: - namespace: materialize-environment -spec: - environmentdImageRef: docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}} # Update version as needed - requestRollout: # Generate new UUID - rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. Can be WaitUntilReady or ImmediatelyPromoteCausingDowntime - backendSecretName: materialize-backend -``` - -Apply the updated definition: - -```bash -kubectl apply -f materialize.yaml -``` - -## Verifying the Upgrade - -After initiating the rollout, you can monitor the status field of the Materialize custom resource to check on the upgrade. 
- -```bash -# Watch the status of your Materialize environment -kubectl get materialize -n materialize-environment -w - -# Check the logs of the operator -kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize -``` - -Verify that the components are running after the upgrade: +{{< important >}} -```bash -kubectl -n materialize-environment get all -``` +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} -Verify upgrade by checking the `balancerd` events: +{{}} -```bash -kubectl -n materialize-environment describe pod -l app=balancerd -``` +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-operator" %}} -The **Events** section should list that the new version of the `balancerd` has been pulled. +### Step 4: Upgrading Materialize Instances -Verify the upgrade by checking the `environmentd` events: +{{< important >}} -```bash -kubectl -n materialize-environment describe pod -l app=environmentd -``` +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} -The **Events** section should list that the new version of the `environmentd` has been pulled. +{{}} -Open the Materialize Console. The Console should display the new version. +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-instance" %}} ## See also -- [Materialize Operator Configuration](/self-managed-deployments/appendix/configuration/) -- [Materialize CRD Field Descriptions](/self-managed-deployments/appendix/materialize-crd-field-descriptions/) +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md new file mode 100644 index 0000000000000..cc251f412b686 --- /dev/null +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md @@ -0,0 +1,113 @@ +--- +title: "Upgrade on Azure" +description: "Upgrade Materialize on Azure using the Unified Terraform module." +menu: + main: + parent: "upgrading" + weight: 30 +--- + +The following tutorial upgrades your Materialize deployment running on Azure +Kubernetes Service (AKS). The tutorial assumes you have installed the +example on [Install on +Azure](/self-managed-deployments/installation/install-on-azure/). + +## Upgrade guidelines + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrades-general-rules" %}} + +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="upgrade-major-version-restriction" >}} +{{< /note >}} + +## Prerequisites + +### Required Tools + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +## Upgrade process + +{{< important >}} + +The following procedure performs a rolling upgrade, where both the old and new Materialize instances are running before the old instances are removed. When performing a rolling upgrade, ensure you have enough resources to support having both the old and new Materialize instances running. + +{{}} + +### Step 1: Set up + +1. Open a Terminal window. + +1. 
Configure Azure CLI with your Azure credentials. For details, see the [Azure + documentation](https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + +1. Go to the Terraform directory for your Materialize deployment. For example, + if you deployed from the `azure/examples/simple` directory: + + ```bash + cd materialize-terraform-self-managed/azure/examples/simple + ``` + +1. Configure `kubectl` to connect to your AKS cluster, replacing: + + - `` with the name of your Azure resource group. Your resource group name can be found in the Terraform output or Azure portal. + + - `` with the name of your AKS cluster. Your cluster name can be found in the Terraform output or Azure portal. + + ```bash + az aks get-credentials --resource-group --name + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl get nodes + ``` + + For help with `kubectl` commands, see [kubectl Quick reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +### Step 2: Update the Helm Chart + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-update-helm-chart" %}} + +### Step 3: Upgrade the Materialize Operator + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-operator" %}} + +### Step 4: Upgrading Materialize Instances + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-instance" %}} + +## See also + +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md new file mode 100644 index 0000000000000..319d0c9373e1f --- /dev/null +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md @@ -0,0 +1,121 @@ +--- +title: "Upgrade on GCP" +description: "Upgrade Materialize on GCP using the Unified Terraform module." +menu: + main: + parent: "upgrading" + weight: 30 +--- + +The following tutorial upgrades your Materialize deployment running on Google +Kubernetes Engine (GKE). The tutorial assumes you have installed the +example on [Install on +GCP](/self-managed-deployments/installation/install-on-gcp/). + +## Upgrade guidelines + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrades-general-rules" %}} + +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="upgrade-major-version-restriction" >}} +{{< /note >}} + +## Prerequisites + +### Required Tools + +- [Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) +- [Google Cloud CLI](https://cloud.google.com/sdk/docs/install) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Helm 3.2.0+](https://helm.sh/docs/intro/install/) + +## Upgrade process + +{{< important >}} + +The following procedure performs a rolling upgrade, where both the old and new Materialize instances are running before the old instances are removed. 
When performing a rolling upgrade, ensure you have enough resources to support having both the old and new Materialize instances running. + +{{}} + +### Step 1: Set up + +1. Open a Terminal window. + +1. Configure Google Cloud CLI with your GCP credentials. For details, see the [Google Cloud + documentation](https://cloud.google.com/sdk/docs/initializing). + +1. Go to the Terraform directory for your Materialize deployment. For example, + if you deployed from the `gcp/examples/simple` directory: + + ```bash + cd materialize-terraform-self-managed/gcp/examples/simple + ``` + +1. Configure `kubectl` to connect to your GKE cluster, replacing: + + - `` with the name of your GKE cluster. Your cluster name can + be found in the Terraform output. For the sample example, the cluster name + is `-eks`. + + - `` with the region of your GKE cluster. Your region can be + found in the Terraform output `gke_cluster_location`, corresponds to the + `region` value in your `terraform.tfvars`. + + - `` with your GCP project ID. + + ```bash + gcloud container clusters get-credentials \ + --region \ + --project + ``` + + To verify that you have configured correctly, run the following command: + + ```bash + kubectl get nodes + ``` + + For help with `kubectl` commands, see [kubectl Quick reference](https://kubernetes.io/docs/reference/kubectl/quick-reference/). + +### Step 2: Update the Helm Chart + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-update-helm-chart" %}} + +### Step 3: Upgrade the Materialize Operator + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-operator" %}} + +### Step 4: Upgrading Materialize Instances + +{{< important >}} + +{{% include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" %}} + +{{}} + +{{% include-from-yaml data="self_managed/upgrades" +name="upgrade-materialize-instance" %}} + +## See also + +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) +- [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md index 599275c83632b..866e8a933b2b6 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-kind.md @@ -54,5 +54,5 @@ having both the old and new Materialize instances running. ## See also - [Materialize Operator - Configuration](/self-managed-deployments/appendix/configuration/) + Configuration](/self-managed-deployments/operator-configuration/) - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/data/self_managed/upgrades.yml b/doc/user/data/self_managed/upgrades.yml index 0d6102bbde202..e707cf7a77bac 100644 --- a/doc/user/data/self_managed/upgrades.yml +++ b/doc/user/data/self_managed/upgrades.yml @@ -5,11 +5,31 @@ - **Always** check the [version specific upgrade notes](/self-managed-deployments/upgrading/#version-specific-upgrade-notes). 
- - **Always** upgrade the operator **first** and ensure version compatibility - between the operator and the Materialize instance you are upgrading to. + - {{< include-from-yaml data="self_managed/upgrades" name="upgrade-order-rule" >}} - - **Always** upgrade your Materialize instances **after** upgrading the operator - to ensure compatibility. +- name: upgrade-order-rule + content: | + **Always** upgrade the Materialize Operator **before** + upgrading the Materialize instances. + +- name: syntax-helm-upgrade-operator + code: | + helm upgrade -n materialize/materialize-operator \ + --version \ + -f + syntax_elements: + - name: "``" + description: | + The namespace where the Operator is running. (e.g., `materialize`) + - name: "``" + description: | + The release name. You can use `helm list -n ` to find your release name. + - name: "``" + description: | + The upgrade version. + - name: "``" + description: | + The name of your customization file, if using. If you are configuring using `\-\-set key=value` options, include them as well. - name: upgrade-landing-guides-helm content: | @@ -25,7 +45,11 @@ content: | ### Upgrade using Unified Terraform Modules - + | Guide | Description | + | ------------- | ----------- | + | [Upgrade on AWS (Unified Terraform)](/self-managed-deployments/upgrading/upgrade-on-aws/) | Uses Unified Terraform module to deploy Materialize to AWS Elastic Kubernetes Service (EKS). + | [Upgrade on Azure (Unified Terraform)](/self-managed-deployments/upgrading/upgrade-on-azure/) | Uses Unified Terraform module to deploy Materialize to Azure Kubernetes Service (AKS). + | [Upgrade on GCP (Unified Terraform)](/self-managed-deployments/upgrading/upgrade-on-gcp/) | Uses Unified Terraform module to deploy Materialize to Google Kubernetes Engine (GKE). - name: upgrade-landing-guides-legacy content: | @@ -37,3 +61,128 @@ | [Upgrade on AWS (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-aws-legacy/) | Uses legacy Terraform module to deploy Materialize to AWS Elastic Kubernetes Service (EKS). | [Upgrade on Azure (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-azure-legacy/) | Uses legacy Terraform module to deploy Materialize to Azure Kubernetes Service (AKS). | [Upgrade on GCP (Legacy Terraform)](/self-managed-deployments/upgrading/legacy/upgrade-on-gcp-legacy/) | Uses legacy Terraform module to deploy Materialize to Google Kubernetes Engine (GKE). + +- name: downgrade-restriction + content: | + Downgrading is not supported. + +- name: upgrade-major-version-restriction + content: | + For major version upgrades, you can **only** upgrade **one** major version + at a time. For example, upgrades from **v26**.1.0 to **v27**.3.0 is + permitted but **v26**.1.0 to **v28**.0.0 is not. + +- name: upgrade-update-helm-chart + content: | + To update your Materialize Helm Chart repository: + + 1. Update the Helm repo: + + ```shell + helm repo update materialize + ``` + + 1. View the available chart versions: + + ```shell + helm search repo materialize/materialize-operator --versions + ``` +- name: upgrade-materialize-operator + content: | + 1. Use `helm list` to find the release name. The sample example + deployment using the unified Terraform module deploys the Operator in the + `materialize` namespace. 
+
+    ```shell
+    helm list -n materialize
+    ```
+
+    Retrieve the release name (corresponds to the `name_prefix` variable
+    specified in your `terraform.tfvars`) associated with the
+    `materialize-operator` **CHART**; for example, `simple-demo` in the following output:
+
+    ```none
+    NAME          NAMESPACE    REVISION  UPDATED                                STATUS    CHART                          APP VERSION
+    simple-demo   materialize  1         2025-12-08 11:39:50.185976 -0500 EST  deployed  materialize-operator-v26.1.0   v26.1.0
+    ```
+
+  1. Upgrade your Operator. For example, the following upgrades the Operator
+     to {{< self-managed/versions/get-latest-version >}}:
+
+     {{< note >}}
+     {{% include-from-yaml data="self_managed/upgrades"
+     name="upgrade-major-version-restriction" %}}
+     {{< /note >}}
+
+     ```shell
+     helm upgrade -n materialize my-demo materialize/misc/helm-charts/operator \
+     -f my-values.yaml \
+     --version {{< self-managed/versions/get-latest-version >}}
+     ```
+
+  1. Verify that the Operator is running:
+
+     ```bash
+     kubectl -n materialize get all
+     ```
+
+  1. Get the **APP VERSION** of the Operator.
+
+     ```shell
+     helm list -n materialize
+     ```
+
+     The **APP VERSION** will be the value that you will use for upgrading
+     Materialize instances.
+
+- name: upgrade-materialize-instance
+  content: |
+
+    **After** you have upgraded your Materialize Operator, upgrade your
+    Materialize instance(s) to the **APP Version** of the Operator. When
+    upgrading Materialize instance versions, changes are not automatically
+    rolled out by the Operator in order to minimize unexpected downtime and
+    avoid connection drops at critical periods. Instead, the upgrade process
+    involves two steps:
+
+    - First, staging the version change to the Materialize custom resource.
+    - Second, rolling out the changes via a `requestRollout` flag.
+
+    1. Find the name of the Materialize instance to upgrade. The sample example
+      deployment using the unified Terraform module deploys the Materialize
+      instance in the `materialize-environment` namespace.
+
+      ```shell
+      kubectl get materialize -n materialize-environment
+      ```
+
+      In the example deployment, the name of the instance is `main`.
+
+      ```none
+      NAME
+      main
+      ```
+
+    1. Stage, but do not roll out, the Materialize instance version upgrade.
+
+      ```shell
+      kubectl patch materialize main\
+      -n materialize-environment \
+      --type='merge' \
+      -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd: {{< self-managed/versions/get-latest-version >}}\"}}"
+      ```
+
+    1. Roll out the Materialize instance version change.
+
+      ```shell
+      kubectl patch materialize main \
+      -n materialize-environment \
+      --type='merge' \
+      -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}"
+      ```
+
+    1. 
Verify the upgrade by checking the `environmentd` events:
+
+      ```bash
+      kubectl -n materialize-environment describe pod -l app=environmentd
+      ```

From 86d70c2fdef428913c3e5e8ec72991d08e8f215a Mon Sep 17 00:00:00 2001
From: kay-kim
Date: Mon, 8 Dec 2025 22:44:51 -0500
Subject: [PATCH 06/11] fix typos/copy+paste errors/psql connect

---
 .../self-managed-deployments/_index.md        |  8 +++--
 .../installation/install-on-azure.md          | 30 ++++++++++++----
 .../installation/install-on-gcp.md            | 36 +++++++++++++------
 .../upgrading/_index.md                       |  6 ++--
 .../upgrading/upgrade-on-aws.md               |  5 +++
 .../upgrading/upgrade-on-azure.md             |  5 +++
 .../upgrading/upgrade-on-gcp.md               |  7 +++-
 doc/user/data/self_managed/upgrades.yml       | 11 ++++--
 8 files changed, 82 insertions(+), 26 deletions(-)

diff --git a/doc/user/content/self-managed-deployments/_index.md b/doc/user/content/self-managed-deployments/_index.md
index 0b33e376d80f3..ac4ec94c52806 100644
--- a/doc/user/content/self-managed-deployments/_index.md
+++ b/doc/user/content/self-managed-deployments/_index.md
@@ -279,10 +279,12 @@ components work together:
    operator detects this and creates all necessary Kubernetes resources,
    including the `environmentd`, `balancerd`, and `console` pods.
 
-1. **Connect to the instance**: Use the Materialize Console on port 8080 (or SQL
-   client on port 6875) to connect to the `environmentd` service endpoint.
+1. **Connect to the instance**: Use the Materialize Console on port 8080 to
+   connect to the `console` service endpoint or a SQL client on port 6875 to
+   connect to the `balancerd` service endpoint.
 
-   If authentication is enabled, you must set up users.
+   If authentication is enabled, you must first connect to the Materialize
+   Console and set up users.
 
 1. **Create clusters**: Issue SQL commands to create clusters. Materialize
    coordinates with the operator to provision StatefulSets for replicas.
diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md
index f668199174f0c..d53e6bb1c2b72 100644
--- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md
+++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md
@@ -86,7 +86,7 @@ This example provisions the following infrastructure:
 |----------|-------------|
 | Operator | Materialize Kubernetes operator in the `materialize` namespace |
 | Instance | Single Materialize instance in the `materialize-environment` namespace |
-| Load Balancers | Internal Azure Load Balancers for Materialize access |
+| Load Balancers | Internal Azure Load Balancers for Materialize access {{< yaml-table data="self_managed/default_ports" >}} |
 
 ## Prerequisites
 
@@ -206,14 +206,19 @@ An active Azure subscription with appropriate permissions to create:
    to proceed.
 
 1. From the output, you will need the following field(s) to connect:
-   - `console_load_balancer_ip`
+   - `console_load_balancer_ip` for the Materialize Console
+   - `balancerd_load_balancer_ip` to connect PostgreSQL-compatible clients/drivers.
 
### Step 4. Optional. Verify the deployment.
 
1. Configure `kubectl` to connect to your cluster:
+   - `` with the name of your Azure resource group. Your resource group name can be found in the Terraform output or Azure portal.
 
+   - `` with the name of your AKS cluster. Your cluster
+     name can be found in the Terraform output or Azure portal.
+
   ```bash
-   az aks get-credentials --resource-group --name
+   az aks get-credentials --resource-group --name 
  ```
 
1. 
Check the status of your deployment: @@ -222,9 +227,6 @@ An active Azure subscription with appropriate permissions to create: ### Step 5: Connect to Materialize -Using the `console_load_balancer_ip` from the Terraform output, you can connect -to Materialize via the Materialize Console. - {{< note >}} If using an **internal Network Load Balancer (NLB)** for your Materialize @@ -233,6 +235,11 @@ privately connected to it. {{< /note >}} +#### Connect using the Materialize Console + +Using the `console_load_balancer_ip` from the Terraform output, you can connect +to Materialize via the Materialize Console. + 1. To connect to the Materialize Console, open a browser to `https://:8080`, substituting your ``. @@ -244,6 +251,17 @@ privately connected to it. {{< /tip >}} +#### Connect using the `psql` + +Using the `balancerd_load_balancer_ip` value from the Terraform output, you can +connect to Materialize via PostgreSQL-compatible clients/drivers, such as +`psql`: + +```bash +psql postgres://:6875/materialize +``` + + ## Customizing Your Deployment For more information on the Terraform modules, see both the [top diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md index 3abe3f7fbb19a..cb0c5e4f2467c 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md @@ -215,13 +215,14 @@ A Google account with permission to: to proceed. 1. From the output, you will need the following field(s) to connect: - - `` + - `console_load_balancer_ip` for the Materialize Console + - `balancerd_load_balancer_ip` to connect PostgreSQL-compatible clients/drivers. 1. Configure `kubectl` to connect to your GKE cluster, replacing: - `` with the name of your GKE cluster. Your cluster name can be found in the Terraform output. For the sample example, the cluster - name is `-eks`. + name is `-gke`. - `` with the region of your GKE cluster. Your region can be found in the Terraform output `gke_cluster_location`, corresponds to the @@ -243,8 +244,6 @@ A Google account with permission to: ### Step 5: Connect to Materialize -Using the `console_load_balancer_ip` from the Terraform output, you can connect -to Materialize via the Materialize Console. {{< note >}} @@ -254,16 +253,31 @@ privately connected to it. {{< /note >}} -1. To connect to the Materialize Console, open a browser to - `https://:8080`, substituting your - ``. +#### Connect using the Materialize Console + +Using the `console_load_balancer_ip` from the Terraform output, you can connect +to Materialize via the Materialize Console. - {{< tip >}} +To connect to the Materialize Console, open a browser to +`https://:8080`, substituting your +``. 
- {{% include-from-yaml data="self_managed/installation" - name="install-uses-self-signed-cluster-issuer" %}} +{{< tip >}} + +{{% include-from-yaml data="self_managed/installation" +name="install-uses-self-signed-cluster-issuer" %}} + +{{< /tip >}} + +#### Connect using the `psql` + +Using the `balancerd_load_balancer_ip` value from the Terraform output, you can +connect to Materialize via PostgreSQL-compatible clients/drivers, such as +`psql`: - {{< /tip >}} +```bash +psql postgres://:6875/materialize +``` ## Customizing Your Deployment diff --git a/doc/user/content/self-managed-deployments/upgrading/_index.md b/doc/user/content/self-managed-deployments/upgrading/_index.md index 7b3814db1984a..fc330197905b6 100644 --- a/doc/user/content/self-managed-deployments/upgrading/_index.md +++ b/doc/user/content/self-managed-deployments/upgrading/_index.md @@ -72,7 +72,7 @@ my-demo materialize 1 2025-12-08 11:39:50.185976 -0500 EST deployed materia Then, to upgrade: ```shell -helm upgrade -n materialize my-demo materialize/misc/helm-charts/operator \ +helm upgrade -n materialize my-demo materialize/operator \ -f my-values.yaml \ --version {{< self-managed/versions/get-latest-version >}} ``` @@ -120,7 +120,7 @@ use the `kubectl patch` command; for example, if the **App Version** is {{< self kubectl patch materialize \ -n \ --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" + -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" ``` {{< note >}} @@ -147,7 +147,7 @@ It is possible to combine both operations in a single command if preferred: kubectl patch materialize \ -n materialize-environment \ --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" + -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" ``` ### Using YAML Definition diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md index 9bca5e9ee9aba..b8b1cf480069f 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md @@ -22,6 +22,11 @@ name="upgrades-general-rules" %}} name="upgrade-major-version-restriction" >}} {{< /note >}} +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="downgrade-restriction" >}} +{{< /note >}} + ## Prerequisites ### Required Tools diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md index cc251f412b686..4f3440db060ea 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md @@ -22,6 +22,11 @@ name="upgrades-general-rules" %}} name="upgrade-major-version-restriction" >}} {{< /note >}} +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="downgrade-restriction" >}} +{{< /note >}} + ## Prerequisites ### Required Tools diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md 
index 319d0c9373e1f..fa7947f07f8a7 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md @@ -22,6 +22,11 @@ name="upgrades-general-rules" %}} name="upgrade-major-version-restriction" >}} {{< /note >}} +{{< note >}} +{{< include-from-yaml data="self_managed/upgrades" +name="downgrade-restriction" >}} +{{< /note >}} + ## Prerequisites ### Required Tools @@ -57,7 +62,7 @@ The following procedure performs a rolling upgrade, where both the old and new M - `` with the name of your GKE cluster. Your cluster name can be found in the Terraform output. For the sample example, the cluster name - is `-eks`. + is `-gke`. - `` with the region of your GKE cluster. Your region can be found in the Terraform output `gke_cluster_location`, corresponds to the diff --git a/doc/user/data/self_managed/upgrades.yml b/doc/user/data/self_managed/upgrades.yml index e707cf7a77bac..ce7dd6afe2d09 100644 --- a/doc/user/data/self_managed/upgrades.yml +++ b/doc/user/data/self_managed/upgrades.yml @@ -106,6 +106,13 @@ simple-demo materialize 1 2025-12-08 11:39:50.185976 -0500 EST deployed materialize-operator-v26.1.0 v26.1.0 ``` + 1. Export your current values to a file to preserve any custom settings: + + ```shell + helm get values -n materialize simple-demo -o yaml > my-values.yaml + ``` + + 1. Upgrade your Operator. For example, the following upgrades the Operator to {{< self-managed/versions/get-latest-version >}}: @@ -115,7 +122,7 @@ {{< /note >}} ```shell - helm upgrade -n materialize my-demo materialize/misc/helm-charts/operator \ + helm upgrade -n materialize simple-demo materialize/materialize-operator \ -f my-values.yaml \ --version {{< self-managed/versions/get-latest-version >}} ``` @@ -169,7 +176,7 @@ kubectl patch materialize main\ -n materialize-environment \ --type='merge' \ - -p "{\"spec\": {\"environmentdImageRef\": \"materialize/environmentd: {{< self-managed/versions/get-latest-version >}}\"}}" + -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\"}}" ``` 1. Rollout the Materialize instance version change. From 517a7a268d9aa982e2261862537e03494789b231 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Mon, 8 Dec 2025 22:45:41 -0500 Subject: [PATCH 07/11] linting errors --- .../self-managed-deployments/installation/install-on-azure.md | 2 +- doc/user/data/self_managed/upgrades.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index d53e6bb1c2b72..44aed921b8ef3 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -216,7 +216,7 @@ An active Azure subscription with appropriate permissions to create: - `` with the name of your AKS cluster. Your cluster name can be found in the Terraform output or Azure portal. - + ```bash az aks get-credentials --resource-group --name ``` diff --git a/doc/user/data/self_managed/upgrades.yml b/doc/user/data/self_managed/upgrades.yml index ce7dd6afe2d09..86cf90363eeb8 100644 --- a/doc/user/data/self_managed/upgrades.yml +++ b/doc/user/data/self_managed/upgrades.yml @@ -107,7 +107,7 @@ ``` 1. 
Export your current values to a file to preserve any custom settings: - + ```shell helm get values -n materialize simple-demo -o yaml > my-values.yaml ``` From 52abb4be0e2f5e78a8b104d5bb426b11976e8b31 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Tue, 9 Dec 2025 00:01:39 -0500 Subject: [PATCH 08/11] additional tweaks --- .../installation/install-on-aws.md | 118 ++++++++++++------ .../installation/install-on-azure.md | 65 ++++++---- .../installation/install-on-gcp.md | 49 +++++--- .../upgrading/_index.md | 33 ++++- .../upgrade/upgrade-steps-local-kind.html | 11 +- 5 files changed, 189 insertions(+), 87 deletions(-) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-aws.md b/doc/user/content/self-managed-deployments/installation/install-on-aws.md index f65d9846fdc93..6299aacfb0559 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-aws.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-aws.md @@ -179,25 +179,36 @@ An active AWS account with appropriate permissions to create: to proceed. -1. From the output, you will need the following fields to connect: - - `dns_name` - - `external_login_password_mz_system` +1. From the output, you will need the following fields to connect using the + Materialize Console and PostgreSQL-compatible clients/drivers: + - `nlb_dns_name` + - `external_login_password_mz_system`. - {{< tip >}} + ```bash + terraform output -raw + ``` - To get the unredacted value for `external_login_password_mz_system`, - you can run `terraform output -json external_login_password_mz_system` + {{< tip >}} + Your shell may show an ending marker (such as `%`) because the + output did not end with a newline. Do not include the marker when using the value. + {{< /tip >}} - {{< /tip >}} -### Step 4. Optional. Verify the deployment. +1. Configure `kubectl` to connect to your cluster using your: + - `eks_cluster_name`. Your cluster name can be found in the Terraform output. + For the sample example, your cluster name has the form `{prefix_name}-eks`; + e.g., `simple-demo-eks`. -1. Configure `kubectl` to connect to your cluster: + - `region`. The region specified in your `terraform.tfvars` file; e.g., + `us-east-1` ```bash - aws eks update-kubeconfig --name --region + # aws eks update-kubeconfig --name --region + aws eks update-kubeconfig --name $(terraform output -raw eks_cluster_name) --region us-east-1 ``` +### Step 4. Optional. Verify the deployment. + 1. Check the status of your deployment: {{% include-from-yaml data="self_managed/installation" name="installation-verify-status" %}} @@ -205,8 +216,10 @@ An active AWS account with appropriate permissions to create: ### Step 5: Connect to Materialize Using the `dns_name` and `external_login_password_mz_system` from the Terraform -output, you can connect to Materialize via the Materialize Console to create -your users. +output, you can connect to Materialize via the Materialize Console or +PostgreSQL-compatible tools/drivers using the following ports: + +{{< yaml-table data="self_managed/default_ports" >}} {{< note >}} @@ -216,6 +229,8 @@ privately connected to it. {{< /note >}} +#### Connect to the Materialize Console + 1. To connect to the Materialize Console, open a browser to `https://:8080`, substituting your ``. @@ -227,43 +242,77 @@ privately connected to it. {{< /tip >}} 1. Log in as `mz_system`, using `external_login_password_mz_system` as the - password and create new users. + password. 
- In general, other than the initial login to create new users for new - deployments, avoid using `mz_system` since `mz_system` also used by the - Materialize Operator for upgrades and maintenance tasks. +1. Create new users and log out. -1. Once new users are created, logout as `mz_system` and login as one of the - created user. + In general, other than the initial login to create new users for new + deployments, avoid using `mz_system` since `mz_system` also used by the + Materialize Operator for upgrades and maintenance tasks. - For non-`mz_system` users, you can connect using the Materialize Console - or PostgreSQL-compatible tools and drivers using the following ports: + For more information on authentication and authorization for Self-Managed + Materialize, see: - {{< yaml-table data="self_managed/default_ports" >}} + - [Authentication](/security/self-managed/authentication/) + - [Access Control](/security/self-managed/access-control/) -For more information on authentication and authorization for Self-Managed -Materialize, see: +1. Login as one of the created user. -- [Authentication](/security/self-managed/authentication/) -- [Access Control](/security/self-managed/access-control/) +#### Connect using `psql` +1. To connect using `psql`, in the connection string, specify: -## Customizing Your Deployment + - `mz_system` as the user + - Your `` as the host + - `6875` as the port: -For more information on the Terraform modules, see both the [top -level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) -and [AWS -specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) -details. + ```sh + psql postgres://mz_system@:6875/materialize + ``` + + When prompted for the password, enter the + `external_login_password_mz_system` value. + +1. Create new users and log out. + + In general, other than the initial login to create new users for new + deployments, avoid using `mz_system` since `mz_system` also used by the + Materialize Operator for upgrades and maintenance tasks. + + For more information on authentication and authorization for Self-Managed + Materialize, see: + + - [Authentication](/security/self-managed/authentication/) + - [Access Control](/security/self-managed/access-control/) + +1. Login as one of the created user. + +## Customizing Your Deployment {{< tip >}} -You can customize each module independently. To reduce cost in your demo environment, you can tweak subnet CIDRs and instance types in `main.tf`. +To reduce cost in your demo environment, you can tweak subnet CIDRs +and instance types in `main.tf`. {{< /tip >}} -For details on recommended instance sizing and configuration, see the [AWS +You can customize each Terraform module independently. + +- For details on the Terraform modules, see both the [top +level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +and [AWS +specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/aws) READMEs. + +- For details on recommended instance sizing and configuration, see the [AWS deployment guide](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines/). 
+See also: + +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) + + ## Cleanup {{% self-managed/cleanup-cloud %}} @@ -271,6 +320,5 @@ guide](/self-managed-deployments/deployment-guidelines/aws-deployment-guidelines ## See Also -- [Materialize Operator - Configuration](/self-managed-deployments/operator-configuration/) + - [Troubleshooting](/self-managed-deployments/troubleshooting/) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index 44aed921b8ef3..a95d532a38941 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -209,18 +209,32 @@ An active Azure subscription with appropriate permissions to create: - `console_load_balancer_ip` for the Materialize Console - `balancerd_load_balancer_ip` to connect PostgreSQL-compatible clients/drivers. -### Step 4. Optional. Verify the deployment. + ```bash + terraform output -raw + ``` + + {{< tip >}} + Your shell may show an ending marker (such as `%`) because the + output did not end with a newline. Do not include the marker when using the value. + {{< /tip >}} -1. Configure `kubectl` to connect to your cluster: - - `` with the name of your Azure resource group. Your resource group name can be found in the Terraform output or Azure portal. +1. Configure `kubectl` to connect to your cluster using your: + - `resource_group_name`. Your + resource group name can be found in the Terraform output or in the + `terraform.tfvars` file. - - `` with the name of your AKS cluster. Your cluster - name can be found in the Terraform output or Azure portal. + - `akw_cluster_name`. Your cluster name can be found in the Terraform output. + For the sample example, your cluster name has the form `{prefix_name}-aks`; + e.g., simple-demo-aks`. ```bash - az aks get-credentials --resource-group --name + # az aks get-credentials --resource-group --name + az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw aks_cluster_name) ``` + +### Step 4. Optional. Verify the deployment. + 1. Check the status of your deployment: {{% include-from-yaml data="self_managed/installation" name="installation-verify-status" %}} @@ -240,16 +254,16 @@ privately connected to it. Using the `console_load_balancer_ip` from the Terraform output, you can connect to Materialize via the Materialize Console. -1. To connect to the Materialize Console, open a browser to - `https://:8080`, substituting your - ``. +To connect to the Materialize Console, open a browser to +`https://:8080`, substituting your +``. - {{< tip >}} +{{< tip >}} - {{% include-from-yaml data="self_managed/installation" - name="install-uses-self-signed-cluster-issuer" %}} +{{% include-from-yaml data="self_managed/installation" +name="install-uses-self-signed-cluster-issuer" %}} - {{< /tip >}} +{{< /tip >}} #### Connect using the `psql` @@ -264,24 +278,27 @@ psql postgres://:6875/materialize ## Customizing Your Deployment -For more information on the Terraform modules, see both the [top -level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) -and [Azure -specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) -details. 
- {{< tip >}} -You can customize each module independently. To reduce cost in your demo environment, you can tweak VM sizes and database tiers in `main.tf`. +To reduce cost in your demo environment, you can tweak VM sizes and database tiers in `main.tf`. {{< /tip >}} -{{< note >}} -Autoscaling: Uses Azure's native cluster autoscaler that integrates directly with Azure Virtual Machine Scale Sets for automated node scaling. In future we are planning to enhance this by making use of karpenter-provider-azure. -{{< /note >}} +You can customize each Terraform module independently. -For details on recommended instance sizing and configuration, see the [Azure +- For details on the Terraform modules, see both the [top +level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) +and [Azure +specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/azure) modules. + +- For details on recommended instance sizing and configuration, see the [Azure deployment guide](/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines/). +See also: +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) + ## Cleanup {{% self-managed/cleanup-cloud %}} diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md index cb0c5e4f2467c..d67861243db41 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md @@ -218,21 +218,30 @@ A Google account with permission to: - `console_load_balancer_ip` for the Materialize Console - `balancerd_load_balancer_ip` to connect PostgreSQL-compatible clients/drivers. -1. Configure `kubectl` to connect to your GKE cluster, replacing: + ```bash + terraform output -raw + ``` - - `` with the name of your GKE cluster. Your cluster name - can be found in the Terraform output. For the sample example, the cluster - name is `-gke`. + {{< tip >}} + Your shell may show an ending marker (such as `%`) because the + output did not end with a newline. Do not include the marker when using the value. + {{< /tip >}} - - `` with the region of your GKE cluster. Your region can be +1. Configure `kubectl` to connect to your GKE cluster, using your: + - `gke_cluster_name`. Your cluster name can be found in the Terraform output. + For the sample example, your cluster + name your cluster name has the form `-gke`. + + - `gke_cluster_location`. Your region can be found in the Terraform output `gke_cluster_location`, corresponds to the `region` value in your `terraform.tfvars`. - - `` with your GCP project ID. + - ``. Replace with your GCP project ID. ```bash - gcloud container clusters get-credentials \ - --region \ + # gcloud container clusters get-credentials --region --project + gcloud container clusters get-credentials $(terraform output -raw gke_cluster_name) \ + --region $(terraform output -raw gke_cluster_location) \ --project ``` @@ -281,15 +290,21 @@ psql postgres://:6875/materialize ## Customizing Your Deployment -For more information on the Terraform modules, see both the [top +{{< tip >}} +To reduce cost in your demo environment, you can tweak machine types and database tiers in `main.tf`. +{{< /tip >}} + +You can customize each module independently. 
+ +- For details on the Terraform modules, see both the [top level](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main) and [GCP specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/tree/main/gcp) -details. +modules. -{{< tip >}} -You can customize each module independently. To reduce cost in your demo environment, you can tweak machine types and database tiers in `main.tf`. -{{< /tip >}} +- For details on recommended instance sizing and configuration, see the [GCP +deployment +guide](/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines/). {{< note >}} **GCP Storage Authentication Limitation:** Materialize currently only supports HMAC key authentication for GCS access (S3-compatible API). @@ -297,9 +312,11 @@ Current State: The modules configure both HMAC keys and Workload Identity, but M Future: Native GCS access via Workload Identity Federation or Kubernetes service account impersonation will be supported in a future release, eliminating the need for static credentials. {{< /note >}} -For details on recommended instance sizing and configuration, see the [GCP -deployment -guide](/self-managed-deployments/deployment-guidelines/gcp-deployment-guidelines/). +See also: +- [Materialize Operator + Configuration](/self-managed-deployments/operator-configuration/) +- [Materialize CRD Field + Descriptions](/self-managed-deployments/materialize-crd-field-descriptions/) ## Cleanup diff --git a/doc/user/content/self-managed-deployments/upgrading/_index.md b/doc/user/content/self-managed-deployments/upgrading/_index.md index fc330197905b6..4ad912acc4dad 100644 --- a/doc/user/content/self-managed-deployments/upgrading/_index.md +++ b/doc/user/content/self-managed-deployments/upgrading/_index.md @@ -96,7 +96,6 @@ spec: environmentdImageRef: docker.io/materialize/environmentd: ``` - To minimize unexpected downtime and avoid connection drops at critical periods for your application, the upgrade process involves two steps: @@ -106,7 +105,6 @@ periods for your application, the upgrade process involves two steps: - Second, roll out the changes by specifying a new UUID for `requestRollout`. - ### Stage the Materialize instance version change To stage the Materialize instances version upgrade, update the @@ -141,7 +139,11 @@ kubectl patch materialize \ -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" ``` -It is possible to combine both operations in a single command if preferred: +### Staging and applying in a single command + +Although separating the staging and rollout of the changes into two steps can +minimize unexpected downtime and avoid connection drops at critical periods, you +can, if preferred, combine both operations in a single command ```shell kubectl patch materialize \ @@ -177,9 +179,30 @@ kubectl apply -f materialize.yaml ## Rollout Configuration -### Forced Rollouts +### `requestRollout` + +Specify a new `UUID` value for the `requestRollout` to roll out the changes to +the Materialize instance. + +{{< note >}} + +`requestRollout` without the `forcedRollout` field only rolls out if changes +exist to the Materialize instance. To roll out even if there are no changes to +the instance, use with `forcedRollouts`. 
+ +{{< /note >}} + +```shell +# Only rolls out if there are changes +kubectl patch materialize \ + -n \ + --type='merge' \ + -p "{\"spec\": {\"requestRollout\": \"$(uuidgen)\"}}" +``` +### `requestRollout` with `forcedRollouts` -If you need to force a rollout even when there are no changes to the instance: +Specify a new `UUID` value for `forcedRollout` to roll out even when there are +no changes to the instance. Use `forcedRollout` with `requestRollout`. ```shell kubectl patch materialize \ diff --git a/doc/user/layouts/shortcodes/self-managed/versions/upgrade/upgrade-steps-local-kind.html b/doc/user/layouts/shortcodes/self-managed/versions/upgrade/upgrade-steps-local-kind.html index 83507aebdfcaa..94ee296c4aac3 100644 --- a/doc/user/layouts/shortcodes/self-managed/versions/upgrade/upgrade-steps-local-kind.html +++ b/doc/user/layouts/shortcodes/self-managed/versions/upgrade/upgrade-steps-local-kind.html @@ -61,13 +61,13 @@ kubectl -n materialize-environment patch secret materialize-backend -p '{"stringData":{"license_key":""}}' --type=merge ``` -1. Create a new `upgrade-materialize.yaml` file with the following content: +1. Create a new `upgrade-materialize.yaml` file, updating the following fields: | Field | Description | |-------|-------------| | `environmentdImageRef` | Update the version to the new version. This should be the same as the operator version: `{{ $operator_version }}`. | | `requestRollout` or `forceRollout`| Enter a new UUID. Can be generated with `uuidgen`.
  • `requestRollout` triggers a rollout only if changes exist.
  • `forceRollout` triggers a rollout even if no changes exist.
| - | `inPlaceRollout` | Set to `false` to perform a rolling upgrade. For rolling upgrades, ensure you have enough resources to support having both the old and new Materialize instances running during the upgrade. | + ```yaml apiVersion: materialize.cloud/v1alpha1 @@ -78,13 +78,10 @@ spec: environmentdImageRef: materialize/environmentd:{{ $environmentd_version }} # Update version requestRollout: 22222222-2222-2222-2222-222222222222 # Enter a new UUID - # forceRollout: 33333333-3333-3333-3333-333333333333 # For forced rollouts - inPlaceRollout: false # When false, performs a rolling upgrade rather than in-place + # forceRollout: 33333333-3333-3333-3333-333333333333 # For forced rollouts + rolloutStrategy: WaitUntilReady # The mechanism to use when rolling out the new version. backendSecretName: materialize-backend ``` -
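One way to avoid editing the UUID by hand on every upgrade is to stamp a freshly generated value into the manifest. This is a sketch only; it assumes the placeholder `requestRollout` UUID shown in the example above.

```bash
# Sketch only: replace the placeholder requestRollout UUID in the manifest
# with a freshly generated one (sed -i.bak works with both GNU and BSD sed).
NEW_UUID="$(uuidgen)"
sed -i.bak "s/22222222-2222-2222-2222-222222222222/${NEW_UUID}/" upgrade-materialize.yaml
```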
- WARNING! Please consult the Materialize team before setting inPlaceRollout to true and performing an in-place rollout. In almost all cases a rolling upgrade is preferred. -
1. Apply the upgrade-materialize.yaml file to your Materialize instance: From 62fb16b1a2484d3a92fed2a2cdc84d57fa8d4aa1 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Tue, 9 Dec 2025 15:09:41 -0500 Subject: [PATCH 09/11] accidental removal of note --- .../self-managed-deployments/installation/install-on-azure.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index a95d532a38941..98c582cac7219 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -293,6 +293,10 @@ specific](https://github.com/MaterializeInc/materialize-terraform-self-managed/t deployment guide](/self-managed-deployments/deployment-guidelines/azure-deployment-guidelines/). +{{< note >}} +Autoscaling: Uses Azure's native cluster autoscaler that integrates directly with Azure Virtual Machine Scale Sets for automated node scaling. In future we are planning to enhance this by making use of karpenter-provider-azure. +{{< /note >}} + See also: - [Materialize Operator Configuration](/self-managed-deployments/operator-configuration/) From 7a0b74c733f1a4a7ca87f99eb32f71f552856019 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Wed, 10 Dec 2025 13:34:15 -0500 Subject: [PATCH 10/11] some cleanup/nice-to-haves --- .../installation/install-on-aws.md | 36 +++++++++++++------ .../installation/install-on-azure.md | 23 +++++++----- .../installation/install-on-gcp.md | 28 +++++++++------ .../upgrading/_index.md | 2 +- .../upgrading/upgrade-on-aws.md | 19 ++++++++-- .../upgrading/upgrade-on-azure.md | 11 ++++-- .../upgrading/upgrade-on-gcp.md | 19 +++++----- 7 files changed, 92 insertions(+), 46 deletions(-) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-aws.md b/doc/user/content/self-managed-deployments/installation/install-on-aws.md index 6299aacfb0559..89af7a1385c19 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-aws.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-aws.md @@ -132,6 +132,14 @@ An active AWS account with appropriate permissions to create: cd materialize-terraform-self-managed/aws/examples/simple ``` +1. Ensure your AWS CLI is configured with the appropriate profile, substitute + `` with the profile to use: + + ```bash + # Set your AWS profile for the session + export AWS_PROFILE= + ``` + ### Step 2: Configure Terraform Variables 1. Create a `terraform.tfvars` file with the following variables: @@ -194,17 +202,19 @@ An active AWS account with appropriate permissions to create: {{< /tip >}} -1. Configure `kubectl` to connect to your cluster using your: - - `eks_cluster_name`. Your cluster name can be found in the Terraform output. - For the sample example, your cluster name has the form `{prefix_name}-eks`; - e.g., `simple-demo-eks`. +1. Configure `kubectl` to connect to your cluster, replacing: + + - `` with the your cluster name; i.e., the + `eks_cluster_name` in the Terraform output. For the + sample example, your cluster name has the form `{prefix_name}-eks`; e.g., + `simple-demo-eks`. - - `region`. The region specified in your `terraform.tfvars` file; e.g., - `us-east-1` + - `` with the region of your cluster. Your region can be + found in your `terraform.tfvars` file; e.g., `us-east-1`. 
```bash # aws eks update-kubeconfig --name --region - aws eks update-kubeconfig --name $(terraform output -raw eks_cluster_name) --region us-east-1 + aws eks update-kubeconfig --name $(terraform output -raw eks_cluster_name) --region ``` ### Step 4. Optional. Verify the deployment. @@ -232,7 +242,13 @@ privately connected to it. #### Connect to the Materialize Console 1. To connect to the Materialize Console, open a browser to - `https://:8080`, substituting your ``. + `https://:8080`, substituting your ``. + + From the terminal, you can type: + + ```sh + open https://$(terraform output -raw nlb_dns_name):8080/materialize + ``` {{< tip >}} @@ -263,11 +279,11 @@ privately connected to it. 1. To connect using `psql`, in the connection string, specify: - `mz_system` as the user - - Your `` as the host + - Your `` as the host - `6875` as the port: ```sh - psql postgres://mz_system@:6875/materialize + psql postgres://mz_system@$(terraform output -raw nlb_dns_name):6875/materialize ``` When prompted for the password, enter the diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index 98c582cac7219..7e00a14005a2d 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -218,21 +218,20 @@ An active Azure subscription with appropriate permissions to create: output did not end with a newline. Do not include the marker when using the value. {{< /tip >}} -1. Configure `kubectl` to connect to your cluster using your: - - `resource_group_name`. Your - resource group name can be found in the Terraform output or in the +1. Configure `kubectl` to connect to your cluster, replacing: + - `` with your resource group name; i.e., the + `resource_group_name` in the Terraform output or in the `terraform.tfvars` file. - - `akw_cluster_name`. Your cluster name can be found in the Terraform output. - For the sample example, your cluster name has the form `{prefix_name}-aks`; - e.g., simple-demo-aks`. + - `` with your cluster name; i.e., the + `aks_cluster_name` in the Terraform output. For the sample example, + your cluster name has the form `{prefix_name}-aks`; e.g., `simple-demo-aks`. ```bash - # az aks get-credentials --resource-group --name + # az aks get-credentials --resource-group --name az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw aks_cluster_name) ``` - ### Step 4. Optional. Verify the deployment. 1. Check the status of your deployment: @@ -258,6 +257,12 @@ To connect to the Materialize Console, open a browser to `https://:8080`, substituting your ``. 
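If you prefer to paste the raw IP into the browser yourself, you can print it first; this minimal sketch assumes the `console_load_balancer_ip` output name used elsewhere in this guide.

```bash
# Illustrative only: print the console load balancer IP for manual use.
terraform output -raw console_load_balancer_ip
```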
+From the terminal, you can type: + +```sh +open https://$(terraform output -raw console_load_balancer_ip):8080/materialize +``` + {{< tip >}} {{% include-from-yaml data="self_managed/installation" @@ -272,7 +277,7 @@ connect to Materialize via PostgreSQL-compatible clients/drivers, such as `psql`: ```bash -psql postgres://:6875/materialize +psql postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize ``` diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md index d67861243db41..6937d8f6fcf35 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md @@ -227,19 +227,20 @@ A Google account with permission to: output did not end with a newline. Do not include the marker when using the value. {{< /tip >}} -1. Configure `kubectl` to connect to your GKE cluster, using your: - - `gke_cluster_name`. Your cluster name can be found in the Terraform output. - For the sample example, your cluster - name your cluster name has the form `-gke`. +1. Configure `kubectl` to connect to your GKE cluster, replacing: - - `gke_cluster_location`. Your region can be - found in the Terraform output `gke_cluster_location`, corresponds to the - `region` value in your `terraform.tfvars`. + - `` with your cluster name; i.e., the + `gke_cluster_name` in the Terraform output. For the sample example, your + cluster name has the form `-gke`; e.g., `simple-demo-gke` - - ``. Replace with your GCP project ID. + - `` with your cluster location; i.e., the + `gke_cluster_location` in the Terraform output. Your + region can also be found in your `terraform.tfvars` file. + + - `` with your GCP project ID. ```bash - # gcloud container clusters get-credentials --region --project + # gcloud container clusters get-credentials --region --project gcloud container clusters get-credentials $(terraform output -raw gke_cluster_name) \ --region $(terraform output -raw gke_cluster_location) \ --project @@ -253,7 +254,6 @@ A Google account with permission to: ### Step 5: Connect to Materialize - {{< note >}} If using an **internal Network Load Balancer (NLB)** for your Materialize @@ -271,6 +271,12 @@ To connect to the Materialize Console, open a browser to `https://:8080`, substituting your ``. 
+From the terminal, you can type: + +```sh +open https://$(terraform output -raw console_load_balancer_ip):8080/materialize +``` + {{< tip >}} {{% include-from-yaml data="self_managed/installation" @@ -285,7 +291,7 @@ connect to Materialize via PostgreSQL-compatible clients/drivers, such as `psql`: ```bash -psql postgres://:6875/materialize +psql postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize ``` ## Customizing Your Deployment diff --git a/doc/user/content/self-managed-deployments/upgrading/_index.md b/doc/user/content/self-managed-deployments/upgrading/_index.md index 4ad912acc4dad..1f8cde017e551 100644 --- a/doc/user/content/self-managed-deployments/upgrading/_index.md +++ b/doc/user/content/self-managed-deployments/upgrading/_index.md @@ -152,7 +152,7 @@ kubectl patch materialize \ -p "{\"spec\": {\"environmentdImageRef\": \"docker.io/materialize/environmentd:{{< self-managed/versions/get-latest-version >}}\", \"requestRollout\": \"$(uuidgen)\"}}" ``` -### Using YAML Definition +#### Using YAML Definition Alternatively, you can update your Materialize custom resource definition directly: diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md index b8b1cf480069f..46cc6b5b360c8 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-aws.md @@ -58,14 +58,27 @@ The following procedure performs a rolling upgrade, where both the old and new M cd materialize-terraform-self-managed/aws/examples/simple ``` +1. Ensure your AWS CLI is configured with the appropriate profile, substitute + `` with the profile to use: + + ```bash + # Set your AWS profile for the session + export AWS_PROFILE= + ``` + 1. Configure `kubectl` to connect to your EKS cluster, replacing: - - `` with the name of your EKS cluster. Your cluster name can be found in the Terraform output or AWS console. + - `` with the your cluster name; i.e., the + `eks_cluster_name` in the Terraform output. For the + sample example, your cluster name has the form `{prefix_name}-eks`; e.g., + `simple-demo-eks`. - - `` with the region of your EKS cluster. + - `` with the region of your cluster. Your region can be + found in your `terraform.tfvars` file; e.g., `us-east-1`. ```bash - aws eks update-kubeconfig --name --region + # aws eks update-kubeconfig --name --region + aws eks update-kubeconfig --name $(terraform output -raw eks_cluster_name) --region ``` To verify that you have configured correctly, run the following command: diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md index 4f3440db060ea..e74e064e5dc70 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-azure.md @@ -60,12 +60,17 @@ The following procedure performs a rolling upgrade, where both the old and new M 1. Configure `kubectl` to connect to your AKS cluster, replacing: - - `` with the name of your Azure resource group. Your resource group name can be found in the Terraform output or Azure portal. + - `` with your resource group name; i.e., the + `resource_group_name` in the Terraform output or in the + `terraform.tfvars` file. - - `` with the name of your AKS cluster. Your cluster name can be found in the Terraform output or Azure portal. 
+ - `` with your cluster name; i.e., the + `aks_cluster_name` in the Terraform output. For the sample example, + your cluster name has the form `{prefix_name}-aks`; e.g., simple-demo-aks`. ```bash - az aks get-credentials --resource-group --name + # az aks get-credentials --resource-group --name + az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw aks_cluster_name) ``` To verify that you have configured correctly, run the following command: diff --git a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md index fa7947f07f8a7..84b8fab7099f4 100644 --- a/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md +++ b/doc/user/content/self-managed-deployments/upgrading/upgrade-on-gcp.md @@ -60,20 +60,21 @@ The following procedure performs a rolling upgrade, where both the old and new M 1. Configure `kubectl` to connect to your GKE cluster, replacing: - - `` with the name of your GKE cluster. Your cluster name can - be found in the Terraform output. For the sample example, the cluster name - is `-gke`. + - `` with your cluster name; i.e., the + `gke_cluster_name` in the Terraform output. For the sample example, your + cluster name has the form `-gke`; e.g., `simple-demo-gke` - - `` with the region of your GKE cluster. Your region can be - found in the Terraform output `gke_cluster_location`, corresponds to the - `region` value in your `terraform.tfvars`. + - `` with your cluster location; i.e., the + `gke_cluster_location` in the Terraform output. Your + region can also be found in your `terraform.tfvars` file. - `` with your GCP project ID. ```bash - gcloud container clusters get-credentials \ - --region \ - --project + # gcloud container clusters get-credentials --region --project + gcloud container clusters get-credentials $(terraform output -raw gke_cluster_name) \ + --region $(terraform output -raw gke_cluster_location) \ + --project ``` To verify that you have configured correctly, run the following command: From 789a247ff057603f245e92f8c20771f89a12f4a3 Mon Sep 17 00:00:00 2001 From: kay-kim Date: Wed, 10 Dec 2025 14:16:02 -0500 Subject: [PATCH 11/11] docs: tweak for zsh escaping --- .../self-managed-deployments/installation/install-on-aws.md | 4 ++-- .../self-managed-deployments/installation/install-on-azure.md | 4 ++-- .../self-managed-deployments/installation/install-on-gcp.md | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/user/content/self-managed-deployments/installation/install-on-aws.md b/doc/user/content/self-managed-deployments/installation/install-on-aws.md index 89af7a1385c19..9f9cc6275452a 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-aws.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-aws.md @@ -247,7 +247,7 @@ privately connected to it. From the terminal, you can type: ```sh - open https://$(terraform output -raw nlb_dns_name):8080/materialize + open "https://$(terraform output -raw nlb_dns_name):8080/materialize" ``` {{< tip >}} @@ -283,7 +283,7 @@ privately connected to it. 
- `6875` as the port: ```sh - psql postgres://mz_system@$(terraform output -raw nlb_dns_name):6875/materialize + psql "postgres://mz_system@$(terraform output -raw nlb_dns_name):6875/materialize" ``` When prompted for the password, enter the diff --git a/doc/user/content/self-managed-deployments/installation/install-on-azure.md b/doc/user/content/self-managed-deployments/installation/install-on-azure.md index 7e00a14005a2d..24f44ee809da7 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-azure.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-azure.md @@ -260,7 +260,7 @@ To connect to the Materialize Console, open a browser to From the terminal, you can type: ```sh -open https://$(terraform output -raw console_load_balancer_ip):8080/materialize +open "https://$(terraform output -raw console_load_balancer_ip):8080/materialize" ``` {{< tip >}} @@ -277,7 +277,7 @@ connect to Materialize via PostgreSQL-compatible clients/drivers, such as `psql`: ```bash -psql postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize +psql "postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize" ``` diff --git a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md index 6937d8f6fcf35..3c5edd297f4c7 100644 --- a/doc/user/content/self-managed-deployments/installation/install-on-gcp.md +++ b/doc/user/content/self-managed-deployments/installation/install-on-gcp.md @@ -274,7 +274,7 @@ To connect to the Materialize Console, open a browser to From the terminal, you can type: ```sh -open https://$(terraform output -raw console_load_balancer_ip):8080/materialize +open "https://$(terraform output -raw console_load_balancer_ip):8080/materialize" ``` {{< tip >}} @@ -291,7 +291,7 @@ connect to Materialize via PostgreSQL-compatible clients/drivers, such as `psql`: ```bash -psql postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize +psql "postgres://$(terraform output -raw balancerd_load_balancer_ip):6875/materialize" ``` ## Customizing Your Deployment
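Before changing anything in `main.tf`, it can help to snapshot what the example configuration currently reports, so you can confirm your customizations did not drop an output you rely on; a minimal check using standard Terraform commands:

```bash
# Illustrative only: record the current outputs before editing main.tf,
# then capture them again after your changes and compare the two files.
terraform output > outputs-before-changes.txt
```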