From 707404115aa70847ffde72d10d99041f91537f7e Mon Sep 17 00:00:00 2001 From: MyroslavLevchyk Date: Fri, 8 Aug 2025 17:16:19 +0300 Subject: [PATCH 1/2] feat: disable access to DBFS root --- README.md | 31 +++++++++++++++---------------- main.tf | 8 ++++---- variables.tf | 25 +++++++++++++------------ 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 3548c60..fd587ce 100644 --- a/README.md +++ b/README.md @@ -374,6 +374,7 @@ No modules. | [databricks_cluster_policy.overrides](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) | resource | | [databricks_cluster_policy.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) | resource | | [databricks_database_instance.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/database_instance) | resource | +| [databricks_disable_legacy_dbfs_setting.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/disable_legacy_dbfs_setting) | resource | | [databricks_entitlements.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/entitlements) | resource | | [databricks_group.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/group) | resource | | [databricks_ip_access_list.allowed_list](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/ip_access_list) | resource | @@ -387,7 +388,6 @@ No modules. | [databricks_secret_scope.main](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/secret_scope) | resource | | [databricks_secret_scope.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/secret_scope) | resource | | [databricks_sql_endpoint.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/sql_endpoint) | resource | -| [databricks_system_schema.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/system_schema) | resource | | [databricks_token.pat](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/token) | resource | | [databricks_workspace_conf.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/workspace_conf) | resource | | [databricks_current_metastore.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/current_metastore) | data source | @@ -399,24 +399,23 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [cloud\_name](#input\_cloud\_name) | Cloud Name | `string` | n/a | yes | -| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | -| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | -| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | -| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | -| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | -| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | -| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | -| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | -| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | -| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no | +| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | +| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | +| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | +| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | +| [disable\_legacy\_dbfs](#input\_disable\_legacy\_dbfs) | Disables access to DBFS root and mounts in your existing Databricks workspace.
When set to true:
- Access to DBFS root and mounted paths is blocked.
- Manual restart of all-purpose compute clusters and SQL warehouses is required after enabling this setting.
- Note: This setting only takes effect when disabling access. Re-enabling must be done manually via the Databricks UI. | `bool` | `false` | no | +| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | +| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | +| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | +| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | +| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | +| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no | | [mount\_enabled](#input\_mount\_enabled) | Boolean flag that determines whether mount point for storage account filesystem is created | `bool` | `false` | no | -| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | +| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | | [pat\_token\_lifetime\_seconds](#input\_pat\_token\_lifetime\_seconds) | The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely | `number` | `315569520` | no | -| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (this group is created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where 'key' is the name of the created secret and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | -| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | +| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (this group is created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where 'key' is the name of the created secret and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | +| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | | [suffix](#input\_suffix) | Optional suffix that would be added to the end of resources names. | `string` | `""` | no | -| [system\_schemas](#input\_system\_schemas) | Set of strings with all possible System Schema names | `set(string)` |
[
"access",
"compute",
"marketplace",
"storage",
"serving",
"query",
"lakeflow"
]
| no |
| [system\_schemas\_enabled](#input\_system\_schemas\_enabled) | System Schemas only works with assigned Unity Catalog Metastore. Boolean flag to enabled this feature | `bool` | `false` | no |
| [workspace\_admin\_token\_enabled](#input\_workspace\_admin\_token\_enabled) | Boolean flag to specify whether to create Workspace Admin Token | `bool` | n/a | yes |

## Outputs

diff --git a/main.tf b/main.tf
index fecb57d..a8e3e27 100644
--- a/main.tf
+++ b/main.tf
@@ -16,8 +16,8 @@ resource "databricks_token" "pat" {
   lifetime_seconds = var.pat_token_lifetime_seconds
 }
 
-resource "databricks_system_schema" "this" {
-  for_each = var.system_schemas_enabled ? var.system_schemas : toset([])
-
-  schema = each.value
+resource "databricks_disable_legacy_dbfs_setting" "this" {
+  disable_legacy_dbfs {
+    value = var.disable_legacy_dbfs
+  }
 }
 
diff --git a/variables.tf b/variables.tf
index 15e4f59..2382701 100644
--- a/variables.tf
+++ b/variables.tf
@@ -215,18 +215,6 @@ variable "mountpoints" {
   default = {}
 }
 
-variable "system_schemas" {
-  type        = set(string)
-  description = "Set of strings with all possible System Schema names"
-  default     = ["access", "compute", "marketplace", "storage", "serving", "query", "lakeflow"]
-}
-
-variable "system_schemas_enabled" {
-  type        = bool
-  description = "System Schemas only works with assigned Unity Catalog Metastore. Boolean flag to enabled this feature"
-  default     = false
-}
-
 variable "default_cluster_policies_override" {
   type = list(object({
     name      = string
@@ -292,3 +280,16 @@ To deploy and use an OLTP database instance in Databricks:
 - Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option.
 DESCRIPTION
 }
+
+# Disable access to DBFS root
+variable "disable_legacy_dbfs" {
+  type        = bool
+  default     = false
+  description = <<DESCRIPTION
+Disables access to DBFS root and mounts in your existing Databricks workspace.
+When set to true:
+- Access to DBFS root and mounted paths is blocked.
+- Manual restart of all-purpose compute clusters and SQL warehouses is required after enabling this setting.
+- Note: This setting only takes effect when disabling access. Re-enabling must be done manually via the Databricks UI.
+DESCRIPTION
+}

From: MyroslavLevchyk
Date: Fri, 8 Aug 2025 15:04:38 +0000
Subject: [PATCH 2/2] terraform-docs: automated action

---
 README.md | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index fd587ce..dede637 100644
--- a/README.md
+++ b/README.md
@@ -399,22 +399,22 @@ No modules.

| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [cloud\_name](#input\_cloud\_name) | Cloud Name | `string` | n/a | yes |
| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | -| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | -| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | -| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | -| [disable\_legacy\_dbfs](#input\_disable\_legacy\_dbfs) | Disables access to DBFS root and mounts in your existing Databricks workspace.
When set to true:
- Access to DBFS root and mounted paths is blocked.
- Manual restart of all-purpose compute clusters and SQL warehouses is required after enabling this setting.
- Note: This setting only takes effect when disabling access. Re-enabling must be done manually via the Databricks UI. | `bool` | `false` | no | -| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | -| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | -| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | -| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | -| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | -| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no | +| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | +| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | +| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | +| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | +| [disable\_legacy\_dbfs](#input\_disable\_legacy\_dbfs) | Disables access to DBFS root and mounts in your existing Databricks workspace.
When set to true:
- Access to DBFS root and mounted paths is blocked.
- Manual restart of all-purpose compute clusters and SQL warehouses is required after enabling this setting.
- Note: This setting only takes effect when disabling access. Re-enabling must be done manually via the Databricks UI. | `bool` | `false` | no | +| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | +| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | +| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | +| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | +| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | +| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no | | [mount\_enabled](#input\_mount\_enabled) | Boolean flag that determines whether mount point for storage account filesystem is created | `bool` | `false` | no | -| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | +| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | | [pat\_token\_lifetime\_seconds](#input\_pat\_token\_lifetime\_seconds) | The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely | `number` | `315569520` | no | -| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (this group is created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where 'key' is the name of the created secret and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | -| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | +| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (this group is created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where 'key' is the name of the created secret and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | +| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | | [suffix](#input\_suffix) | Optional suffix that would be added to the end of resources names. | `string` | `""` | no | | [workspace\_admin\_token\_enabled](#input\_workspace\_admin\_token\_enabled) | Boolean flag to specify whether to create Workspace Admin Token | `bool` | n/a | yes |
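| [suffix](#input\_suffix) | Optional suffix that would be added to the end of resources names. | `string` | `""` | no |
| [workspace\_admin\_token\_enabled](#input\_workspace\_admin\_token\_enabled) | Boolean flag to specify whether to create Workspace Admin Token | `bool` | n/a | yes |

Usage note: a minimal caller sketch of the new input under assumed values. The module `source` and the input values shown are illustrative placeholders; `cloud_name` and `workspace_admin_token_enabled` are included only because the inputs table above marks them as required.

```hcl
# Illustrative consumer of this module (source path and values are placeholders).
module "databricks_premium" {
  source = "../terraform-databricks-runtime" # placeholder

  cloud_name                    = "azure"
  workspace_admin_token_enabled = false

  # New input added by this patch: blocks access to DBFS root and mounts.
  # Per the variable description, all-purpose clusters and SQL warehouses
  # require a manual restart after this is applied, and re-enabling is a
  # manual operation in the Databricks UI.
  disable_legacy_dbfs = true
}
```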