diff --git a/examples/deploy/terraform/infra.tfvars b/examples/deploy/terraform/infra.tfvars index cfa3929a6..f4e2dd169 100644 --- a/examples/deploy/terraform/infra.tfvars +++ b/examples/deploy/terraform/infra.tfvars @@ -110,6 +110,12 @@ storage = { size_in_megabytes = 1099511 storage_efficiency_enabled = true } + staging_volume = { + create = true + junction_path = "/trident_domino_staging_vol" + name = "trident_domino_staging_vol" + size_in_megabytes = 1099511 + } } s3 = { create = true diff --git a/examples/deploy/terraform/infra/README.md b/examples/deploy/terraform/infra/README.md index a002278a0..b0143dd08 100644 --- a/examples/deploy/terraform/infra/README.md +++ b/examples/deploy/terraform/infra/README.md @@ -38,8 +38,7 @@ No resources. | [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
})
| `{}` | no | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | | [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes | -| [storage](#input\_storage) | storage = {
filesystem\_type = File system type(netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = netapp ontap deployment type,('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
s3 = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the ECR repositories. if 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool, false)
datasync = optional(object({
enabled = optional(bool, false)
target = optional(string, "netapp")
schedule = optional(string, "cron(0 */4 * * ? *)")
verify_mode = optional(string, "ONLY_FILES_TRANSFERRED")
}), {})
}), {})
deployment_type = optional(string, "SINGLE_AZ_1")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 128)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
storage_capacity_autosizing = optional(object({
enabled = optional(bool, false)
threshold = optional(number, 70)
percent_capacity_increase = optional(number, 30)
notification_email_address = optional(string, "")
}), {})
volume = optional(object({
create = optional(bool, true)
name_suffix = optional(string, "domino_shared_storage")
storage_efficiency_enabled = optional(bool, true)
junction_path = optional(string, "/domino")
size_in_megabytes = optional(number, 1099511)
}), {})
}), {})
s3 = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, false)
})
| `{}` | no | -| [tags](#input\_tags) | Deployment tags. | `map(string)` | n/a | yes | +| [storage](#input\_storage) | storage = {
filesystem\_type = File system type (netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = NetApp ONTAP deployment type ('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
staging\_volume = {
create = Create a staging volume associated with the filesystem
name = The name of the staging volume
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the staging volume
}
}
s3 = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets.
}
ecr = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false', terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool, false)
datasync = optional(object({
enabled = optional(bool, false)
target = optional(string, "netapp")
schedule = optional(string, "cron(0 */4 * * ? *)")
verify_mode = optional(string, "ONLY_FILES_TRANSFERRED")
}), {})
}), {})
deployment_type = optional(string, "SINGLE_AZ_1")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 128)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
storage_capacity_autosizing = optional(object({
enabled = optional(bool, false)
threshold = optional(number, 70)
percent_capacity_increase = optional(number, 30)
notification_email_address = optional(string, "")
}), {})
volume = optional(object({
create = optional(bool, true)
name_suffix = optional(string, "domino_shared_storage")
storage_efficiency_enabled = optional(bool, true)
junction_path = optional(string, "/domino")
size_in_megabytes = optional(number, 1099511)
}), {})
staging_volume = optional(object({
create = optional(bool, true)
name = optional(string, "trident_domino_staging_vol")
junction_path = optional(string, "/trident_domino_staging_vol")
size_in_megabytes = optional(number, 1099511)
}), {})
}), {})
s3 = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, false)
})
| `{}` | no | | [tags](#input\_tags) | Deployment tags. | `map(string)` | n/a | yes | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | ## Outputs diff --git a/examples/deploy/terraform/infra/variables.tf b/examples/deploy/terraform/infra/variables.tf index 6fcd6ee3c..029db004c 100644 --- a/examples/deploy/terraform/infra/variables.tf +++ b/examples/deploy/terraform/infra/variables.tf @@ -277,6 +277,13 @@ variable "storage" { storage_efficiency_enabled = Toggle storage_efficiency_enabled junction_path = filesystem junction path size_in_megabytes = The size of the volume + } + staging_volume = { + create = Create a staging volume associated with the filesystem + name = The name of the staging volume + junction_path = filesystem junction path + size_in_megabytes = The size of the staging volume + } } s3 = { force_destroy_on_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. @@ -331,6 +338,12 @@ variable "storage" { junction_path = optional(string, "/domino") size_in_megabytes = optional(number, 1099511) }), {}) + staging_volume = optional(object({ + create = optional(bool, true) + name = optional(string, "trident_domino_staging_vol") + junction_path = optional(string, "/trident_domino_staging_vol") + size_in_megabytes = optional(number, 1099511) + }), {}) }), {}) s3 = optional(object({ create = optional(bool, true) diff --git a/modules/infra/README.md b/modules/infra/README.md index d8985b5fc..bfa27e519 100644 --- a/modules/infra/README.md +++ b/modules/infra/README.md @@ -65,8 +65,7 @@ | [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking.
create\_ecr\_endpoint = Create the VPC Endpoint For ECR.
create\_s3\_endpoint = Create the VPC Endpoint For S3. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
create_ecr_endpoint = optional(bool, true)
create_s3_endpoint = optional(bool, true)
})
| `{}` | no | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | | [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes | -| [storage](#input\_storage) | storage = {
filesystem\_type = File system type(netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = netapp ontap deployment type,('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
s3 = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the ECR repositories. if 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool, false)
datasync = optional(object({
enabled = optional(bool, false)
target = optional(string, "netapp")
schedule = optional(string, "cron(0 */4 * * ? *)")
verify_mode = optional(string, "ONLY_FILES_TRANSFERRED")
}), {})
}), {})
deployment_type = optional(string, "SINGLE_AZ_1")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 128)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
storage_capacity_autosizing = optional(object({
enabled = optional(bool, false)
threshold = optional(number, 70)
percent_capacity_increase = optional(number, 30)
notification_email_address = optional(string, "")
}), {})
volume = optional(object({
create = optional(bool, true)
name_suffix = optional(string, "domino_shared_storage")
storage_efficiency_enabled = optional(bool, true)
junction_path = optional(string, "/domino")
size_in_megabytes = optional(number, 1048576)
}), {})
}), {})
s3 = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, true)
})
| `{}` | no | -| [tags](#input\_tags) | Deployment tags. | `map(string)` | `{}` | no | +| [storage](#input\_storage) | storage = {
filesystem\_type = File system type (netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = NetApp ONTAP deployment type ('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
staging\_volume = {
create = Create a staging volume associated with the filesystem
name = The name of the staging volume
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the staging volume
}
}
s3 = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets.
}
ecr = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false', terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool, false)
datasync = optional(object({
enabled = optional(bool, false)
target = optional(string, "netapp")
schedule = optional(string, "cron(0 */4 * * ? *)")
verify_mode = optional(string, "ONLY_FILES_TRANSFERRED")
}), {})
}), {})
deployment_type = optional(string, "SINGLE_AZ_1")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 128)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
storage_capacity_autosizing = optional(object({
enabled = optional(bool, false)
threshold = optional(number, 70)
percent_capacity_increase = optional(number, 30)
notification_email_address = optional(string, "")
}), {})
volume = optional(object({
create = optional(bool, true)
name_suffix = optional(string, "domino_shared_storage")
storage_efficiency_enabled = optional(bool, true)
junction_path = optional(string, "/domino")
size_in_megabytes = optional(number, 1099511)
}), {})
staging_volume = optional(object({
create = optional(bool, true)
name = optional(string, "trident_domino_staging_vol")
junction_path = optional(string, "/trident_domino_staging_vol")
size_in_megabytes = optional(number, 1099511)
}), {})
}), {})
s3 = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, false)
})
| `{}` | no | | [tags](#input\_tags) | Deployment tags. | `map(string)` | `{}` | no | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | | [vpn\_connections](#input\_vpn\_connections) | create = Create a VPN connection.
connections = List of VPN connections, each with:
- name: Name for identification (optional).
- shared\_ip: Customer's shared IP Address (optional).
- cidr\_block: CIDR block for the customer's network (optional). |
object({
create = optional(bool, false)
connections = optional(list(object({
name = optional(string, "")
shared_ip = optional(string, "")
cidr_blocks = optional(list(string), [])
})), [])
})
| `{}` | no | diff --git a/modules/infra/submodules/storage/README.md b/modules/infra/submodules/storage/README.md index 9ffcf44c2..fb717e598 100644 --- a/modules/infra/submodules/storage/README.md +++ b/modules/infra/submodules/storage/README.md @@ -97,8 +97,7 @@ No modules. | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet ud
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet ud
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
})
})
| n/a | yes | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | -| [storage](#input\_storage) | storage = {
filesystem\_type = File system type(netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = netapp ontap deployment type,('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
s3 = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the ECR repositories. if 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = string
efs = optional(object({
access_point_path = optional(string)
backup_vault = optional(object({
create = optional(bool)
force_destroy = optional(bool)
backup = optional(object({
schedule = optional(string)
cold_storage_after = optional(number)
delete_after = optional(number)
}))
}))
}))
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool)
datasync = optional(object({
enabled = optional(bool)
target = optional(string)
schedule = optional(string)
verify_mode = optional(string)
}))
}))
deployment_type = optional(string)
storage_capacity = optional(number)
throughput_capacity = optional(number)
automatic_backup_retention_days = optional(number)
daily_automatic_backup_start_time = optional(string)
storage_capacity_autosizing = optional(object({
enabled = optional(bool)
threshold = optional(number)
percent_capacity_increase = optional(number)
notification_email_address = optional(string)
}))
volume = optional(object({
name_suffix = optional(string)
storage_efficiency_enabled = optional(bool)
create = optional(bool)
junction_path = optional(string)
size_in_megabytes = optional(number)
}))
}))
s3 = optional(object({
create = optional(bool)
force_destroy_on_deletion = optional(bool)
}))
ecr = optional(object({
create = optional(bool)
force_destroy_on_deletion = optional(bool)
}))
enable_remote_backup = optional(bool)
costs_enabled = optional(bool)
})
| n/a | yes | -| [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | +| [storage](#input\_storage) | storage = {
filesystem\_type = File system type (netapp\|efs\|none)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
netapp = {
migrate\_from\_efs = {
enabled = When enabled, both EFS and NetApp resources will be provisioned simultaneously during the migration period.
datasync = {
enabled = Toggle to enable AWS DataSync for automated data transfer from EFS to NetApp FSx.
schedule = Cron-style schedule for the DataSync task, specifying how often the data transfer will occur (default: hourly).
verify\_mode = One of: POINT\_IN\_TIME\_CONSISTENT, ONLY\_FILES\_TRANSFERRED, NONE.
}
}
deployment\_type = NetApp ONTAP deployment type ('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem Storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups

storage\_capacity\_autosizing = Options for the FXN automatic storage capacity increase, cloudformation template
enabled = Enable automatic storage capacity increase.
threshold = Used storage capacity threshold.
percent\_capacity\_increase = The percentage increase in storage capacity when used storage exceeds
LowFreeDataStorageCapacityThreshold. Minimum increase is 10 %.
notification\_email\_address = The email address for alarm notification.
}
volume = {
create = Create a volume associated with the filesystem.
name\_suffix = The suffix to name the volume
storage\_efficiency\_enabled = Toggle storage\_efficiency\_enabled
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the volume
}
staging\_volume = {
create = Create a staging volume associated with the filesystem
name = The name of the staging volume
junction\_path = filesystem junction path
size\_in\_megabytes = The size of the staging volume
}
}
s3 = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets.
}
ecr = {
    force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false', terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
netapp = optional(object({
migrate_from_efs = optional(object({
enabled = optional(bool, false)
datasync = optional(object({
enabled = optional(bool, false)
target = optional(string, "netapp")
schedule = optional(string, "cron(0 */4 * * ? *)")
verify_mode = optional(string, "ONLY_FILES_TRANSFERRED")
}), {})
}), {})
deployment_type = optional(string, "SINGLE_AZ_1")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 128)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
storage_capacity_autosizing = optional(object({
enabled = optional(bool, false)
threshold = optional(number, 70)
percent_capacity_increase = optional(number, 30)
notification_email_address = optional(string, "")
}), {})
volume = optional(object({
create = optional(bool, true)
name_suffix = optional(string, "domino_shared_storage")
storage_efficiency_enabled = optional(bool, true)
junction_path = optional(string, "/domino")
size_in_megabytes = optional(number, 1099511)
}), {})
staging_volume = optional(object({
create = optional(bool, true)
name = optional(string, "trident_domino_staging_vol")
junction_path = optional(string, "/trident_domino_staging_vol")
size_in_megabytes = optional(number, 1099511)
}), {})
}), {})
s3 = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
create = optional(bool, true)
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, false)
})
| `{}` | no | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | ## Outputs diff --git a/modules/infra/submodules/storage/netapp.tf b/modules/infra/submodules/storage/netapp.tf index 4bf9944fb..e6c929b71 100644 --- a/modules/infra/submodules/storage/netapp.tf +++ b/modules/infra/submodules/storage/netapp.tf @@ -234,6 +234,20 @@ resource "aws_fsx_ontap_volume" "eks" { } } +resource "aws_fsx_ontap_volume" "staging" { + count = local.deploy_netapp && var.storage.netapp.staging_volume.create ? 1 : 0 + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.eks[0].id + name = var.storage.netapp.staging_volume.name + junction_path = var.storage.netapp.staging_volume.junction_path + size_in_megabytes = var.storage.netapp.staging_volume.size_in_megabytes + storage_efficiency_enabled = true + security_style = "UNIX" + ontap_volume_type = "RW" + copy_tags_to_backups = true + volume_style = "FLEXVOL" + tags = local.backup_tagging +} + resource "aws_cloudformation_stack" "fsx_ontap_scaling" { count = local.deploy_netapp && var.storage.netapp.storage_capacity_autosizing.enabled ? 1 : 0 diff --git a/modules/infra/submodules/storage/outputs.tf b/modules/infra/submodules/storage/outputs.tf index 9101f85bc..3a0b0ca44 100644 --- a/modules/infra/submodules/storage/outputs.tf +++ b/modules/infra/submodules/storage/outputs.tf @@ -29,8 +29,13 @@ output "info" { creds_secret_arn = aws_secretsmanager_secret.netapp["svm"].arn } filesystem = { id = aws_fsx_ontap_file_system.eks[0].id, security_group_id = aws_security_group.netapp[0].id } - volume = { - name = aws_fsx_ontap_volume.eks[0].name + volumes = { + primary = { + name = aws_fsx_ontap_volume.eks[0].name + } + staging = { + name = aws_fsx_ontap_volume.staging[0].name + } } } : null s3 = { diff --git a/modules/infra/submodules/storage/variables.tf b/modules/infra/submodules/storage/variables.tf index 6a2b1660b..4345e27c8 100644 --- a/modules/infra/submodules/storage/variables.tf +++ b/modules/infra/submodules/storage/variables.tf @@ -75,6 +75,13 @@ variable "storage" { storage_efficiency_enabled = Toggle storage_efficiency_enabled junction_path = filesystem junction path size_in_megabytes = The size of the volume + } + staging_volume = { + create = Create a staging volume associated with the filesystem + name = The name of the staging volume + junction_path = filesystem junction path + size_in_megabytes = The size of the staging volume + } } s3 = { force_destroy_on_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
@@ -129,6 +136,12 @@ variable "storage" { junction_path = optional(string) size_in_megabytes = optional(number) })) + staging_volume = optional(object({ + create = optional(bool) + junction_path = optional(string) + name = optional(string) + size_in_megabytes = optional(number) + })) })) s3 = optional(object({ create = optional(bool) diff --git a/modules/infra/variables.tf b/modules/infra/variables.tf index 0d4280771..75b82904b 100644 --- a/modules/infra/variables.tf +++ b/modules/infra/variables.tf @@ -426,6 +426,13 @@ variable "storage" { storage_efficiency_enabled = Toggle storage_efficiency_enabled junction_path = filesystem junction path size_in_megabytes = The size of the volume + } + staging_volume = { + create = Create a staging volume associated with the filesystem + name = The name of the staging volume + junction_path = filesystem junction path + size_in_megabytes = The size of the staging volume + } } s3 = { force_destroy_on_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets. @@ -480,6 +487,12 @@ variable "storage" { junction_path = optional(string, "/domino") size_in_megabytes = optional(number, 1048576) }), {}) + staging_volume = optional(object({ + create = optional(bool, true) + name = optional(string, "trident_domino_staging_vol") + junction_path = optional(string, "/trident_domino_staging_vol") + size_in_megabytes = optional(number, 1099511) + }), {}) }), {}) s3 = optional(object({ create = optional(bool, true) diff --git a/tests/plan/terraform/variables.tf b/tests/plan/terraform/variables.tf index 0fdf6a33b..a7f7a0cce 100644 --- a/tests/plan/terraform/variables.tf +++ b/tests/plan/terraform/variables.tf @@ -388,6 +388,13 @@ variable "storage" { storage_efficiency_enabled = Toggle storage_efficiency_enabled junction_path = filesystem junction path size_in_megabytes = The size of the volume + } + staging_volume = { + create = Create a staging volume associated with the filesystem + name = The name of the staging volume + junction_path = filesystem junction path + size_in_megabytes = The size of the staging volume + } } s3 = { force_destroy_on_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets. @@ -442,6 +449,12 @@ variable "storage" { junction_path = optional(string, "/domino") size_in_megabytes = optional(number, 1099511) }), {}) + staging_volume = optional(object({ + create = optional(bool, true) + name = optional(string, "trident_domino_staging_vol") + junction_path = optional(string, "/trident_domino_staging_vol") + size_in_megabytes = optional(number, 1099511) + }), {}) }), {}) s3 = optional(object({ create = optional(bool, true)
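For reference, a minimal sketch of how a deployment would drive the new `staging_volume` input from `infra.tfvars`. The keys and defaults come from the `storage` variable schema in this diff; the values shown are only illustrative overrides, not required settings:

```hcl
# infra.tfvars -- select NetApp and configure the staging volume explicitly.
# Any key omitted from staging_volume falls back to the optional(...) defaults
# declared in variables.tf: create = true, name = "trident_domino_staging_vol",
# junction_path = "/trident_domino_staging_vol", size_in_megabytes = 1099511.
storage = {
  filesystem_type = "netapp"

  netapp = {
    staging_volume = {
      create            = true
      name              = "trident_domino_staging_vol"
      junction_path     = "/trident_domino_staging_vol"
      size_in_megabytes = 1099511
    }
  }
}
```

Because every attribute is wrapped in `optional(...)` with a default, setting `staging_volume = {}` (or omitting the block entirely) still provisions the staging volume; set `create = false` to opt out.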