Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,4 +24,12 @@ myip-default
__pycache__/
upgrade_status.json
.coverage

.vscode/*
*.log
*.txt
*.bin
*.exe
*.rpm
examples/azure/poc/dsf_deployment/y/*
*.msi
*.tfstate*
9 changes: 9 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -633,6 +633,15 @@ The following table lists the _latest_ DSF Kit releases, their release date and
2. Improvements and bug fixes.
</td>
</tr>
<tr>
<td>TBD
</td>
<td>1.7.35</td>
<td>
1. Added support for Elastic IP (EIP) pool for AWS deployments. Set 'use_eip_pool' to true and specify 'eip_pool_tag' to use pre-allocated EIPs with predictable IP addresses.
<br/>2. Improvements and bug fixes.
</td>
</tr>

</table>

Expand Down
9 changes: 9 additions & 0 deletions examples/aws/poc/dsf_deployment/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,15 @@ Several variables in the `variables.tf` file are important for configuring the d

### Networking
- `subnet_ids`: IDs of the subnets for the deployment. If not specified, a new vpc is created.
- `use_eip_pool`: Set to `true` to use pre-allocated Elastic IPs from a pool instead of creating new ones. Default: `false`
- `eip_pool_tag`: AWS tag value to identify the EIP pool (e.g., `dsf-eip-pool`). Only used when `use_eip_pool = true`. EIPs must be tagged with `Pool=<eip_pool_tag>` in AWS before deployment.

> **EIP Pool — Fixed Slot Distribution**: The pool uses fixed slot positions to ensure IP stability:
> - Slot 0: Hub Main, Slot 1: Hub DR, Slot 2: MX, Slot 3: DRA Admin, Slots 4+: CipherTrust Managers, then CTE/DDC Agents.
> - Enabling/disabling modules (sonar, dam, dra) does **not** shift EIPs for other resources.
> - The pool must contain enough EIPs to cover through the highest used slot (some lower slots may be unused).
> - Changing `ciphertrust_manager_count` will shift agent EIP assignments; for full stability, supply specific `eip_allocation_id` values directly to each module.
> - All pool EIPs must be unassociated before the first deployment. A validation check will warn if any pool EIPs are already associated to non-managed resources.

### Audit Sources for Simulation Purposes
- `simulation_db_types_for_agentless`: Types of databases to provision and onboard to an Agentless Gateway
Expand Down
23 changes: 19 additions & 4 deletions examples/aws/poc/dsf_deployment/cm.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@ locals {
}

module "ciphertrust_manager" {
source = "imperva/dsf-ciphertrust-manager/aws"
version = "1.7.34" # latest release tag
count = local.ciphertrust_manager_count
source = "imperva/dsf-ciphertrust-manager/aws"
version = "1.7.34" # latest release tag
count = local.ciphertrust_manager_count
ciphertrust_manager_version = var.ciphertrust_manager_version
ami = var.ciphertrust_manager_ami_id == null ? null : {
id = var.ciphertrust_manager_ami_id
Expand All @@ -20,6 +20,7 @@ module "ciphertrust_manager" {
subnet_id = local.ciphertrust_manager_subnet_id
cm_password = local.password
attach_persistent_public_ip = true
eip_allocation_id = length(local.ciphertrust_manager_eip_allocation_ids) > 0 ? local.ciphertrust_manager_eip_allocation_ids[count.index] : null
key_pair = module.key_pair.key_pair.key_pair_name
allowed_web_console_and_api_cidrs = concat(local.workstation_cidr, var.web_console_cidr)
allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
Expand All @@ -33,7 +34,20 @@ module "ciphertrust_manager" {
]
}

# When using pooled EIPs, the public IP is known immediately but the association takes time.
# Add a delay to ensure the EIP is associated before the ciphertrust provider tries to
# connect to that address (the provider block below uses the instance's public IP).
resource "time_sleep" "wait_for_ciphertrust_eip" {
  # Only create the delay when it is actually needed: at least one CipherTrust
  # Manager is deployed AND the pooled-EIP feature is enabled.
  count = local.ciphertrust_manager_count > 0 && var.use_eip_pool ? 1 : 0

  depends_on = [
    module.ciphertrust_manager
  ]

  # NOTE(review): 30s appears to be an empirical buffer for the EIP association
  # to propagate - confirm it is sufficient across regions before relying on it.
  create_duration = "30s"
}

provider "ciphertrust" {
# Use public IP for connectivity from local Mac/CI runners
address = local.ciphertrust_manager_count > 0 ? "https://${coalesce(module.ciphertrust_manager[0].public_ip, module.ciphertrust_manager[0].private_ip)}" : null
username = local.ciphertrust_manager_web_console_username
password = local.password
Expand All @@ -46,7 +60,8 @@ resource "ciphertrust_trial_license" "trial_license" {
flag = "activate"

depends_on = [
module.ciphertrust_manager
module.ciphertrust_manager,
time_sleep.wait_for_ciphertrust_eip # Ensure EIP is associated before connecting
]
}

Expand Down
1 change: 1 addition & 0 deletions examples/aws/poc/dsf_deployment/cte_ddc_agents.tf
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ module "cte_ddc_agents" {
}
os_type = each.value.os_type
attach_persistent_public_ip = true
eip_allocation_id = lookup(local.cte_agent_eip_allocation_ids, each.key, null)
use_public_ip = true
allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
allowed_rdp_cidrs = each.value.os_type == "Windows" ? concat(local.workstation_cidr, var.allowed_ssh_cidrs) : []
Expand Down
1 change: 1 addition & 0 deletions examples/aws/poc/dsf_deployment/dam.tf
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ module "mx" {
port = 8443
} : null
attach_persistent_public_ip = true
eip_allocation_id = local.mx_eip_allocation_id
large_scale_mode = var.large_scale_mode.mx

create_server_group = length(var.simulation_db_types_for_agent) > 0
Expand Down
4 changes: 2 additions & 2 deletions examples/aws/poc/dsf_deployment/dra.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ module "dra_admin" {
allowed_hub_cidrs = local.hub_cidr_list
allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
attach_persistent_public_ip = true
eip_allocation_id = local.dra_admin_eip_allocation_id

tags = local.tags
depends_on = [
Expand All @@ -31,8 +32,7 @@ module "dra_admin" {
module "dra_analytics" {
source = "imperva/dsf-dra-analytics/aws"
version = "1.7.34" # latest release tag

count = local.dra_analytics_count
count = local.dra_analytics_count
name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
subnet_id = local.dra_analytics_subnet_id
dra_version = module.globals.dra_version
Expand Down
162 changes: 162 additions & 0 deletions examples/aws/poc/dsf_deployment/eip_pool.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
# Query AWS for all EIPs in the pool (associated and unassociated)
# We need ALL pool EIPs for distribution since on subsequent applies our own
# associations make them "associated". The aws_eip_association resource is
# idempotent - associating an EIP to the same instance it's already on is a no-op.
data "aws_eips" "pool" {
count = var.use_eip_pool ? 1 : 0

filter {
Copy link
Collaborator

@roiklorin roiklorin Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What happens if there are multiple deployments with the same eip_pool_tag competing for the same EIPs?
Maybe we can document this limitation.

name = "tag:Pool"
values = [var.eip_pool_tag]
}
}

# Query only UNASSOCIATED pool EIPs for validation purposes.
# This catches user errors like tagging an EIP that's already in use by
# a non-managed resource. On first deploy all pool EIPs should be unassociated.
# On subsequent applies, pool EIPs associated to our instances are expected.
data "aws_eips" "pool_available" {
  # Only queried when the pooled-EIP feature is enabled.
  count = var.use_eip_pool ? 1 : 0

  # Same tag filter as data.aws_eips.pool - restricts results to this pool.
  filter {
    name   = "tag:Pool"
    values = [var.eip_pool_tag]
  }

  # NOTE(review): assumes the EC2 API matches unassociated addresses when the
  # association-id filter value is the empty string - verify this behaves as
  # intended against describe-addresses, since unassociated addresses simply
  # omit the attribute.
  filter {
    name   = "association-id"
    values = [""] # Empty association-id means unassociated
  }
}

# Create locals to distribute pool allocation IDs to resources using a fixed
# slot layout, so each resource keeps the same EIP across applies.
locals {
  # Sorted list of ALL allocation IDs from the pool.
  # Sort ensures stable ordering across API calls - same EIP always gets same index.
  eip_pool_all_allocation_ids = var.use_eip_pool ? sort(data.aws_eips.pool[0].allocation_ids) : []

  # Count of unassociated EIPs in the pool (for validation)
  eip_pool_available_count = var.use_eip_pool ? length(data.aws_eips.pool_available[0].allocation_ids) : 0

  # Count of already-associated EIPs in the pool
  eip_pool_associated_count = var.use_eip_pool ? (
    length(local.eip_pool_all_allocation_ids) - local.eip_pool_available_count
  ) : 0

  # Total pool EIPs available
  eip_pool_total_count = length(local.eip_pool_all_allocation_ids)

  # ============================================================================
  # Fixed slot positions for singleton resources
  # These positions NEVER change regardless of which modules are enabled/disabled.
  # This ensures that enabling/disabling sonar, dam, or dra does not shift the
  # EIP assigned to other resources.
  #
  # Slot layout:
  #   0: hub_main
  #   1: hub_dr
  #   2: mx
  #   3: dra_admin
  #   4+: ciphertrust_managers (up to ciphertrust_manager_count)
  #   4+cm_count+: cte/ddc agents
  #
  # Trade-off: Some pool slots may be unused if a module is disabled, but
  # positions are guaranteed stable across configuration changes.
  #
  # Note: Changing ciphertrust_manager_count will shift agent positions.
  # This is acceptable as CM count changes are rare in practice.
  # ============================================================================
  hub_main_eip_index                  = 0
  hub_dr_eip_index                    = 1
  mx_eip_index                        = 2
  dra_admin_eip_index                 = 3
  ciphertrust_manager_eip_start_index = 4
  # Derived from ciphertrust_manager_eip_start_index (rather than repeating the
  # literal 4) so the slot layout has a single source of truth.
  cte_agent_eip_start_index = local.ciphertrust_manager_eip_start_index + (var.enable_ciphertrust ? var.ciphertrust_manager_count : 0)

  # Total agents count
  total_agent_count = (
    var.cte_ddc_agents_linux_count +
    var.cte_agents_linux_count +
    var.ddc_agents_linux_count +
    var.cte_ddc_agents_windows_count +
    var.cte_agents_windows_count +
    var.ddc_agents_windows_count
  )

  # Calculate the highest slot index needed (for pool size validation)
  # We use max() to find the highest slot that's actually in use
  eip_pool_highest_slot = max(
    var.enable_sonar ? local.hub_main_eip_index : -1,
    var.enable_sonar && var.hub_hadr ? local.hub_dr_eip_index : -1,
    var.enable_dam ? local.mx_eip_index : -1,
    var.enable_dra ? local.dra_admin_eip_index : -1,
    var.enable_ciphertrust && var.ciphertrust_manager_count > 0 ? (
      local.ciphertrust_manager_eip_start_index + var.ciphertrust_manager_count - 1
    ) : -1,
    var.enable_ciphertrust && local.total_agent_count > 0 ? (
      local.cte_agent_eip_start_index + local.total_agent_count - 1
    ) : -1,
  )

  # Pool needs enough EIPs to cover through the highest used slot
  eip_count_needed = local.eip_pool_highest_slot + 1

  # Validate we have enough IPs (enforced by the precondition in
  # null_resource.eip_pool_validation)
  eip_pool_valid = !var.use_eip_pool || local.eip_pool_total_count >= local.eip_count_needed

  # Assign specific allocation IDs to each resource using fixed slot positions.
  # try() guards every index lookup: when the pool is smaller than the slot
  # layout requires, a raw lookup would abort the plan with an opaque
  # "Invalid index" error before the validation precondition could report its
  # actionable message. With try() the lookup degrades to null and the
  # precondition on null_resource.eip_pool_validation fails the plan instead.
  hub_main_eip_allocation_id  = var.use_eip_pool && var.enable_sonar ? try(local.eip_pool_all_allocation_ids[local.hub_main_eip_index], null) : null
  hub_dr_eip_allocation_id    = var.use_eip_pool && var.enable_sonar && var.hub_hadr ? try(local.eip_pool_all_allocation_ids[local.hub_dr_eip_index], null) : null
  mx_eip_allocation_id        = var.use_eip_pool && var.enable_dam ? try(local.eip_pool_all_allocation_ids[local.mx_eip_index], null) : null
  dra_admin_eip_allocation_id = var.use_eip_pool && var.enable_dra ? try(local.eip_pool_all_allocation_ids[local.dra_admin_eip_index], null) : null

  # For CipherTrust Managers, create a list of allocation IDs
  ciphertrust_manager_eip_allocation_ids = var.use_eip_pool && var.enable_ciphertrust ? [
    for i in range(var.ciphertrust_manager_count) :
    try(local.eip_pool_all_allocation_ids[local.ciphertrust_manager_eip_start_index + i], null)
  ] : []

  # For CTE/DDC agents, create a map of allocation IDs keyed by agent ID
  # This matches the structure of local.all_agent_instances_map from cte_ddc_agents.tf
  # Sort keys to ensure stable ordering - same agent always gets same pool EIP
  cte_agent_eip_allocation_ids = var.use_eip_pool && var.enable_ciphertrust ? {
    for idx, instance_id in sort(keys(local.all_agent_instances_map)) :
    instance_id => try(local.eip_pool_all_allocation_ids[local.cte_agent_eip_start_index + idx], null)
  } : {}
}

# Validation checks for the pooled-EIP feature.
# NOTE(review): a Terraform >= 1.5 `check` block could express this without a
# null_resource - confirm the project's minimum supported Terraform version
# before refactoring.
resource "null_resource" "eip_pool_validation" {
  # Only validated when the pool feature is enabled.
  count = var.use_eip_pool ? 1 : 0

  lifecycle {
    # Validate pool has enough EIPs for the fixed slot layout.
    # local.eip_pool_valid compares the pool size against the highest used
    # slot + 1 (computed in eip_pool.tf locals).
    precondition {
      condition = local.eip_pool_valid
      error_message = <<EOF
EIP Pool Error: Not enough IPs in pool!
Pool tag: ${var.eip_pool_tag}
IPs in pool: ${local.eip_pool_total_count}
IPs needed (highest slot + 1): ${local.eip_count_needed}

The pool uses fixed slot positions for stable IP assignment:
- Slot 0 (Hub Main): ${var.enable_sonar ? "USED" : "unused"}
- Slot 1 (Hub DR): ${var.enable_sonar && var.hub_hadr ? "USED" : "unused"}
- Slot 2 (MX): ${var.enable_dam ? "USED" : "unused"}
- Slot 3 (DRA Admin): ${var.enable_dra ? "USED" : "unused"}
- Slots 4+ (CipherTrust Managers): ${var.enable_ciphertrust ? var.ciphertrust_manager_count : 0}
- Slots ${local.cte_agent_eip_start_index}+ (CTE/DDC Agents): ${var.enable_ciphertrust ? local.total_agent_count : 0}

Note: Some slots may be unused (disabled modules) but still require pool EIPs
at lower indices. This ensures IP stability when modules are toggled.

Please allocate more EIPs with tag Pool=${var.eip_pool_tag}:
aws ec2 allocate-address --domain vpc --tag-specifications 'ResourceType=elastic-ip,Tags=[{Key=Pool,Value=${var.eip_pool_tag}}]'
EOF
    }

    # Note: Previously had a check for associated EIPs exceeding expected count,
    # but this was overly restrictive. EIP associations are idempotent and will
    # be properly managed by terraform regardless of current association state.
  }
}
2 changes: 2 additions & 0 deletions examples/aws/poc/dsf_deployment/sonar.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ module "hub_main" {
ebs = var.hub_ebs_details
instance_type = var.hub_instance_type
attach_persistent_public_ip = true
eip_allocation_id = local.hub_main_eip_allocation_id
use_public_ip = true
generate_access_tokens = true
ssh_key_pair = {
Expand Down Expand Up @@ -80,6 +81,7 @@ module "hub_dr" {
ebs = var.hub_ebs_details
instance_type = var.hub_instance_type
attach_persistent_public_ip = true
eip_allocation_id = local.hub_dr_eip_allocation_id
use_public_ip = true
hadr_dr_node = true
main_node_sonarw_public_key = module.hub_main[0].sonarw_public_key
Expand Down
25 changes: 25 additions & 0 deletions examples/aws/poc/dsf_deployment/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -494,4 +494,29 @@ variable "create_fam_classification_integration_resources" {
type = bool
default = false
description = "Whether to create the AWS S3 and SQS resources required for FAM classification integration between Hub and CipherTrust."
}

# Opt-in switch for the pre-allocated EIP pool feature (see eip_pool.tf for
# the slot-distribution logic driven by this flag).
variable "use_eip_pool" {
  type    = bool
  default = false
  description = <<EOF
Whether to use pre-allocated Elastic IPs from a pool for all resources.
When true, Terraform queries AWS for EIPs tagged with eip_pool_tag and distributes them to resources.
When false (default), Terraform creates and manages new EIPs.

IMPORTANT: Before setting to true, you must:
1. Manually create EIPs in AWS with the Pool tag
2. Ensure you have at least as many EIPs as resources that need them
3. Run: aws ec2 describe-addresses --filters "Name=tag:Pool,Values=<your-tag>"
EOF
}

# Tag value used to select pool EIPs; consumed by the tag:Pool filters in
# eip_pool.tf. Ignored unless use_eip_pool = true.
variable "eip_pool_tag" {
  type    = string
  default = "dsf-eip-pool"
  description = <<EOF
The Pool tag value used to identify the EIP pool in AWS.
Only used when use_eip_pool = true.
Example: "dsf-eip-pool" will query for EIPs tagged with Pool=dsf-eip-pool
EOF
}
1 change: 1 addition & 0 deletions modules/aws/agent-gw/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ module "agent_gw" {
timeout = local.timeout
}
attach_persistent_public_ip = false
eip_allocation_id = var.eip_allocation_id
tags = var.tags
send_usage_statistics = var.send_usage_statistics
}
16 changes: 16 additions & 0 deletions modules/aws/agent-gw/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,22 @@ variable "gateway_group_name" {
}
}

# Optional pre-allocated EIP for this gateway; passed through to the inner
# agent_gw module in main.tf.
variable "eip_allocation_id" {
  type    = string
  default = null
  description = <<EOF
Optional: Allocation ID of an existing Elastic IP to use instead of creating a new one.
When provided, Terraform will associate this EIP instead of creating a new one.
When null (default), Terraform creates and manages a new EIP (current behavior).
Example: "eipalloc-0123456789abcdef0"
EOF

  validation {
    # Null-safe: when the value is null the left operand is true, and can()
    # captures the error regex() raises on a null argument, so the condition
    # never errors out itself.
    condition = var.eip_allocation_id == null || can(regex("^eipalloc-[a-f0-9]{8,}$", var.eip_allocation_id))
    error_message = "eip_allocation_id must be in format 'eipalloc-xxxxxxxxx' or null"
  }
}

variable "tags" {
description = "A map of tags to add to all resources"
type = map(string)
Expand Down
1 change: 1 addition & 0 deletions modules/aws/agentless-gw/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ module "gw_instance" {
terraform_script_path_folder = var.terraform_script_path_folder
use_public_ip = var.use_public_ip
attach_persistent_public_ip = false
eip_allocation_id = var.eip_allocation_id
sonarw_private_key_secret_name = var.sonarw_private_key_secret_name
sonarw_public_key_content = var.sonarw_public_key_content
volume_attachment_device_name = var.volume_attachment_device_name
Expand Down
Loading