From 0d7351db2cb92ee17beb771326622e6ff3c70a01 Mon Sep 17 00:00:00 2001 From: "linda.nasredin" Date: Thu, 6 Jul 2023 17:03:01 +0300 Subject: [PATCH] EDSF-290 Create full DSF installation example --- .../dsf_single_account_deployment/README.md | 162 +++++++++++ .../agent_sources.tf | 31 ++ .../agentless_sources.tf | 61 ++++ .../dsf_single_account_deployment/backend.tf | 9 + .../dsf_single_account_deployment/dam.tf | 83 ++++++ .../dsf_single_account_deployment/dra.tf | 51 ++++ .../dsf_deployment.zip | Bin 0 -> 12156 bytes .../dsf_single_account_deployment/main.tf | 33 +++ .../networking.tf | 64 +++++ .../dsf_single_account_deployment/outputs.tf | 180 ++++++++++++ .../dsf_single_account_deployment/sonar.tf | 222 ++++++++++++++ .../variables.tf | 271 ++++++++++++++++++ .../dsf_single_account_deployment/versions.tf | 13 + .../sonar_multi_account_deployment/README.md | 89 +++--- .../sonar_single_account_deployment/README.md | 69 +++-- examples/poc/dsf_deployment/README.md | 2 +- examples/poc/dsf_deployment/variables.tf | 2 +- 17 files changed, 1270 insertions(+), 72 deletions(-) create mode 100644 examples/installation/dsf_single_account_deployment/README.md create mode 100644 examples/installation/dsf_single_account_deployment/agent_sources.tf create mode 100644 examples/installation/dsf_single_account_deployment/agentless_sources.tf create mode 100644 examples/installation/dsf_single_account_deployment/backend.tf create mode 100644 examples/installation/dsf_single_account_deployment/dam.tf create mode 100644 examples/installation/dsf_single_account_deployment/dra.tf create mode 100644 examples/installation/dsf_single_account_deployment/dsf_deployment.zip create mode 100644 examples/installation/dsf_single_account_deployment/main.tf create mode 100644 examples/installation/dsf_single_account_deployment/networking.tf create mode 100644 examples/installation/dsf_single_account_deployment/outputs.tf create mode 100644 examples/installation/dsf_single_account_deployment/sonar.tf create mode 100644 examples/installation/dsf_single_account_deployment/variables.tf create mode 100644 examples/installation/dsf_single_account_deployment/versions.tf diff --git a/examples/installation/dsf_single_account_deployment/README.md b/examples/installation/dsf_single_account_deployment/README.md new file mode 100644 index 000000000..9e8ca1c6a --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/README.md @@ -0,0 +1,162 @@ +# DSF Single Account Deployment example +[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags) + +This example provides a full DSF (Data Security Fabric) deployment with DSF Hub, Agentless Gateways, DAM (Database Activity Monitoring) and DRA (Data Risk Analytics); +deployed in a single account and two regions. + +This deployment consists of: + +1. Primary and secondary DSF Hub in region X +2. Primary and secondary Agentless Gateway Hub in region Y +3. DAM MX in region X +4. DAM Agent Gateway in region Y +5. DRA Admin in region X +6. DRA Analytics server in region Y +7. DSF Hub HADR setup +8. Agentless Gateway HADR setup +9. Federation of both primary and secondary DSF Hub with all primary and secondary Agentless Gateways +10. Integration from MX to DSF Hub (Audit from Agent source and Security Issues) + +This example is intended for Professional Service and customers who want to bring their own networking, security groups, etc.
+It is mandatory to provide as input to this example the following variables:
+1. The AWS profile of the DSF nodes' AWS account
+2. The AWS regions of the DSF nodes
+3. The subnets in which to deploy the DSF nodes; they can be in the same or in different subnets
+
+It is not mandatory to provide the security group IDs of the DSF nodes, but if they are provided, you should add the relevant CIDRs and ports to the security groups before running the deployment.
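+
+For illustration only, a rule like the following sketch could be added for the security groups you provide. The security group ID, CIDR and port below are placeholders and are not part of this example; the actual ports required between the DSF nodes depend on your deployment:
+
+```tf
+# Hypothetical ingress rule allowing the Agentless Gateway subnet to reach a DSF Hub security group.
+resource "aws_security_group_rule" "hub_from_agentless_gw" {
+  type              = "ingress"
+  from_port         = 8443 # placeholder port
+  to_port           = 8443 # placeholder port
+  protocol          = "tcp"
+  cidr_blocks       = ["10.0.1.0/24"]         # Agentless Gateway subnet CIDR (placeholder)
+  security_group_id = "sg-xxxxxxxxxxxxxxxx11" # one of the provided DSF Hub security groups (placeholder)
+}
+```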
+
+
+## Modularity
+The deployment is modular and allows users to deploy one or more of the following modules:
+
+1. Sonar
+   - DSF Hub
+   - DSF Hub secondary HADR (High Availability Disaster Recovery) node
+   - Agentless Gateways
+   - Agentless Gateways secondary HADR (High Availability Disaster Recovery) nodes
+2. DAM
+   - MX
+   - Agent Gateways
+3. DRA
+   - Admin server
+   - Analytics servers
+
+### Deploying Specific Modules
+
+To deploy specific modules, you can customize the deployment by setting the corresponding variables in your Terraform configuration. Here are the instructions to deploy the following specific modules:
+
+#### 1. DAM Only Deployment
+
+To deploy only the DAM module, set the following variables in your Terraform configuration:
+```
+enable_dam = true
+enable_dsf_hub = false
+enable_dra = false
+```
+
+This configuration will enable the DAM module while disabling the DSF Hub and DRA modules.
+
+#### 2. DRA Only Deployment
+
+To deploy only the DRA module, set the following variables in your Terraform configuration:
+```
+enable_dam = false
+enable_dsf_hub = false
+enable_dra = true
+```
+
+This configuration will enable the DRA module while disabling the DSF Hub and DAM modules.
+
+#### 3. Sonar Only Deployment
+
+To deploy only the Sonar module, set the following variables in your Terraform configuration:
+```
+enable_dam = false
+enable_dsf_hub = true
+enable_dra = false
+```
+
+This configuration will enable the Sonar module, including the DSF Hub, while disabling the DAM and DRA modules.
+
+Feel free to customize your deployment by setting the appropriate variables based on your requirements.
+
+
+## Variables
+Several variables in the `variables.tf` file are important for configuring the deployment. The following variables determine the deployment content and deserve particular attention:
+
+### Products
+- `enable_dsf_hub`: Enable DSF Hub module
+- `enable_dam`: Enable DAM module
+- `enable_dra`: Enable DRA module
+
+### Server Count
+- `dra_analytics_server_count`: Number of DRA Analytics servers
+- `agentless_gw_count`: Number of Agentless Gateways
+- `agent_gw_count`: Number of Agent Gateways
+
+### High Availability (HADR)
+- `hub_hadr`: Enable DSF Hub High Availability Disaster Recovery (HADR)
+- `agentless_gw_hadr`: Enable Agentless Gateway High Availability Disaster Recovery (HADR)
+
+### Networking
+- `subnet_ids`: IDs of the subnets for the deployment
+
+For a full list of this example's customization options which don't require code changes, refer to the [variables.tf](./variables.tf) file.
+
+### Customizing Variables
+
+There are various ways to customize variables in Terraform; in this example, it is recommended to create a 'terraform.tfvars'
+file in the example's directory, and add the customized variables to it.
+ +For example: + + ```tf + aws_profile = "myProfile" + aws_region_x = "us-east-1" + aws_region_y = "us-east-2" + subnet_ids= { + hub_primary_subnet_id = "subnet-xxxxxxxxxxxxxxxx1" + hub_secondary_subnet_id = "subnet-xxxxxxxxxxxxxxxx2" + agentless_gw_primary_subnet_id = "subnet-xxxxxxxxxxxxxxxx3" + agentless_gw_secondary_subnet_id = "subnet-xxxxxxxxxxxxxxxx4" + mx_subnet_id = "subnet-xxxxxxxxxxxxxxxx5" + agent_gw_subnet_id = "subnet-xxxxxxxxxxxxxxxx6" + dra_admin_subnet_id = "subnet-xxxxxxxxxxxxxxxx7" + dra_analytics_subnet_id = "subnet-xxxxxxxxxxxxxxxx8" + } + security_group_ids_hub = ["sg-xxxxxxxxxxxxxxxx11", "sg-xxxxxxxxxxxxxxxx12"] + security_group_ids_agentless_gw = ["sg-xxxxxxxxxxxxxxxx21", "sg-xxxxxxxxxxxxxxxx22"] + security_group_ids_mx = ["sg-xxxxxxxxxxxxxxxx31", "sg-xxxxxxxxxxxxxxxx32"] + security_group_ids_agent_gw = ["sg-xxxxxxxxxxxxxxxx41", "sg-xxxxxxxxxxxxxxxx42"] + security_group_ids_dra_admin = ["sg-xxxxxxxxxxxxxxxx51", "sg-xxxxxxxxxxxxxxxx52"] + security_group_ids_dra_analytics = ["sg-xxxxxxxxxxxxxxxx61", "sg-xxxxxxxxxxxxxxxx62"] + tarball_location = { + s3_bucket = "bucket_name" + s3_region = "us-east-1" + s3_key = "tarball_name" + } + workstation_cidr = ["10.0.0.0/24"] + ``` + +Then run the deployment as usual with the following command: + ```bash + terraform apply + ``` +For a full list of this example's customization options which don't require code changes, refer to the [variables.tf](./variables.tf) file. + +## Storing the Terraform State in an S3 Bucket + +To store the Terraform state in an S3 bucket instead of locally, uncomment the '[backend.tf](./backend.tf)' file and fill in the necessary details. +Make sure that the user running the deployment has read and write access to this bucket. You can find the required permissions [here](https://developer.hashicorp.com/terraform/language/settings/backends/s3#s3-bucket-permissions). + +## Deploying DSF Nodes without Outbound Internet Access + +Follow these steps to deploy a DSF node (Hub, Agentless Gateway, MX, Agent Gateway, DAR Admin or DRA Analytics server) in an environment without outbound internet access. +1. Provide a custom AMI with the following dependencies: AWS CLI, unzip, lvm2 and jq. + You can create a custom AMI with these dependencies installed by launching an Amazon EC2 instance, installing the dependencies, and creating an AMI from the instance. + You can then use this custom AMI when launching the DSF Hub and/or Agentless Gateway instances. +2. Update the _ami_ variable in your Terraform example with the details of the custom AMI you created. +3. Create an S3 VPC endpoint to allow the instances to access S3 without going over the internet. You can create an S3 VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. +4. Create a Secrets Manager VPC endpoint to allow the instances to access the Secrets Manager without going over the internet. You can create a Secrets Manager VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. diff --git a/examples/installation/dsf_single_account_deployment/agent_sources.tf b/examples/installation/dsf_single_account_deployment/agent_sources.tf new file mode 100644 index 000000000..b367c86ac --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/agent_sources.tf @@ -0,0 +1,31 @@ +locals { + # TODO why is creating a db_with_agent conditioned by the creation of a cluster? + # change to local.agent_gw_count + db_types_for_agent = local.create_agent_gw_cluster > 0 ? 
var.simulation_db_types_for_agent : [] +} + +module "db_with_agent" { + source = "imperva/dsf-db-with-agent/aws" + version = "1.5.0" # latest release tag + count = length(local.db_types_for_agent) + + friendly_name = join("-", [local.deployment_name_salted, "db", "with", "agent", count.index]) + + os_type = var.agent_source_os + db_type = local.db_types_for_agent[count.index] + + subnet_id = local.agent_gw_subnet_id + key_pair = module.key_pair.key_pair.key_pair_name + allowed_ssh_cidrs = [format("%s/32", module.mx[0].private_ip)] + + registration_params = { + agent_gateway_host = module.agent_gw[0].private_ip + secure_password = local.password + server_group = module.mx[0].configuration.default_server_group + site = module.mx[0].configuration.default_site + } + tags = local.tags + depends_on = [ + module.agent_gw_cluster_setup + ] +} diff --git a/examples/installation/dsf_single_account_deployment/agentless_sources.tf b/examples/installation/dsf_single_account_deployment/agentless_sources.tf new file mode 100644 index 000000000..250eaba87 --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/agentless_sources.tf @@ -0,0 +1,61 @@ +locals { + db_types_for_agentless = local.agentless_gw_count > 0 ? var.simulation_db_types_for_agentless : [] +} + +module "rds_mysql" { + source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mysql-db" + version = "1.5.0" # latest release tag + count = contains(local.db_types_for_agentless, "RDS MySQL") ? 1 : 0 + + rds_subnet_ids = local.db_subnet_ids + security_group_ingress_cidrs = local.workstation_cidr + tags = local.tags +} + +module "rds_mssql" { + source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mssql-db" + version = "1.5.0" # latest release tag + count = contains(local.db_types_for_agentless, "RDS MsSQL") ? 
1 : 0 + + rds_subnet_ids = local.db_subnet_ids + security_group_ingress_cidrs = local.workstation_cidr + + tags = local.tags + providers = { + aws = aws, + aws.poc_scripts_s3_region = aws.poc_scripts_s3_region + } +} + +module "db_onboarding" { + source = "imperva/dsf-poc-db-onboarder/aws" + version = "1.5.0" # latest release tag + for_each = { for idx, val in concat(module.rds_mysql, module.rds_mssql) : idx => val } + + sonar_version = module.globals.tarball_location.version + usc_access_token = module.hub[0].access_tokens.usc.token + hub_info = { + hub_ip_address = module.hub[0].public_ip + hub_private_ssh_key_path = module.key_pair.private_key_file_path + hub_ssh_user = module.hub[0].ssh_user + } + + assignee_gw = module.agentless_gw[0].jsonar_uid + assignee_role = module.agentless_gw[0].iam_role + database_details = { + db_username = each.value.db_username + db_password = each.value.db_password + db_arn = each.value.db_arn + db_port = each.value.db_port + db_identifier = each.value.db_identifier + db_address = each.value.db_address + db_engine = each.value.db_engine + db_name = try(each.value.db_name, null) + } + tags = local.tags + depends_on = [ + module.federation, + module.rds_mysql, + module.rds_mssql + ] +} diff --git a/examples/installation/dsf_single_account_deployment/backend.tf b/examples/installation/dsf_single_account_deployment/backend.tf new file mode 100644 index 000000000..7e31821e7 --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/backend.tf @@ -0,0 +1,9 @@ +#terraform { +# backend "s3" { +## Fill in your bucket details +# bucket = "myBucket" +# key = "terraform.tfstate" +# region = "us-east-1" +# profile = "myProfile" +# } +#} diff --git a/examples/installation/dsf_single_account_deployment/dam.tf b/examples/installation/dsf_single_account_deployment/dam.tf new file mode 100644 index 000000000..b9f06afbd --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/dam.tf @@ -0,0 +1,83 @@ +locals { + agent_gw_count = var.enable_dam ? var.agent_gw_count : 0 + gateway_group_name = "temporaryGatewayGroup" + create_agent_gw_cluster = local.agent_gw_count >= 2 ? 1 : 0 + + agent_gw_cidr_list = [data.aws_subnet.agent_gw.cidr_block] +} + +module "mx" { + source = "imperva/dsf-mx/aws" + version = "1.5.0" # latest release tag + count = var.enable_dam ? 1 : 0 + + friendly_name = join("-", [local.deployment_name_salted, "mx"]) + dam_version = var.dam_version + subnet_id = local.mx_subnet_id + license_file = var.license_file + key_pair = module.key_pair.key_pair.key_pair_name + secure_password = local.password + mx_password = local.password + allowed_web_console_and_api_cidrs = var.web_console_cidr + allowed_agent_gw_cidrs = [data.aws_subnet.agent_gw.cidr_block] + allowed_ssh_cidrs = local.workstation_cidr + allowed_hub_cidrs = local.hub_cidr_list + + hub_details = var.enable_dsf_hub ? 
{ + address = coalesce(module.hub[0].public_dns, module.hub[0].private_dns) + access_token = module.hub[0].access_tokens["dam-to-hub"].token + port = 8443 + } : null + attach_persistent_public_ip = true + large_scale_mode = var.large_scale_mode.mx + + create_server_group = length(var.simulation_db_types_for_agent) > 0 + tags = local.tags + depends_on = [ + module.vpc + ] +} + +module "agent_gw" { + source = "imperva/dsf-agent-gw/aws" + version = "1.5.0" # latest release tag + count = local.agent_gw_count + + friendly_name = join("-", [local.deployment_name_salted, "agent", "gw", count.index]) + dam_version = var.dam_version + subnet_id = local.agent_gw_subnet_id + key_pair = module.key_pair.key_pair.key_pair_name + secure_password = local.password + mx_password = local.password + allowed_agent_cidrs = [data.aws_subnet.agent_gw.cidr_block] + allowed_mx_cidrs = [data.aws_subnet.mx.cidr_block] + allowed_ssh_cidrs = [data.aws_subnet.mx.cidr_block] + allowed_gw_clusters_cidrs = [data.aws_subnet.agent_gw.cidr_block] + management_server_host_for_registration = module.mx[0].private_ip + management_server_host_for_api_access = module.mx[0].public_ip + large_scale_mode = var.large_scale_mode.agent_gw + gateway_group_name = local.gateway_group_name + tags = local.tags + depends_on = [ + module.vpc + ] +} + +module "agent_gw_cluster_setup" { + source = "imperva/dsf-agent-gw-cluster-setup/null" + version = "1.5.0" # latest release tag + count = local.create_agent_gw_cluster + + cluster_name = join("-", [local.deployment_name_salted, "agent", "gw", "cluster"]) + gateway_group_name = local.gateway_group_name + mx_details = { + address = module.mx[0].public_ip + port = 8083 + user = module.mx[0].web_console_user + password = local.password + } + depends_on = [ + module.agent_gw, + module.mx + ] +} diff --git a/examples/installation/dsf_single_account_deployment/dra.tf b/examples/installation/dsf_single_account_deployment/dra.tf new file mode 100644 index 000000000..3aaa300cc --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/dra.tf @@ -0,0 +1,51 @@ +locals { + dra_analytics_server_count = var.enable_dra ? var.dra_analytics_server_count : 0 + + dra_admin_cidr_list = concat([data.aws_subnet.dra_admin.cidr_block], var.enable_dra ? [format("%s/32", module.dra_admin[0].public_ip)] : []) +} + +module "dra_admin" { + source = "imperva/dsf-dra-admin/aws" + version = "1.5.0" # latest release tag + count = var.enable_dra ? 
1 : 0 + + friendly_name = join("-", [local.deployment_name_salted, "dra", "admin"]) + subnet_id = local.dra_admin_subnet_id + dra_version = module.globals.dra_version + ebs = var.dra_admin_ebs_details + admin_registration_password = local.password + admin_password = local.password + allowed_web_console_cidrs = var.web_console_cidr + allowed_analytics_server_cidrs = [data.aws_subnet.dra_analytics.cidr_block] + allowed_hub_cidrs = local.hub_cidr_list + attach_persistent_public_ip = true + key_pair = module.key_pair.key_pair.key_pair_name + tags = local.tags + depends_on = [ + module.vpc + ] +} + +module "analytics_server_group" { + source = "imperva/dsf-dra-analytics/aws" + version = "1.5.0" # latest release tag + + count = local.dra_analytics_server_count + friendly_name = join("-", [local.deployment_name_salted, "dra", "analytics", "server", count.index]) + subnet_id = local.dra_analytics_subnet_id + dra_version = module.globals.dra_version + ebs = var.dra_analytics_group_ebs_details + admin_registration_password = local.password + admin_password = local.password + allowed_admin_server_cidrs = [data.aws_subnet.dra_admin.cidr_block] + allowed_gateways_cidrs = distinct(concat(local.agent_gw_cidr_list, local.agentless_gw_cidr_list)) + allowed_ssh_cidrs = local.hub_cidr_list + key_pair = module.key_pair.key_pair.key_pair_name + archiver_password = local.password + admin_server_private_ip = module.dra_admin[0].private_ip + admin_server_public_ip = module.dra_admin[0].public_ip + tags = local.tags + depends_on = [ + module.vpc + ] +} diff --git a/examples/installation/dsf_single_account_deployment/dsf_deployment.zip b/examples/installation/dsf_single_account_deployment/dsf_deployment.zip new file mode 100644 index 0000000000000000000000000000000000000000..683d1deddcb657c0c6d9ed1371fa61130f0204aa GIT binary patch literal 12156 zcma)ibzB^4()Hl(1PSi$?(P!YA-FTRTW|^PmY~7i-QC^YA-EIlUPL@D7Cjeuf52zXNS$N<|}p0suGAfOnYRfPf}WuC@+V z7C3sFsm5RS&T=pkM;}Knj2fz6^`< zA2*Z=#EwAAn<-(P zB}$gWy{xq6{g_BF#sl;WaW(3TJ67H%MDPTBQt&1=!Z8t~uT=((-47Wuq)lcLVN)_N zfsnr`neyd{9Z@%-r5X`bP{#C``}DkSeNzPFAQPq6vqjp&i}h5I369gw>VzvgAYGjb z1{vGF_^5J9+H-!|i#~QGm7yKXK-;@EVy9K61uq3kX^{aN*&y4y7=||A?Q8Uv4cBgs z_>zh+v-^!ypTLNgZPL_<}WE$1P2umPF?R9rS_5n;S^b6Pt#-9@Z5{?Pen zKf}pX`j9S*-rM~LUi^TvC@TcAQ+W}Th z7a-;bc}&9?>rcdE9ZzcgBfM}GE1I+QP~z@rVAq>BnRY#ywuAKy-u@(R zoQ6l{{oKH{kL3OjYH0{m+H*p3vHl4z16byScyL}@0B3e7>E=gcUa=(?LQt|{^saQv znysiD;Yl>5W_A26FfNX8QiC{Z59Z+nN~wYsFWH$#-EsTW*s|bGP{k^n+mzr0mLFq! zAZn&dPjDDpcw?dS*-@{g`S4CL*=I)XU!r&Q#R;2|sKoZI*hJpOsXUWO)i=qxs8t7F zfCU7ld}UkvJE!%ONZ4{jrdP0}PZOE^FYPkG4JcEmPgp1(I#|p(_)TtDD=ZVuKP3I0 zj!X-ABE(+6sLI94>^1Rg<+(6!pKv5w3NcvxA6`BxLy(hjiF9Xn!i3Qo9A;t8&@(=O zv9d6~Rh+&7Cu_0ndp|2_)durr=p9+;w0*#zlrO5s0(Sv5(6@1bRY*gnG5|B*iell>`Kging7eH;aptr# z$2g36=px0EjVo2u1CB^5fmW~Whi{nVh0a#0%CKm`1y{`e(hwFFXHFE+yW~-8!9z^T z{keU!qs_nWJML`dXaN-Uyql{=AG5%{bLo}a=RWC;`@~jR^b4Ed9U<$2mq7(T?-vyT z!f#@#rmb(Uib&V}QLj;Of|oOiQsQ5#lYV`_Ss&()UR~Il9C}aV93Y`-!(|2G4! 
zAjG#TedkrKidTCw$D<`C%oIYW?X@4#@e|q`Py!t%NTgz>zYl`c>E9ne5Fmpfu#NyDYJj&pJ^ayS-)*!kK&5fgG3UU{&oQ%olC)>{C5?o4Awqr+1TX+L>XUoL30FoC z5tIq-1RgI9THjSTJ&c%bK;CJZEEgYXYNCN2Ki!z)>l+^xgl?~(|M#tVm9A$p1zum6 z**%6p$6==>Qu*bQgH<96#!{h@@l2u{|C$1%&t7Nn-32vTxE=eIhg5EU0Xb$2ofm^H z@I{aDXFfUU0{4*Ses>pm3Y$uhObOpU$n8a(Oowav`Z)tK2H7It?rdUDw!a?L%Oi%} zNJjqPK4N#!(@pKDAqS;mZ=MM5uQiyJ2~&$rU=tgs?k}RY+`Y`t38A@$02_mRBH|yqKL~p`T+gnK-k|3U1~Phj#E} zIFI&0xHDOR48d9+r3V=%#h&jec`>9VtDl|CMpO^pipjJ*F4z(~Hr7@}naC7$^kaiQ ztz+sI$y0T-B)HWl=hx$-Ag~0t$#~>)<0)@@Ou>REk=zf3K3Xmcl<_n zsq8w+WNS#(d=NwmF=}yQS)JFI5uTY;yu@AL6SXxUqwSx6RmX+89%j5NKo#{QI~*-? zbH3|!sx@6KzP$NtsA8~D*fx!+cr@czVeGAuSMmn&*OiYed(L)yufq=B>#z^Mue`B? z!QU+WdjS@;%!uxFKvSxcDCmCzENNE}enAeT9pJ}Vbv_w%$tpbVxL<7AB!fwy`$Uw= zwScXSJ)S_)ff)!rLOCP*t!+1oyHcVuAzj?FnoPpfak5lF5JkeL7n1{PW9T|3nu@xJ zD!E&UOc%O8a(|S;)!#j-22CklIA8+G34bbx`Prre|6slCqqf19lN!%ozsR)XheO4? zf;v3c51f@Lhl)zoJxfg+KGVI@V$j7HgE4S$ps2KYEN z!(y4V9&m=sO~7gY3i?+!o+lGFtA7t*(eG@7;CDARFnebwpB-(T9gIx=x8nOZkt|Z# z;Ol!Nd-?vEh#As#GJmbV9scZ2M4CyyG;bZ3M^fI=VAVGhm!s{je-qS?)5F8#Q=G1O ztQ5JweR4hgsuVng`F4+cLcc51&2NA`9xuCfc3MZQFA41x$wI zHgq5&8;R%|3?t2#k}DkDpg`^F!^rf+1wPnA^~%@m)g&BFR? zrfWx9lFbdfbFuD$u>!ua3>-Z+ZY{(A=JZ6)fVLEB_vRHLc@?;+s;4Q~UMw5JYJqFK zp+Q&TsX^Vw(y>k!i;hVr(Zu5m@=tg8gI#9g%iQ_DbItMhsuM!Qh0fkL$MZjy4@Tg`g%lV{MgW}RnCLo*8|tJx9u~L zA5Y)ja8*^!e1Y38P}QcCUJxC%g$b`+ilSBj;^e2U%XI)q0HB@(0KokHt{iQF1`dA{ ztxq-I=`uUA_d8uab};}6kF#D$2CVyp8X(t-uB<9xhQ{G>Ty(ewH!Gr&ElGXMI{IZ$ zgK8G+$k&r?jLKLvID$Y#G_n*-B@c#TD*BeR+sWL~axoIu8)a(8OGkQtf z;bSq~8fLmQ5!@|FQwIo_pfWW{(6H3Cb{t`t62|V=4} z(2-sYxaT zU%(TKX(H_C z#f-E=`T_r%J0vuUtxIg;uuH_}Pop7~fp&>!H%kBNNAfb#*Q~qr`-B`7Lasm_i;+CmSIV zaXAS%QnacXD@fo4-&3|ik#PaHe3lxPslH8E2RuB|1L$EfJhagsk@h$GixW$B$vs(_ zd8IMr?Qn97;&COz1Gd8kt)Hf~%HpSIl_>yTfED`OL)~;Ybqk^eAlh5Oy63g(;^EdR z0MZlE=3cmwHozXwxI1%xQ1h9!rf4Vg>(p$59a&CVDP|5eQH98S+&QSzDY9WHd~&bU z;AvpYt)iTeD(E`*$4PU5q4P0TOvI3Rm#d1n4D$>c_s6#rxcT;rp=Q7S(NseZoGimI zStBas)vc+AM00BQ){%R-6_8c3jiI)%AI+rZZBhfnsYQJiDU%RimYnHaHnTr}K?uA_ zIUb%I17dTO|<(> z-%8CMsT6>37t#oS52PV!^LSr;@bAEz#nxkpm<)2?A`{o}D^ECI8F z9Kb4l$HXMmE#1rC5RXB226KI}+io!7RDFI8#h!3`DT<-?3#Na=A+gup<^^G7i<2@b zZZ7UMxa-p%1m`%Srq6U#=dcki!0t zuz3cr5_#xhPj}l0FSi|=O4l>eG`LrBfajN8fVqquBnwhr=l7X|-dQC@QA7{ zBrf#%-O0&doEKhSeF^5x%=mIxt*;s+Ns{Uf%Z}ZQz1nJ5*l<>pLfn?%N7lyYZEfh* zvbx4xIqeYlXct}im2VPC#?G=*ePXpXt)&#cS0H>e#IA0`9&(CWu`g|LMH=WVk3x9h zwdxkB7%;ErNdHpFV$~He=Km&TR%CD7TC!<5r||lxH2DYZqX_rQaEAc^Mo8bgWWOgA z7Xt?i14HY-t(fLCJ#CK%KfJy9gewE<{9xgC+ba?htJ8tDv-xs8cSLJ>^ZH1Pk*AmA zi^v`v8*K-p5eUJ4F{|cvGQ?zV_y;dMNE4~3)){u@eY~G`nfEkNOlkM%9Wcx>}?PRodq#= zyL6L@vfQMIxE!oemU9M|S<`N{8&+e~4YGKYInNfctT8Bv(zKJ1+L=h1b2(-QdSAYm zp4J4$gM5S(^81le(+C2Z648r}DU)L8Va3*GW7Ef*&m@A_%GqrEb02>G*oSqG?@e3Z zmARDa&h6%(c*xnV+@>&ZYxKP|)Wi8=cEce`7F^?SGV`V}Y%KE>MEzJl&5)P@xBX#U zT%fbS0iO2jmQ#2HnU2*1TCrDG(#K`qg=0!+Kh^F+!zEFMXkJFmP7`7KuYDFXQ02(l4j(e%Iu)g2>3 zni(c_ZCdz-qqn&eQr8uBykMz5yvNdk`tUauSSPC;Ke_wjwisEC{r>WY4V0oS^O-pD z;sQx0c!n)uV`4hx$|&Kk=&Ce*%Ep(c-8Gt@U=Gzcip)6`fu+@!q4sZ~lNw$mu*PKsy!3 zy7gWCuwm0k*qKg|z=j3@inBhDD8+9myc9gU$TaNPbZ?Y|G6RQxpCnr3RFRFek#o^$gYS9Gtj zg__QK`XFg6TwKgF`2H}yS37VGV!y!Gb?vy@*$O))^YyCPIpCardMZAjBky-(7=+5R_l`U>qYox`l;Xm=Rj)=3_zn^7?!- zJXDVYW6a=0Yj|MsD<64Ljb)l`msJ+s^r25ylm#o2|MnBZgr68n(}}RYsD2*vbRk+G zrpbFAi}vDWb=n88cms%PsNG=S`*K^qcpCR=** zT1K&cep0(*;@>X^1a2?zN#&KxXF8g!?hwD>@BSS%uQ}>dE!2J zWR(Dx9lY|uEFa1M`+}!67_iJmu@x>BuPPE`j^CNdHQ`s_UX^DoQ8p=jh;8#(^=bA; z%nnLu^OHYj{lA+Lf`ntfQq4N6(H&NUtZ}v-+oA#18(O&Rlc0=VuUDsG6Fs4Nn&j 
zFVFk4e33?P-4y+!KA!%pl*l|-&XZ*vJmtVpUuT`qchtB{+aV@(lYg5s9nqT^mTTi=J`K@*CP=H&z+FEIwzLX{OGFRfr3hBz0&wZRjN&FV6`SWa8IA&pKkYgTJccRy{6vojm>DcNCRHvMln%cs@O zkW9xzSu6&=M@zJOFGjzI9p^>KTVzQRT}Tki83Tor(QEJ zA=gqPXhvk#XPv7enUZE0_JQeyLkatO4MN029nI-FPSrz~&r!-M9VrJa7!iv*MEsgO z06GGlkzAazyvSM*u);)9wh&=XYq7t5$E z_2i}dd3r5x&!V63gbt)CLgn{Eum=*Z;`9tPs+GG5N*8iiDSlJi3&envGJ}(E%4gww zGDTmDwsSL4KRN2JJlC9$>OPrE+k&)z-52OD9C4s%>xfjlK@5~*p|AlQkQgY>K*(+K z%3}BnN0S;`4a{OpLNZcc>rfv@tx*fdZxi^@vtTe>1KQ&cVZun`m5(@jz-~=2PDZIl zInS!grM^8!{osmfd6eA8)Mw7Z`eMWZ2YD`qlcKgKSo?qhgMFDQFG1Lb@^`jdET2P}U>zT^=r>S3l zHia@`7lv4#re4a}YK5oVJF;56tv$I85cq4x*7JTjQyjlc@1h@rccC(Idp3nSQRlxc zVV!e%eWJqwK3E5TwSnk~o3zK?m#s=Lsy!|y2kJL6pv@(J;VP9gu8={1P&NwKM~LLF zK;1xstXS6|W8a8~5V;b>!(dKW(x^Tb>&_+DS2rVR)QEb0#XL`0Hua9j#|@)rgbTO{ z?E-!p1SG7U{M+7Rj~RlxQ5wY*&1K}Ek>2#Fcv$VuNAz9dFIsFzIn2^ z$$ZVJB;8uXzNUqs|ND4-_FPu)S`-L4Yji=8^h;Fh_a5KXqA#5@(kh8-GiGZSsPm@# zgjY5@sTNP1u~TLBJvL~2CTa8w)G{n!zOsp841Vo7{W%Nn6WRvY!HDG*vhF4{IcB9* z!9UsNFK8@juj^+bLJ)`9F_!5<*_Fby?aVC8epQEhyYn+wu=)D!GM&U;-=h2bR^Z#R zDcT`Ct^2sW{W>iNpH6svIPE0yuuAK=g3#Y7yF!nbeucAa^+|nRS@>ng=p}jj2KCRT z#~;;P>a}!A$$Kkz7773$`MsL^M-S}1m~;FO`(S;la<*tp$ZZGe5#2u$z@fnT@*)q~uqQlO%5SB${4-eqy5Vkxv7$HK zEz=`eTNu`$=+(lQbf{s+nv@%j(5{uiNR`RioDyD0NYORPOV$U@MkxD2@r!teE#D5b zcGI(vIp0=KEa5$yrfW6l_2`_Nc*(me<0fjg>bT7NxRSV_jP^5(qx~XWsfOe(%QYsp z5Hzr3iNR|rVTgvq{Z50F+4gi3`lnP^=2O!d^j~jx_`LX6eIHpn_7QCxlW!wx@-Pai z@Wc&RyKV~5!dVbgW+{uwQ{0eMFXZSVN`#dZvGEEi_?|9A6~%$%5-9_izPj7B8lc}o zWqpN=e@PI}yrDNKyXTM#;ytfd(o)5!fa`nHf9$FfgQC{Z6rxVyc0jWg>nv71GmJ=O zFam3;$`xMine7#@w?A1pM_73vR4BGNX7L8e^`&*oro&3+Rbkev!=(9js@wTM8ac$q zGa%a7HZq&6*Q^vLk4UHDzO!<82O5#6Bz<=)!;EUz08k&Hz3k`T~1gv z4+_A~`0iJc5GB7cSqx>u5Ya;InYZVrmM4%5oCjH_N(ogZf)8s7`2+=he?(%Ih{)ht ztO6U0ESE0U06`(KS4dc9baX4T(QA|P=T8OE$)@1d)P#cc&ZAgBy15SW5`&E8CpRHUn}w0n_;&B zlTbeMwGyB-3m7!tb=_#M@?Zhzb&!LQpQa854m)j zx<9*t%bjaBxeWd$AP`gsOHrr!)2;rq)OtQ-Xg4b%|2gIpz6`ew!}gx`MEB>vCa;(s z<_W3y=`-p3ON;e;@=_8L5|tC9w=w?zDa_i`WNdLbklW8IQo1qWV$?%t^x$s@HC!?R zW;&VuL4~T)bjPIwU8NFtrB!{sgGanq!Y=ESkr{&)<0w$2-QKmT*^9aaR6&#UxQFRY`;sMPuLWpfzO9 z{mUS`C1nd=$X?O}Y&8adRxZi+6I`{Crz{FFnaa@&UN_x)EvbS?l@)9w{GE!l26s5xm69yr4+A0HtJM{6=z~8w1u|q-SRRCqX@1lG{%Hpp?Jlo7N!B;~cVVz; z=e`6Qhh?w0PrPjEISJvX*z;2vtT0xHHa9ZxBL>02+!93r*XciHVura@o?Y1yY(afA z%0l$zpER#iyyE6PhmFU9t0C6*v93#Xr@HY&Tp3Of*wlm6Kb@%F^11f{s--l;Gp^HK z!?s&b)k9L6;veg*>Ox36}lVj51{oYu6ikf0X;~3SMzkOsk zmzFI|?HI*z zylqVbhQSUZ#QJ7AbZvo*xZ zPmk@+y7I<6&EPJW`X>?8eZ{y(?fJ_hYGuf4r+Z5~?m7+Jm7#d+Xok=>#%S&AbLgAR z<-LRW$JOYQRQ0|LG9L7OE_>UQ12Dc^&VJa;z6)EG1rRS{*V+BBk)iZz)#5=iC5`$^ zdy|zts*IIp{sOi~rh~bjI>)Hq49rh`+H(e(k2Z8Hz2#w(-Dz@$Yu2dz0~Xuzp4DM9 zVQNL0$2TFHMaBDbxr3pH(F8R>EbWk>Ip`wH#FS$GJ1JG^)R<#7QA&F857f- zWc4~_e59Y18ndg^TLzu+;#(l^*WgMs+6#FGvw@x)?^0C|P<*g|r+USDA1eN_Ap=f@ z{y6`pfra=l4eW3F*FQl2odWj1K;I|WfIL5`-$DOH5BqN6kD2A)Soo^|^3N7Z-)X|X zT=t*kkbe*VyJ+ao@C)w@mn59bf;zsgenj6L<l^ndp3|2hi(Grl*{Z}|Tl4FCPb|2`J@Gx+$2 z-@yMmB>2w^{FSNx*+2%tZwCHv-uj;z`YRRqv!OiV-wYZ5H!=9np#NI1{25x4{5R-7 al`sDe9pZhk4* 0 ? 
1 : 0 + + sonar_version = module.globals.tarball_location.version + dsf_primary_ip = module.hub[0].public_ip + dsf_primary_private_ip = module.hub[0].private_ip + dsf_secondary_ip = module.hub_secondary[0].public_ip + dsf_secondary_private_ip = module.hub_secondary[0].private_ip + ssh_key_path = module.key_pair.private_key_file_path + ssh_user = module.hub[0].ssh_user + depends_on = [ + module.hub, + module.hub_secondary + ] +} + +module "agentless_gw" { + source = "imperva/dsf-agentless-gw/aws" + version = "1.5.0" # latest release tag + count = local.agentless_gw_count + + friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index]) + subnet_id = local.agentless_gw_subnet_id + ebs = var.agentless_gw_ebs_details + binaries_location = local.tarball_location + password = local.password + hub_sonarw_public_key = module.hub[0].sonarw_public_key + ssh_key_pair = { + ssh_private_key_file_path = module.key_pair.private_key_file_path + ssh_public_key_name = module.key_pair.key_pair.key_pair_name + } + allowed_agentless_gw_cidrs = [data.aws_subnet.agentless_gw_secondary.cidr_block] + allowed_hub_cidrs = [data.aws_subnet.hub.cidr_block, data.aws_subnet.hub_secondary.cidr_block] + allowed_all_cidrs = local.workstation_cidr + ingress_communication_via_proxy = { + proxy_address = module.hub[0].public_ip + proxy_private_ssh_key_path = module.key_pair.private_key_file_path + proxy_ssh_user = module.hub[0].ssh_user + } + tags = local.tags + depends_on = [ + module.vpc, + ] +} + +module "agentless_gw_secondary" { + source = "imperva/dsf-agentless-gw/aws" + version = "1.5.0" # latest release tag + count = var.agentless_gw_hadr ? local.agentless_gw_count : 0 + + friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", "secondary", count.index]) + subnet_id = local.agentless_gw_secondary_subnet_id + ebs = var.agentless_gw_ebs_details + binaries_location = local.tarball_location + password = local.password + hub_sonarw_public_key = module.hub[0].sonarw_public_key + hadr_secondary_node = true + primary_node_sonarw_public_key = module.agentless_gw[count.index].sonarw_public_key + primary_node_sonarw_private_key = module.agentless_gw[count.index].sonarw_private_key + ssh_key_pair = { + ssh_private_key_file_path = module.key_pair.private_key_file_path + ssh_public_key_name = module.key_pair.key_pair.key_pair_name + } + allowed_agentless_gw_cidrs = [data.aws_subnet.agentless_gw.cidr_block] + allowed_hub_cidrs = [data.aws_subnet.hub.cidr_block, data.aws_subnet.hub_secondary.cidr_block] + allowed_all_cidrs = local.workstation_cidr + ingress_communication_via_proxy = { + proxy_address = module.hub[0].public_ip + proxy_private_ssh_key_path = module.key_pair.private_key_file_path + proxy_ssh_user = module.hub[0].ssh_user + } + tags = local.tags + depends_on = [ + module.vpc, + ] +} + +module "agentless_gw_hadr" { + source = "imperva/dsf-hadr/null" + version = "1.5.0" # latest release tag + count = length(module.agentless_gw_secondary) + + sonar_version = module.globals.tarball_location.version + dsf_primary_ip = module.agentless_gw[count.index].private_ip + dsf_primary_private_ip = module.agentless_gw[count.index].private_ip + dsf_secondary_ip = module.agentless_gw_secondary[count.index].private_ip + dsf_secondary_private_ip = module.agentless_gw_secondary[count.index].private_ip + ssh_key_path = module.key_pair.private_key_file_path + ssh_user = module.agentless_gw[count.index].ssh_user + proxy_info = { + proxy_address = module.hub[0].public_ip + 
proxy_private_ssh_key_path = module.key_pair.private_key_file_path + proxy_ssh_user = module.hub[0].ssh_user + } + depends_on = [ + module.agentless_gw, + module.agentless_gw_secondary + ] +} + +locals { + gws = merge( + { for idx, val in module.agentless_gw : "agentless-gw-${idx}" => val }, + { for idx, val in module.agentless_gw_secondary : "agentless-gw-secondary-${idx}" => val }, + ) + gws_set = values(local.gws) + hubs_set = concat( + var.enable_dsf_hub ? [module.hub[0]] : [], + var.enable_dsf_hub && var.hub_hadr ? [module.hub_secondary[0]] : [] + ) + hubs_keys = compact([ + var.enable_dsf_hub ? "hub-primary" : null, + var.enable_dsf_hub && var.hub_hadr ? "hub-secondary" : null, + ]) + + hub_gw_combinations_values = setproduct(local.hubs_set, local.gws_set) + hub_gw_combinations_keys = [for v in setproduct(local.hubs_keys, keys(local.gws)) : "${v[0]}-${v[1]}"] + + hub_gw_combinations = zipmap(local.hub_gw_combinations_keys, local.hub_gw_combinations_values) +} + +module "federation" { + source = "imperva/dsf-federation/null" + version = "1.5.0" # latest release tag + for_each = local.hub_gw_combinations + + hub_info = { + hub_ip_address = each.value[0].public_ip + hub_private_ssh_key_path = module.key_pair.private_key_file_path + hub_ssh_user = each.value[0].ssh_user + } + gw_info = { + gw_ip_address = each.value[1].private_ip + gw_private_ssh_key_path = module.key_pair.private_key_file_path + gw_ssh_user = each.value[1].ssh_user + } + gw_proxy_info = { + proxy_address = module.hub[0].public_ip + proxy_private_ssh_key_path = module.key_pair.private_key_file_path + proxy_ssh_user = module.hub[0].ssh_user + } + depends_on = [ + module.hub_hadr, + module.agentless_gw_hadr + ] +} diff --git a/examples/installation/dsf_single_account_deployment/variables.tf b/examples/installation/dsf_single_account_deployment/variables.tf new file mode 100644 index 000000000..6ff3a7058 --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/variables.tf @@ -0,0 +1,271 @@ +variable "deployment_name" { + type = string + default = "imperva-dsf" + description = "Deployment name for some of the created resources. Please note that when running the deployment with a custom 'deployment_name' variable, you should ensure that the corresponding condition in the AWS permissions of the user who runs the deployment reflects the new custom variable." +} + +variable "enable_dsf_hub" { + type = bool + default = true + description = "Provision DSF Hub and Agentless Gateways (formerly Sonar). To provision only a DSF Hub, set agentless_gw_count to 0." +} + +variable "enable_dam" { + type = bool + default = true + description = "Provision DAM MX and Agent Gateways" +} + +variable "enable_dra" { + type = bool + default = true + description = "Provision DRA Admin and Analytics" +} + +variable "agentless_gw_count" { + type = number + default = 1 + description = "Number of Agentless Gateways. Provisioning Agentless Gateways requires the enable_dsf_hub variable to be set to 'true'." +} + +variable "agent_gw_count" { + type = number + default = 2 # Minimum count for a cluster + description = "Number of Agent Gateways. Provisioning Agent Gateways requires the enable_dam variable to be set to 'true'." +} + +variable "dra_analytics_server_count" { + type = number + default = 1 + description = "Number of DRA Analytics servers. Provisioning Analytics servers requires the enable_dra variable to be set to 'true'." 
+} + +variable "password" { + sensitive = true + type = string + default = null # Random + description = "Password for all users and components including internal communication (DRA instances, Agent and Agentless Gateways, MX and Hub) and also to MX and DSF Hub web console (Randomly generated if not set)" +} + +############################## +#### networking variables #### +############################## +variable "web_console_cidr" { + type = list(string) + default = ["0.0.0.0/0"] + description = "DSF Hub and MX web console IPs range. Please specify IPs in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]. The default configuration opens the DSF Hub web console as a public website. It is recommended to specify a more restricted IP and CIDR range." +} + +variable "workstation_cidr" { + type = list(string) + default = null + description = "IP ranges from which SSH/API access will be allowed to setup the deployment. If not set, the public IP of the computer where the Terraform is run is used. Format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]" +} + +variable "vpc_ip_range" { + type = string + default = "10.0.0.0/16" + description = "VPC cidr range" +} + +variable "private_subnets" { + type = list(string) + default = ["10.0.1.0/24", "10.0.2.0/24"] + description = "VPC private subnet cidr range" +} + +variable "public_subnets" { + type = list(string) + default = ["10.0.101.0/24", "10.0.102.0/24"] + description = "VPC public subnet cidr range" +} + +variable "subnet_ids" { + type = object({ + hub_subnet_id = string + hub_secondary_subnet_id = string + agentless_gw_subnet_id = string + agentless_gw_secondary_subnet_id = string + mx_subnet_id = string + agent_gw_subnet_id = string + admin_subnet_id = string + analytics_subnet_id = string + }) + description = "The IDs of existing subnets to deploy resources in" + validation { + condition = var.subnet_ids == null || try(var.subnet_ids.hub_subnet_id != null && var.subnet_ids.hub_secondary_subnet_id != null && var.subnet_ids.agentless_gw_subnet_id != null && var.subnet_ids.agentless_gw_secondary_subnet_id != null && var.subnet_ids.mx_subnet_id != null && var.subnet_ids.agent_gw_subnet_id != null && var.subnet_ids.admin_subnet_id != null && var.subnet_ids.analytics_subnet_id != null && var.subnet_ids.db_subnet_ids != null, false) + error_message = "Value must either be null or specified for all" + } + validation { + condition = var.subnet_ids == null || try(alltrue([for subnet_id in values({ for k, v in var.subnet_ids : k => v if k != "db_subnet_ids" }) : length(subnet_id) >= 15 && substr(subnet_id, 0, 7) == "subnet-"]), false) + error_message = "Subnet id is invalid. 
Must be subnet-********" + } +} + +############################## +#### DAM variables #### +############################## + +variable "dam_version" { + type = string + description = "The DAM version to install" + default = "14.12.1.10" + validation { + condition = can(regex("^(\\d{1,2}\\.){3}\\d{1,2}$", var.dam_version)) + error_message = "Version must be in the format dd.dd.dd.dd where each dd is a number between 1-99 (e.g 14.10.1.10)" + } +} + +variable "license_file" { + type = string + description = "DAM license file path" +} + +variable "large_scale_mode" { + type = object({ + mx = bool + agent_gw = bool + }) + description = "DAM large scale mode" + validation { + condition = var.large_scale_mode.mx == false || var.large_scale_mode.agent_gw == true + error_message = "MX large scale mode requires setting large scale mode in the Agentless Gateway as well" + } + default = { + mx = false + agent_gw = true + } +} + +variable "simulation_db_types_for_agent" { + type = list(string) + default = ["MySql"] + description = "Types of databases to provision on EC2 with an Agent for simulation purposes. Available types are: 'PostgreSql', 'MySql' and 'MariaDB'." + validation { + condition = alltrue([ + for db_type in var.simulation_db_types_for_agent : contains(["PostgreSql", "MySql", "MariaDB"], db_type) + ]) + error_message = "Value must be a subset of: ['PostgreSql', 'MySql', 'MariaDB']" + } +} + +variable "agent_source_os" { + type = string + default = "Ubuntu" + description = "Agent OS type" +} + +############################## +#### Sonar variables #### +############################## + +variable "sonar_version" { + type = string + default = "4.12" + description = "The Sonar version to install. Supported versions are: 4.11 and up. Both long and short version formats are supported, for example, 4.12.0.10 or 4.12. The short format maps to the latest patch." + validation { + condition = ! startswith(var.sonar_version, "4.9.") && ! startswith(var.sonar_version, "4.10.") + error_message = "The sonar_version value must be 4.11 or higher" + } +} + +variable "hub_hadr" { + type = bool + default = true + description = "Provisions a High Availability and Disaster Recovery node for the DSF Hub" +} + +variable "agentless_gw_hadr" { + type = bool + default = true + description = "Provisions a High Availability and Disaster Recovery node for the Agentless Gateway" +} + +variable "hub_ebs_details" { + type = object({ + disk_size = number + provisioned_iops = number + throughput = number + }) + description = "DSF Hub compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm" + default = { + disk_size = 250 + provisioned_iops = 0 + throughput = 125 + } +} + +variable "agentless_gw_ebs_details" { + type = object({ + disk_size = number + provisioned_iops = number + throughput = number + }) + description = "DSF Agentless Gateway compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm" + default = { + disk_size = 150 + provisioned_iops = 0 + throughput = 125 + } +} + +variable "additional_install_parameters" { + default = "" + description = "Additional params for installation tarball. 
More info in https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/80035.htm" +} + +variable "simulation_db_types_for_agentless" { + type = list(string) + default = ["RDS MySQL"] + description = "Types of databases to provision and onboard to an Agentless Gateway for simulation purposes. Available types are: 'RDS MySQL' and 'RDS MsSQL'. 'RDS MsSQL' includes simulation data." + validation { + condition = alltrue([ + for db_type in var.simulation_db_types_for_agentless : contains(["RDS MySQL", "RDS MsSQL"], db_type) + ]) + error_message = "Value must be a subset of: ['RDS MySQL', 'RDS MsSQL']" + } +} + +variable "database_cidr" { + type = list(string) + default = null # workstation ip + description = "CIDR blocks allowing dummy database access" +} + +############################## +#### DRA variables #### +############################## + +variable "dra_version" { + type = string + default = "4.12.0.10" + description = "The DRA version to install. Supported versions are 4.11.0.10 and up. Both long and short version formats are supported, for example, 4.11.0.10 or 4.11. The short format maps to the latest patch." + validation { + condition = ! startswith(var.dra_version, "4.10.") && ! startswith(var.dra_version, "4.9.") && ! startswith(var.dra_version, "4.8.") && ! startswith(var.dra_version, "4.3.") && ! startswith(var.dra_version, "4.2.") && ! startswith(var.dra_version, "4.1.") + error_message = "The dra_version value must be 4.11.0.10 or higher" + } +} + +variable "dra_admin_ebs_details" { + type = object({ + volume_size = number + volume_type = string + }) + description = "Admin Server compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/69846.htm" + default = { + volume_size = 260 + volume_type = "gp3" + } +} + +variable "dra_analytics_group_ebs_details" { + type = object({ + volume_size = number + volume_type = string + }) + description = "Analytics Server compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/69846.htm" + default = { + volume_size = 1010 + volume_type = "gp3" + } +} diff --git a/examples/installation/dsf_single_account_deployment/versions.tf b/examples/installation/dsf_single_account_deployment/versions.tf new file mode 100644 index 000000000..e1538584d --- /dev/null +++ b/examples/installation/dsf_single_account_deployment/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_version = ">= 1.3.1, < 1.6.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.23.0" + } + local = { + version = "~> 2.1" + } + } +} diff --git a/examples/installation/sonar_multi_account_deployment/README.md b/examples/installation/sonar_multi_account_deployment/README.md index 94016e724..0907c1036 100644 --- a/examples/installation/sonar_multi_account_deployment/README.md +++ b/examples/installation/sonar_multi_account_deployment/README.md @@ -1,7 +1,7 @@ # Sonar Multi Account Deployment example [![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags) -A DSF Hub and Agentless Gateway (formerly Sonar) deployment with Hub HADR and Agentless Gateway HADR; deployed in multiple accounts and multiple regions. +A DSF Hub and Agentless Gateway (formerly Sonar) deployment with full HADR; deployed in multiple accounts and multiple regions. This deployment consists of: @@ -11,61 +11,68 @@ This deployment consists of: 4. 
One secondary Agentless Gateway in AWS account B, region Y 5. DSF Hub HADR setup 6. Agentless Gateway HADR setup -7. Federation of both primary and secondary DSF Hub with all Agentless Gateways (primary and secondary) +7. Federation of both primary and secondary DSF Hub with all primary and secondary Agentless Gateways -This example is intended for PS/customers who want to bring their own networking. -It is mandatory to provide as input to this example the subnets to deploy the DSF Hub and the Agentless Gateways. -They can be in the same or in different subnets.
 
+This example is intended for Professional Services and customers who want to bring their own networking, security groups, etc.
+It is mandatory to provide as input to this example the following variables:
+1. The AWS profiles of the DSF Hub and Agentless Gateways' AWS accounts
+2. The AWS regions of the DSF Hubs and Agentless Gateways
+3. The subnets in which to deploy the DSF Hub and the Agentless Gateways; they can be in the same or in different subnets
+4. The proxy details for SSH to the DSF Hub and the Agentless Gateways
 
-In this example you should supply the proxy details for ssh to the DSF Hub and the Agentless Gateways.
 
+It is not mandatory to provide the security group IDs of the DSF Hubs and the Agentless Gateways, but if they are provided, you should add the CIDRs and ports of the Agentless Gateways to the security groups of the DSF Hubs and vice versa before running the deployment.
-Note that in case of supplying the security group id of the DSF Hubs and the Agentless Gateways, you should add the cidr of the Agentless Gateways to the security group of the DSF Hubs and vice versa before running the deployment.
+## Customizing Variables -#### Running terraform with variables -In the current example setting AWS profile, AWS region and the subnets of the DSF Hub and the Agentless gateways are mandatory.
 
+There are various ways to customize variables in Terraform; in this example, it is recommended to create a 'terraform.tfvars'
+file in the example's directory, and add the customized variables to it.
 
-This example contains variables with default values. In order to customize the variables, you can use the following:
-* Run terraform with variables in a command line. For example, in order to specify the desired workstation CIDR that allows hub SSH and debugging access instead of using the default workstation CIDR where the installation is running from, run the following:
- ```bash - terraform apply -auto-approve -var 'aws_profile_hub="profileHub"' -var 'aws_profile_gw="profileGw"' -var 'aws_region_hub_primary="us-east-1"' -var 'aws_region_hub_secondary="us-east-2"' -var 'aws_region_gw_primary="us-east-1"' -var 'aws_region_gw_secondary="us-west-1"' -var 'subnet_hub_primary="subnet-xxxxxxxxxxxxxxxx1"' -var 'subnet_hub_secondary="subnet-xxxxxxxxxxxxxxxx2"' -var 'subnet_gw_primary="subnet-xxxxxxxxxxxxxxxx3"' -var 'subnet_gw_secondary="subnet-xxxxxxxxxxxxxxxx4"' -var 'workstation_cidr=["10.0.0.0/24"]' -var 'proxy_address="x.x.x.x"' -var 'proxy_private_address="x.x.x.x"' -var 'proxy_ssh_key_path="/proxy-ssh-key-path.pem"' -var 'proxy_ssh_user="ec2-user"' - ``` -* In case there are a lot of variables that need to be changed, it might be more convenient to run Terraform using a file called 'terraform.tfvars' which should contain all the mandatory and customized variables. Using 'terraform.tfvars' file replace the need to use 'var' parameter in 'terraform apply' command. The file should be located in the same directory of the example.

- Example for 'terraform.tfvars' file with security groups for all primary and secondary DSF Hub and Agentless Gateways:
- aws_profile_hub="profileHub"
- aws_profile_gw="profileGw"
- aws_region_hub_primary="us-east-1"
- aws_region_hub_secondary="us-east-2"
- aws_region_gw_primary="us-east-1"
- aws_region_gw_secondary="us-west-1"
- subnet_hub_primary="subnet-xxxxxxxxxxxxxxxx1"
- subnet_hub_secondary="subnet-xxxxxxxxxxxxxxxx2"
- subnet_gw_primary="subnet-xxxxxxxxxxxxxxxx3"
- subnet_gw_secondary="subnet-xxxxxxxxxxxxxxxx4"
- security_group_ids_hub_primary=["sg-xxxxxxxxxxxxxxxx1"]
- security_group_ids_hub_secondary=["sg-xxxxxxxxxxxxxxxx2"]
- security_group_ids_gw_primary=["sg-xxxxxxxxxxxxxxxx3"]
- security_group_ids_gw_secondary=["sg-xxxxxxxxxxxxxxxx4"]
- proxy_address="x.x.x.x"
- proxy_private_address="x.x.x.x"
- proxy_ssh_key_path="/proxy-ssh-key-path.pem"
- proxy_ssh_user="ec2-user"

+For example: + ```tf + aws_profile_hub = "profileHub" + aws_profile_gw = "profileGw" + aws_region_hub_primary = "us-east-1" + aws_region_hub_secondary = "us-east-2" + aws_region_gw_primary = "us-east-1" + aws_region_gw_secondary = "us-west-1" + subnet_hub_primary = "subnet-xxxxxxxxxxxxxxxx1" + subnet_hub_secondary = "subnet-xxxxxxxxxxxxxxxx2" + subnet_gw_primary = "subnet-xxxxxxxxxxxxxxxx3" + subnet_gw_secondary = "subnet-xxxxxxxxxxxxxxxx4" + security_group_ids_hub_primary = ["sg-xxxxxxxxxxxxxxxx11", "sg-xxxxxxxxxxxxxxxx12"] + security_group_ids_hub_secondary = ["sg-xxxxxxxxxxxxxxxx21", "sg-xxxxxxxxxxxxxxxx22"] + security_group_ids_gw_primary = ["sg-xxxxxxxxxxxxxxxx31", "sg-xxxxxxxxxxxxxxxx32"] + security_group_ids_gw_secondary = ["sg-xxxxxxxxxxxxxxxx41", "sg-xxxxxxxxxxxxxxxx42"] + proxy_address = "x.x.x.x" + proxy_private_address = "x.x.x.x" + proxy_ssh_key_path = "/proxy-ssh-key-path.pem" + proxy_ssh_user = "ec2-user" + tarball_location = { + s3_bucket = "bucket_name" + s3_region = "us-east-1" + s3_key = "tarball_name" + } + workstation_cidr = ["10.0.0.0/24"] + ``` -In this case the deployment can be run by the following command: +Then run the deployment as usual with the following command: ```bash - terraform apply -auto-approve + terraform apply ``` For a full list of this example's customization options which don't require code changes, refer to the [variables.tf](./variables.tf) file. -### Storing Terraform state in S3 bucket -To store the Terraform state in S3 bucket instead of locally, uncomment the '[backend.tf](./backend.tf)' file and fill in the necessary details. +## Storing the Terraform State in an S3 Bucket + +To store the Terraform state in an S3 bucket instead of locally, uncomment the '[backend.tf](./backend.tf)' file and fill in the necessary details. Make sure that the user running the deployment has read and write access to this bucket. You can find the required permissions [here](https://developer.hashicorp.com/terraform/language/settings/backends/s3#s3-bucket-permissions). -### Working with DSF Hub and Agentless Gateway without outbound internet access +## Deploying with DSF Hub and Agentless Gateway without Outbound Internet Access + Follow these steps to deploy a DSF Hub and/or Agentless Gateway in an environment without outbound internet access. 1. Provide a custom AMI with the following dependencies: AWS CLI, unzip, lvm2 and jq. You can create a custom AMI with these dependencies installed by launching an Amazon EC2 instance, installing the dependencies, and creating an AMI from the instance. You can then use this custom AMI when launching the DSF Hub and/or Agentless Gateway instances. 2. Update the _ami_ variable in your Terraform example with the details of the custom AMI you created. -3. Create an S3 VPC endpoint to allow the instances to access S3 without going over the internet. You can create an S3 VPC endpoint using the Amazon VPC console, AWS CLI, or AWS SDKs. -4. Create a Secrets Manager VPC endpoint to allow the instances to access Secrets Manager without going over the internet. You can create a Secrets Manager VPC endpoint using the Amazon VPC console, AWS CLI, or AWS SDKs. +3. Create an S3 VPC endpoint to allow the instances to access S3 without going over the internet. You can create an S3 VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. +4. Create a Secrets Manager VPC endpoint to allow the instances to access the Secrets Manager without going over the internet. 
You can create a Secrets Manager VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. diff --git a/examples/installation/sonar_single_account_deployment/README.md b/examples/installation/sonar_single_account_deployment/README.md index 30b3541c2..63f669718 100644 --- a/examples/installation/sonar_single_account_deployment/README.md +++ b/examples/installation/sonar_single_account_deployment/README.md @@ -11,43 +11,54 @@ This deployment consists of: 4. DSF Hub HADR setup 5. Federation -This example is intended for PS/customers who want to bring their own networking. -It is mandatory to provide as input to this example the subnets to deploy the DSF Hub and the Agentless Gateway. -They can be in the same or in different subnets.
 
+This example is intended for Professional Services and customers who want to bring their own networking, security groups, etc.
+It is mandatory to provide as input to this example the following variables:
+1. The AWS profile of the DSF Hub and Agentless Gateways' AWS account
+2. The AWS region of the DSF Hubs and Agentless Gateways
+3. The subnets in which to deploy the DSF Hub and the Agentless Gateways; they can be in the same or in different subnets
 
-#### Running terraform with variables
-In the current example setting AWS profile, AWS region and the subnets of the DSF Hub and the Agentless Gateway are mandatory.
 
+It is not mandatory to provide the security group IDs of the DSF Hubs and the Agentless Gateways, but if they are provided, you should add the CIDRs and ports of the Agentless Gateways to the security groups of the DSF Hubs and vice versa before running the deployment.
-This example contains variables with default values. In order to customize the variables, you can use the following: -* Run terraform with variables in a command line. For example, to run this example and specify a desired workstation CIDR that allows SSH and debugging access to the DSF Hub instead of using the default workstation CIDR where the installation is running from, run the following: - ```bash - terraform apply -auto-approve -var 'aws_profile="myProfile"' -var 'aws_region="us-east-1"' -var 'subnet_hub_primary="subnet-xxxxxxxxxxxxxxxx1"' -var 'subnet_hub_secondary="subnet-xxxxxxxxxxxxxxxx2"' -var 'subnet_gw="subnet-xxxxxxxxxxxxxxxx3"' -var 'workstation_cidr=["10.0.0.0/24"]' - ``` -* In case there are a lot of variables to change, it might be convenient to run terraform using a file named 'terraform.tfvars' which should contain all the mandatory and customized variables. Using 'terraform.tfvars' file replace the need to use 'var' parameter in terraform apply command. The file should be located under the same example's directory.

-Example for 'terraform.tfvars' file with a desired subnets and SSH verification skip for the DSF Hub and the Agentless Gateway:
-aws_profile="myProfile"
-aws_region="us-east-1"
-subnet_hub_primary="subnet-xxxxxxxxxxxxxxxx1"
-subnet_hub_secondary="subnet-xxxxxxxxxxxxxxxx2"
-subnet_gw="subnet-xxxxxxxxxxxxxxxx3"
-hub_skip_instance_health_verification=true
-gw_skip_instance_health_verification=true

- - In this case the deployment can be run by the following command: +## Customizing Variables + +There are various ways to customize variables in Terraform, in this example, it is recommended to create a 'terrafrom.tfvars' +file in the example's directory, and add the customized variables to it. + +For example: + + ```tf + aws_profile = "myProfile" + aws_region = "us-east-1" + subnet_hub_primary = "subnet-xxxxxxxxxxxxxxxx1" + subnet_hub_secondary = "subnet-xxxxxxxxxxxxxxxx2" + subnet_gw = "subnet-xxxxxxxxxxxxxxxx3" + security_group_ids_hub = ["sg-xxxxxxxxxxxxxxxx11", "sg-xxxxxxxxxxxxxxxx12"] + security_group_ids_gw = ["sg-xxxxxxxxxxxxxxxx21", "sg-xxxxxxxxxxxxxxxx22"] + tarball_location = { + s3_bucket = "bucket_name" + s3_region = "us-east-1" + s3_key = "tarball_name" + } + workstation_cidr = ["10.0.0.0/24"] + ``` + +Then run the deployment as usual with the following command: ```bash - terraform apply -auto-approve + terraform apply ``` For a full list of this example's customization options which don't require code changes, refer to the [variables.tf](./variables.tf) file. -### Storing Terraform state in S3 bucket -To store the Terraform state in S3 bucket instead of locally, uncomment the '[backend.tf](./backend.tf)' file and fill in the necessary details. +## Storing the Terraform State in an S3 Bucket + +To store the Terraform state in an S3 bucket instead of locally, uncomment the '[backend.tf](./backend.tf)' file and fill in the necessary details. Make sure that the user running the deployment has read and write access to this bucket. You can find the required permissions [here](https://developer.hashicorp.com/terraform/language/settings/backends/s3#s3-bucket-permissions). -### Working with DSF Hub and Agentless Gateway without outbound internet access +## Deploying with DSF Hub and Agentless Gateway without Outbound Internet Access + Follow these steps to deploy a DSF Hub and/or Agentless Gateway in an environment without outbound internet access. -1. Provide a custom AMI with the following dependencies: AWS CLI, unzip, lvm2 and jq. - You can create a custom AMI with these dependencies installed by launching an Amazon EC2 instance, installing the dependencies, and creating an AMI from the instance. +1. Provide a custom AMI with the following dependencies: AWS CLI, unzip, lvm2 and jq. + You can create a custom AMI with these dependencies installed by launching an Amazon EC2 instance, installing the dependencies, and creating an AMI from the instance. You can then use this custom AMI when launching the DSF Hub and/or Agentless Gateway instances. 2. Update the _ami_ variable in your Terraform example with the details of the custom AMI you created. -3. Create an S3 VPC endpoint to allow the instances to access S3 without going over the internet. You can create an S3 VPC endpoint using the Amazon VPC console, AWS CLI, or AWS SDKs. -4. Create a Secrets Manager VPC endpoint to allow the instances to access Secrets Manager without going over the internet. You can create a Secrets Manager VPC endpoint using the Amazon VPC console, AWS CLI, or AWS SDKs. +3. Create an S3 VPC endpoint to allow the instances to access S3 without going over the internet. You can create an S3 VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. +4. Create a Secrets Manager VPC endpoint to allow the instances to access the Secrets Manager without going over the internet. You can create a Secrets Manager VPC endpoint using the Amazon VPC console, AWS CLI, or an AWS SDK. 
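+
+For reference, steps 3 and 4 above can also be done in Terraform. The following is a minimal sketch only; the VPC ID, region, subnet, security group and route table values are placeholders and are not part of this example:
+
+ ```tf
+ # Hypothetical Gateway endpoint for S3 and Interface endpoint for Secrets Manager.
+ resource "aws_vpc_endpoint" "s3" {
+   vpc_id            = "vpc-xxxxxxxxxxxxxxxxx"
+   service_name      = "com.amazonaws.us-east-1.s3"
+   vpc_endpoint_type = "Gateway"
+   route_table_ids   = ["rtb-xxxxxxxxxxxxxxxxx"]
+ }
+
+ resource "aws_vpc_endpoint" "secretsmanager" {
+   vpc_id              = "vpc-xxxxxxxxxxxxxxxxx"
+   service_name        = "com.amazonaws.us-east-1.secretsmanager"
+   vpc_endpoint_type   = "Interface"
+   subnet_ids          = ["subnet-xxxxxxxxxxxxxxxx1"]
+   security_group_ids  = ["sg-xxxxxxxxxxxxxxxx1"]
+   private_dns_enabled = true
+ }
+ ```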
diff --git a/examples/poc/dsf_deployment/README.md b/examples/poc/dsf_deployment/README.md index 56020dd6b..e874d006d 100644 --- a/examples/poc/dsf_deployment/README.md +++ b/examples/poc/dsf_deployment/README.md @@ -1,4 +1,4 @@ -# DSF Deployment Example +# DSF Deployment example [![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags) This example provides a full DSF (Data Security Fabric) deployment with DSF Hub, Agentless Gateways, DAM (Database Activity Monitoring), DRA (Data Risk Analytics) and Agent and Agentless audit sources. diff --git a/examples/poc/dsf_deployment/variables.tf b/examples/poc/dsf_deployment/variables.tf index 74dba4039..56794297f 100644 --- a/examples/poc/dsf_deployment/variables.tf +++ b/examples/poc/dsf_deployment/variables.tf @@ -93,7 +93,7 @@ variable "subnet_ids" { db_subnet_ids = list(string) }) default = null - description = "The IDs of an existing subnets to deploy resources in. Keep empty if you wish to provision new VPC and subnets. db_subnet_ids can be an empty list only if no databases should be provisioned" + description = "The IDs of existing subnets to deploy resources in. Keep empty if you wish to provision new VPC and subnets. db_subnet_ids can be an empty list only if no databases should be provisioned" validation { condition = var.subnet_ids == null || try(var.subnet_ids.hub_subnet_id != null && var.subnet_ids.hub_secondary_subnet_id != null && var.subnet_ids.agentless_gw_subnet_id != null && var.subnet_ids.agentless_gw_secondary_subnet_id != null && var.subnet_ids.mx_subnet_id != null && var.subnet_ids.agent_gw_subnet_id != null && var.subnet_ids.admin_subnet_id != null && var.subnet_ids.analytics_subnet_id != null && var.subnet_ids.db_subnet_ids != null, false) error_message = "Value must either be null or specified for all"