diff --git a/README.md b/README.md index 08fe28c75..d740908b6 100644 --- a/README.md +++ b/README.md @@ -198,6 +198,7 @@ Then perform the following commands on the root folder: | resource\_usage\_export\_dataset\_id | The ID of a BigQuery Dataset for using BigQuery as the destination of resource usage export. | `string` | `""` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. | `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | diff --git a/autogen/main/README.md b/autogen/main/README.md index 17e40ddba..2d79b07ea 100644 --- a/autogen/main/README.md +++ b/autogen/main/README.md @@ -218,7 +218,7 @@ The node_pools variable takes the following parameters: | name | The name of the node pool | | Required | {% if beta_cluster %} | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | -| pod_range | The ID of the secondary range for pod IPs. | | Optional | +| pod_range | The name of the secondary range for pod IPs. | | Optional | {% endif %} | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional | diff --git a/autogen/main/firewall.tf.tmpl b/autogen/main/firewall.tf.tmpl index aa0f41316..64d4df3bd 100644 --- a/autogen/main/firewall.tf.tmpl +++ b/autogen/main/firewall.tf.tmpl @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -143,7 +144,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -154,8 +155,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -177,8 +181,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -209,7 +216,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/autogen/main/main.tf.tmpl b/autogen/main/main.tf.tmpl index dc53e13da..109146110 100644 --- a/autogen/main/main.tf.tmpl +++ b/autogen/main/main.tf.tmpl @@ -86,6 +86,11 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} +{% if autopilot_cluster != true %} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools): local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0] )) : [] +{% else %} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? [local.cluster_alias_ranges_cidr[var.ip_range_pods]] : [] +{% endif %} {% if autopilot_cluster != true %} cluster_network_policy = var.network_policy ? [{ diff --git a/autogen/main/variables.tf.tmpl b/autogen/main/variables.tf.tmpl index c39235a0f..7d57c4d74 100644 --- a/autogen/main/variables.tf.tmpl +++ b/autogen/main/variables.tf.tmpl @@ -485,9 +485,23 @@ variable "add_shadow_firewall_rules" { } variable "shadow_firewall_rules_priority" { - type = number + type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." - default = 999 + default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." 
+ } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } {% if beta_cluster %} diff --git a/docs/private_clusters.md b/docs/private_clusters.md index 24a995d1b..2f3b57fbb 100644 --- a/docs/private_clusters.md +++ b/docs/private_clusters.md @@ -20,6 +20,16 @@ If you are using these features with a private cluster, you will need to either: If you are going to isolate your GKE private clusters from internet access you could check [this guide](https://medium.com/google-cloud/completely-private-gke-clusters-with-no-internet-connectivity-945fffae1ccd) and the associated [repo](https://github.com/andreyk-code/no-inet-gke-cluster). +## Discontiguous multi-Pod CIDR +If you are going to use [discontiguous multi-Pod CIDR](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) it can happen that GKE robot will not update `gke-[cluster-name]-[cluster-hash]-all` and other firewall rules automatically when you add a new node pool (as stated in [documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr#modified_firewall_rule)). You can prevent this from happening, by using a workaround with shadow firewall rules: +``` +module "gke" { + ... + add_shadow_firewall_rules = true + shadow_firewall_rules_log_config = null # to save some $ on logs +} +``` + ## Troubleshooting ### Master Authorized Network diff --git a/firewall.tf b/firewall.tf index 8586855e8..a754fda5c 100644 --- a/firewall.tf +++ b/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -99,7 +100,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -110,8 +111,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -133,8 +137,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -165,7 +172,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/main.tf b/main.tf index ff006e3ab..37a6f70ea 100644 --- a/main.tf +++ b/main.tf @@ -73,6 +73,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? [{ enabled = true diff --git a/modules/beta-autopilot-private-cluster/README.md b/modules/beta-autopilot-private-cluster/README.md index edc894d1f..5b0e80e36 100644 --- a/modules/beta-autopilot-private-cluster/README.md +++ b/modules/beta-autopilot-private-cluster/README.md @@ -125,6 +125,7 @@ Then perform the following commands on the root folder: | resource\_usage\_export\_dataset\_id | The ID of a BigQuery Dataset for using BigQuery as the destination of resource usage export. | `string` | `""` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. 
| `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | diff --git a/modules/beta-autopilot-private-cluster/firewall.tf b/modules/beta-autopilot-private-cluster/firewall.tf index 14c3e54cb..96eecab80 100644 --- a/modules/beta-autopilot-private-cluster/firewall.tf +++ b/modules/beta-autopilot-private-cluster/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -126,7 +127,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -137,8 +138,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -160,8 +164,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -192,7 +199,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-autopilot-private-cluster/main.tf b/modules/beta-autopilot-private-cluster/main.tf index 613be8b4f..18d889a0e 100644 --- a/modules/beta-autopilot-private-cluster/main.tf +++ b/modules/beta-autopilot-private-cluster/main.tf @@ -60,6 +60,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? [local.cluster_alias_ranges_cidr[var.ip_range_pods]] : [] cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{ diff --git a/modules/beta-autopilot-private-cluster/variables.tf b/modules/beta-autopilot-private-cluster/variables.tf index 88267be7d..3be57a3fa 100644 --- a/modules/beta-autopilot-private-cluster/variables.tf +++ b/modules/beta-autopilot-private-cluster/variables.tf @@ -357,6 +357,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/beta-autopilot-public-cluster/README.md b/modules/beta-autopilot-public-cluster/README.md index 14c6eb91f..274dc7faa 100644 --- a/modules/beta-autopilot-public-cluster/README.md +++ b/modules/beta-autopilot-public-cluster/README.md @@ -114,6 +114,7 @@ Then perform the following commands on the root folder: | resource\_usage\_export\_dataset\_id | The ID of a BigQuery Dataset for using BigQuery as the destination of resource usage export. | `string` | `""` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. 
| `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | diff --git a/modules/beta-autopilot-public-cluster/firewall.tf b/modules/beta-autopilot-public-cluster/firewall.tf index 6b71404b7..df15a0236 100644 --- a/modules/beta-autopilot-public-cluster/firewall.tf +++ b/modules/beta-autopilot-public-cluster/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -135,7 +136,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -146,8 +147,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -169,8 +173,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -201,7 +208,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-autopilot-public-cluster/main.tf b/modules/beta-autopilot-public-cluster/main.tf index 1e69f0933..652b423f7 100644 --- a/modules/beta-autopilot-public-cluster/main.tf +++ b/modules/beta-autopilot-public-cluster/main.tf @@ -60,6 +60,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? [local.cluster_alias_ranges_cidr[var.ip_range_pods]] : [] cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{ diff --git a/modules/beta-autopilot-public-cluster/variables.tf b/modules/beta-autopilot-public-cluster/variables.tf index e465277f6..e4f4bac2a 100644 --- a/modules/beta-autopilot-public-cluster/variables.tf +++ b/modules/beta-autopilot-public-cluster/variables.tf @@ -327,6 +327,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 0a62257bd..c2f4a90c7 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -255,6 +255,7 @@ Then perform the following commands on the root folder: | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` to use it). | `bool` | `false` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. 
| `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | @@ -343,7 +344,7 @@ The node_pools variable takes the following parameters: | min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | -| pod_range | The ID of the secondary range for pod IPs. | | Optional | +| pod_range | The name of the secondary range for pod IPs. | | Optional | | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional | | node_metadata | Options to expose the node metadata to the workload running on the node | | Optional | diff --git a/modules/beta-private-cluster-update-variant/firewall.tf b/modules/beta-private-cluster-update-variant/firewall.tf index 14c3e54cb..96eecab80 100644 --- a/modules/beta-private-cluster-update-variant/firewall.tf +++ b/modules/beta-private-cluster-update-variant/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -126,7 +127,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -137,8 +138,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -160,8 +164,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -192,7 +199,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index ca3604ef1..cc14d3590 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -74,6 +74,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? 
[{ enabled = true diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index e51ecb34d..958f589b4 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -470,6 +470,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 96bb1ca0c..6e71f4cae 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -233,6 +233,7 @@ Then perform the following commands on the root folder: | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` to use it). | `bool` | `false` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. | `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | @@ -321,7 +322,7 @@ The node_pools variable takes the following parameters: | min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | -| pod_range | The ID of the secondary range for pod IPs. | | Optional | +| pod_range | The name of the secondary range for pod IPs. | | Optional | | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional | | node_metadata | Options to expose the node metadata to the workload running on the node | | Optional | diff --git a/modules/beta-private-cluster/firewall.tf b/modules/beta-private-cluster/firewall.tf index 14c3e54cb..96eecab80 100644 --- a/modules/beta-private-cluster/firewall.tf +++ b/modules/beta-private-cluster/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -126,7 +127,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -137,8 +138,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -160,8 +164,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -192,7 +199,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index ca3604ef1..cc14d3590 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -74,6 +74,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? [{ enabled = true diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index e51ecb34d..958f589b4 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -470,6 +470,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." 
default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/beta-public-cluster-update-variant/README.md b/modules/beta-public-cluster-update-variant/README.md index 4cbb6b77b..e4bd8d3cc 100644 --- a/modules/beta-public-cluster-update-variant/README.md +++ b/modules/beta-public-cluster-update-variant/README.md @@ -244,6 +244,7 @@ Then perform the following commands on the root folder: | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` to use it). | `bool` | `false` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. | `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | @@ -330,7 +331,7 @@ The node_pools variable takes the following parameters: | min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | -| pod_range | The ID of the secondary range for pod IPs. | | Optional | +| pod_range | The name of the secondary range for pod IPs. | | Optional | | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional | | node_metadata | Options to expose the node metadata to the workload running on the node | | Optional | diff --git a/modules/beta-public-cluster-update-variant/firewall.tf b/modules/beta-public-cluster-update-variant/firewall.tf index 6b71404b7..df15a0236 100644 --- a/modules/beta-public-cluster-update-variant/firewall.tf +++ b/modules/beta-public-cluster-update-variant/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -135,7 +136,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -146,8 +147,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -169,8 +173,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -201,7 +208,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-public-cluster-update-variant/main.tf b/modules/beta-public-cluster-update-variant/main.tf index 0a4b4e126..3dd8c73e4 100644 --- a/modules/beta-public-cluster-update-variant/main.tf +++ b/modules/beta-public-cluster-update-variant/main.tf @@ -74,6 +74,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? 
[{ enabled = true diff --git a/modules/beta-public-cluster-update-variant/variables.tf b/modules/beta-public-cluster-update-variant/variables.tf index 2de66dd1b..cf768470b 100644 --- a/modules/beta-public-cluster-update-variant/variables.tf +++ b/modules/beta-public-cluster-update-variant/variables.tf @@ -440,6 +440,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index f95eb9984..091e96785 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -222,6 +222,7 @@ Then perform the following commands on the root folder: | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` to use it). | `bool` | `false` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. | `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
`object({ metadata = string })` | `{ "metadata": "INCLUDE_ALL_METADATA" }`
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | @@ -308,7 +309,7 @@ The node_pools variable takes the following parameters: | min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | -| pod_range | The ID of the secondary range for pod IPs. | | Optional | +| pod_range | The name of the secondary range for pod IPs. | | Optional | | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional | | node_metadata | Options to expose the node metadata to the workload running on the node | | Optional | diff --git a/modules/beta-public-cluster/firewall.tf b/modules/beta-public-cluster/firewall.tf index 6b71404b7..df15a0236 100644 --- a/modules/beta-public-cluster/firewall.tf +++ b/modules/beta-public-cluster/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -135,7 +136,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -146,8 +147,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -169,8 +173,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -201,7 +208,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 0a4b4e126..3dd8c73e4 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -74,6 +74,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? [{ enabled = true diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 2de66dd1b..cf768470b 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -440,6 +440,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." 
default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } variable "enable_confidential_nodes" { diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index ad2579450..bdebd7adc 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -231,6 +231,7 @@ Then perform the following commands on the root folder: | resource\_usage\_export\_dataset\_id | The ID of a BigQuery Dataset for using BigQuery as the destination of resource usage export. | `string` | `""` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. | `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
<pre>object({<br>  metadata = string<br>})</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre>
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | diff --git a/modules/private-cluster-update-variant/firewall.tf b/modules/private-cluster-update-variant/firewall.tf index c9b4d2997..d9507532c 100644 --- a/modules/private-cluster-update-variant/firewall.tf +++ b/modules/private-cluster-update-variant/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -93,7 +94,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -104,8 +105,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -127,8 +131,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -159,7 +166,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? 
[] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index deacafb80..f2f012078 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -73,6 +73,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? [{ enabled = true diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 3e2ee6a73..038ca7d96 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -451,6 +451,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index cfafe37a1..5230da33d 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -209,6 +209,7 @@ Then perform the following commands on the root folder: | resource\_usage\_export\_dataset\_id | The ID of a BigQuery Dataset for using BigQuery as the destination of resource usage export. | `string` | `""` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create\_service\_account variable default value (true) will cause a cluster-specific service account to be created. 
| `string` | `""` | no | | service\_external\_ips | Whether external ips specified by a service will be allowed in this cluster | `bool` | `false` | no | +| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
<pre>object({<br>  metadata = string<br>})</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre>
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | diff --git a/modules/private-cluster/firewall.tf b/modules/private-cluster/firewall.tf index c9b4d2997..d9507532c 100644 --- a/modules/private-cluster/firewall.tf +++ b/modules/private-cluster/firewall.tf @@ -34,11 +34,12 @@ resource "google_compute_firewall" "intra_egress" { direction = "EGRESS" target_tags = [local.cluster_network_tag] - destination_ranges = [ + destination_ranges = concat([ local.cluster_endpoint_for_nodes, local.cluster_subnet_cidr, - local.cluster_alias_ranges_cidr[var.ip_range_pods], - ] + ], + local.pod_all_ip_ranges + ) # Allow all possible protocols allow { protocol = "tcp" } @@ -93,7 +94,7 @@ resource "google_compute_firewall" "shadow_allow_pods" { priority = var.shadow_firewall_rules_priority direction = "INGRESS" - source_ranges = [local.cluster_alias_ranges_cidr[var.ip_range_pods]] + source_ranges = local.pod_all_ip_ranges target_tags = [local.cluster_network_tag] # Allow all possible protocols @@ -104,8 +105,11 @@ resource "google_compute_firewall" "shadow_allow_pods" { allow { protocol = "esp" } allow { protocol = "ah" } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -127,8 +131,11 @@ resource "google_compute_firewall" "shadow_allow_master" { ports = ["10250", "443"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } @@ -159,7 +166,63 @@ resource "google_compute_firewall" "shadow_allow_nodes" { ports = ["1-65535"] } - log_config { - metadata = "INCLUDE_ALL_METADATA" + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_allow_inkubelet" { + count = var.add_shadow_firewall_rules ? 1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-inkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default rule allowing worker nodes & pods communication to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority - 1 # rule created by GKE robot have prio 999 + direction = "INGRESS" + + source_ranges = local.pod_all_ip_ranges + source_tags = [local.cluster_network_tag] + target_tags = [local.cluster_network_tag] + + allow { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } + } +} + +resource "google_compute_firewall" "shadow_deny_exkubelet" { + count = var.add_shadow_firewall_rules ? 
1 : 0 + + name = "gke-shadow-${substr(var.name, 0, min(25, length(var.name)))}-exkubelet" + description = "Managed by terraform GKE module: A shadow firewall rule to match the default deny rule to kubelet." + project = local.network_project_id + network = var.network + priority = var.shadow_firewall_rules_priority # rule created by GKE robot have prio 1000 + direction = "INGRESS" + + source_ranges = ["0.0.0.0/0"] + target_tags = [local.cluster_network_tag] + + deny { + protocol = "tcp" + ports = ["10255"] + } + + dynamic "log_config" { + for_each = var.shadow_firewall_rules_log_config == null ? [] : [var.shadow_firewall_rules_log_config] + content { + metadata = log_config.value.metadata + } } } diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index deacafb80..f2f012078 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -73,6 +73,7 @@ locals { cluster_subnet_cidr = var.add_cluster_firewall_rules ? data.google_compute_subnetwork.gke_subnetwork[0].ip_cidr_range : null cluster_alias_ranges_cidr = var.add_cluster_firewall_rules ? { for range in toset(data.google_compute_subnetwork.gke_subnetwork[0].secondary_ip_range) : range.range_name => range.ip_cidr_range } : {} + pod_all_ip_ranges = var.add_cluster_firewall_rules ? compact(concat([local.cluster_alias_ranges_cidr[var.ip_range_pods]], [for k, v in merge(local.node_pools, local.windows_node_pools) : local.cluster_alias_ranges_cidr[v.pod_range] if length(lookup(v, "pod_range", "")) > 0])) : [] cluster_network_policy = var.network_policy ? [{ enabled = true diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 3e2ee6a73..038ca7d96 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -451,6 +451,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } } diff --git a/variables.tf b/variables.tf index c6b3f70ae..13839b02f 100644 --- a/variables.tf +++ b/variables.tf @@ -421,6 +421,20 @@ variable "shadow_firewall_rules_priority" { type = number description = "The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000." default = 999 + validation { + condition = var.shadow_firewall_rules_priority < 1000 + error_message = "The shadow firewall rule priority must be lower than auto-created one(1000)." + } +} + +variable "shadow_firewall_rules_log_config" { + type = object({ + metadata = string + }) + description = "The log_config for shadow firewall rules. You can set this variable to `null` to disable logging." + default = { + metadata = "INCLUDE_ALL_METADATA" + } }
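For reference, a minimal usage sketch of the inputs this change introduces. The module source, version, and every project/network/range name below are placeholders, not taken from this diff; only the input names (`add_shadow_firewall_rules`, `shadow_firewall_rules_priority`, `shadow_firewall_rules_log_config`, and the node-pool `pod_range` key) come from the variables and README entries added above.

```hcl
# Hypothetical consumer configuration; names and version are illustrative only.
module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster"
  version = "~> 17.0" # placeholder version

  project_id        = "my-project"
  name              = "example-cluster"
  region            = "us-central1"
  network           = "example-vpc"
  subnetwork        = "example-subnet"
  ip_range_pods     = "example-pods" # cluster-wide secondary range for pods
  ip_range_services = "example-services"

  # Shadow firewall rules replicate the GKE-managed defaults at a lower
  # priority so their traffic can be observed through firewall logging.
  add_shadow_firewall_rules      = true
  shadow_firewall_rules_priority = 999

  # Logging defaults to INCLUDE_ALL_METADATA; set the whole variable to null
  # to omit the log_config block (and disable logging) on every shadow rule.
  shadow_firewall_rules_log_config = {
    metadata = "EXCLUDE_ALL_METADATA"
  }

  node_pools = [
    {
      name      = "pool-01"
      pod_range = "pool-01-pods" # per-pool secondary range, picked up by pod_all_ip_ranges
    },
  ]
}
```

With `shadow_firewall_rules_log_config = null`, the `dynamic "log_config"` blocks above render no `log_config` at all, so all five shadow rules (`pods`, `master`, `nodes`, `inkubelet`, `exkubelet`) are created without logging.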
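As a design note, the new `shadow_allow_inkubelet`/`shadow_deny_exkubelet` pair mirrors the GKE-created rules for the kubelet read-only port (TCP 10255): the allow rule admits node and pod traffic at `shadow_firewall_rules_priority - 1`, while the deny rule blocks `0.0.0.0/0` at `shadow_firewall_rules_priority`, preserving the same allow-before-deny ordering as the originals (999 and 1000).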