From ff700afa979753dc896bf34bbf69a822fb5bebf2 Mon Sep 17 00:00:00 2001 From: Bohdan Yurov Date: Thu, 13 Jun 2019 20:14:06 +0300 Subject: [PATCH] Fixes #158: Add support for Terraform v0.12 https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/158 Add support for TF 0.12. - autogen - root and private_cluster modules - tests (including support for validation) - examples --- README.md | 73 +---- auth.tf | 6 +- autogen/README.md | 4 +- autogen/auth.tf | 7 +- autogen/cluster_regional.tf | 203 +++++++++----- autogen/cluster_zonal.tf | 207 ++++++++++----- autogen/dns.tf | 25 +- autogen/main.tf | 250 ++++++++++++------ autogen/masq.tf | 14 +- autogen/networks.tf | 14 +- autogen/outputs.tf | 38 +-- autogen/sa.tf | 37 +-- autogen/variables.tf | 63 +++-- autogen/versions.tf | 4 + cluster_regional.tf | 197 +++++++++----- cluster_zonal.tf | 194 +++++++++----- dns.tf | 25 +- examples/deploy_service/main.tf | 40 +-- examples/deploy_service/outputs.tf | 9 +- examples/deploy_service/variables.tf | 1 + examples/disable_client_cert/main.tf | 29 +- examples/disable_client_cert/outputs.tf | 9 +- examples/disable_client_cert/variables.tf | 1 + examples/node_pool/main.tf | 44 ++- examples/node_pool/outputs.tf | 9 +- examples/node_pool/variables.tf | 3 +- examples/shared_vpc/main.tf | 28 +- examples/shared_vpc/outputs.tf | 9 +- examples/shared_vpc/variables.tf | 1 + examples/simple_regional/main.tf | 26 +- examples/simple_regional/outputs.tf | 9 +- examples/simple_regional/variables.tf | 1 + examples/simple_regional_private/main.tf | 44 +-- examples/simple_regional_private/outputs.tf | 9 +- examples/simple_regional_private/variables.tf | 1 + examples/simple_zonal/main.tf | 26 +- examples/simple_zonal/outputs.tf | 9 +- examples/simple_zonal/variables.tf | 3 +- examples/simple_zonal_private/main.tf | 46 ++-- examples/simple_zonal_private/outputs.tf | 9 +- examples/simple_zonal_private/variables.tf | 3 +- examples/stub_domains/main.tf | 29 +- examples/stub_domains/outputs.tf | 9 +- examples/stub_domains/variables.tf | 1 + examples/stub_domains_private/main.tf | 47 ++-- examples/stub_domains_private/outputs.tf | 9 +- examples/stub_domains_private/test_outputs.tf | 21 +- examples/stub_domains_private/variables.tf | 1 + main.tf | 236 ++++++++++++----- masq.tf | 14 +- modules/private-cluster/README.md | 78 +----- modules/private-cluster/auth.tf | 6 +- modules/private-cluster/cluster_regional.tf | 206 ++++++++++----- modules/private-cluster/cluster_zonal.tf | 202 +++++++++----- modules/private-cluster/dns.tf | 25 +- modules/private-cluster/main.tf | 241 +++++++++++------ modules/private-cluster/masq.tf | 14 +- modules/private-cluster/networks.tf | 14 +- modules/private-cluster/outputs.tf | 38 +-- modules/private-cluster/sa.tf | 37 +-- modules/private-cluster/variables.tf | 113 +++++--- modules/private-cluster/versions.tf | 4 + networks.tf | 14 +- outputs.tf | 38 +-- sa.tf | 37 +-- test/fixtures/all_examples/test_outputs.tf | 21 +- test/fixtures/deploy_service/example.tf | 15 +- test/fixtures/deploy_service/network.tf | 7 +- test/fixtures/disable_client_cert/example.tf | 19 +- test/fixtures/disable_client_cert/network.tf | 8 +- test/fixtures/node_pool/example.tf | 17 +- test/fixtures/node_pool/network.tf | 7 +- test/fixtures/shared/outputs.tf | 29 +- test/fixtures/shared/variables.tf | 3 +- test/fixtures/shared_vpc/example.tf | 17 +- test/fixtures/shared_vpc/network.tf | 7 +- test/fixtures/simple_regional/example.tf | 15 +- test/fixtures/simple_regional/network.tf | 7 +- 
.../simple_regional_private/example.tf | 15 +- .../simple_regional_private/network.tf | 11 +- test/fixtures/simple_zonal/example.tf | 15 +- test/fixtures/simple_zonal/network.tf | 7 +- test/fixtures/simple_zonal_private/example.tf | 17 +- test/fixtures/simple_zonal_private/network.tf | 11 +- test/fixtures/stub_domains/example.tf | 15 +- test/fixtures/stub_domains/network.tf | 7 +- test/fixtures/stub_domains_private/main.tf | 23 +- test/make.sh | 15 +- variables.tf | 99 ++++--- versions.tf | 4 + 90 files changed, 2169 insertions(+), 1406 deletions(-) create mode 100644 autogen/versions.tf create mode 100644 modules/private-cluster/versions.tf create mode 100644 versions.tf diff --git a/README.md b/README.md index e56d7db123..1a03de6d1a 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ module "gke" { all = {} default-node-pool = { - default-node-pool = "true" + default-node-pool = true } } @@ -74,7 +74,7 @@ module "gke" { default-node-pool = [ { key = "default-node-pool" - value = "true" + value = true effect = "PREFER_NO_SCHEDULE" }, ] @@ -109,75 +109,6 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. [^]: (autogen_docs_start) - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|:----:|:-----:|:-----:| -| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | -| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| description | The description of the cluster | string | `""` | no | -| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | string | `"true"` | no | -| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | string | `"true"` | no | -| http\_load\_balancing | Enable httpload balancer addon | string | `"true"` | no | -| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | string | `"0"` | no | -| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | string | `"false"` | no | -| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | -| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | -| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | string | `"false"` | no | -| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | -| logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | -| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | -| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)
### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | string | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_pools | List of maps containing node pools | list | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements diff --git a/auth.tf b/auth.tf index 5ad4160145..48e7cc6a5f 100644 --- a/auth.tf +++ b/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google" + provider = google } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/autogen/README.md b/autogen/README.md index 778bfdb38e..efddf98a3f 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -66,7 +66,7 @@ module "gke" { all = {} default-node-pool = { - default-node-pool = "true" + default-node-pool = true } } @@ -84,7 +84,7 @@ module "gke" { default-node-pool = [ { key = "default-node-pool" - value = "true" + value = true effect = "PREFER_NO_SCHEDULE" }, ] diff --git a/autogen/auth.tf b/autogen/auth.tf index 3e961cd6b1..d480409119 100644 --- a/autogen/auth.tf +++ b/autogen/auth.tf @@ -20,7 +20,8 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "{% if private_cluster %}google-beta{%else %}google{% endif %}" + provider = {% if private_cluster %}google-beta{%else %}google{% endif %} + } /****************************************** @@ -29,6 +30,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = 
data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/autogen/cluster_regional.tf b/autogen/cluster_regional.tf index 36bfdb1cd4..173f34cda8 100644 --- a/autogen/cluster_regional.tf +++ b/autogen/cluster_regional.tf @@ -20,73 +20,89 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "{% if private_cluster %}google-beta{%else %}google{% endif %}" - count = "${var.regional ? 1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = {% if private_cluster %}google-beta{%else %}google{% endif %} - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] + count = var.regional ? 1 : 0 + name = var.name + description = var.description + project = var.project_id - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" + region = var.region + + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) + + network = data.google_compute_network.gke_network.self_link network_policy { - enabled = "${var.network_policy}" - provider = "${var.network_policy_provider}" + enabled = var.network_policy + provider = var.network_policy_provider } - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + logging_service = var.logging_service + monitoring_service = var.monitoring_service {% if private_cluster %} - enable_binary_authorization = "${var.enable_binary_authorization}" + enable_binary_authorization = var.enable_binary_authorization {% endif %} - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = !var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = !var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = !var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 
0 : 1}" + disabled = !var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -97,73 +113,121 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } -{% if private_cluster %} +{% if private_cluster %} private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } {% endif %} - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], 
"disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -174,16 +238,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/autogen/cluster_zonal.tf b/autogen/cluster_zonal.tf index 00b5729a4d..c79a16be9b 100644 --- a/autogen/cluster_zonal.tf +++ b/autogen/cluster_zonal.tf @@ -20,73 +20,82 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "{% if private_cluster %}google-beta{%else %}google{% endif %}" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = {% if private_cluster %}google-beta{%else %}google{% endif %} - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] + count = var.regional ? 
0 : 1 + name = var.name + description = var.description + project = var.project_id - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + + network = data.google_compute_network.gke_network.self_link network_policy { - enabled = "${var.network_policy}" - provider = "${var.network_policy_provider}" + enabled = var.network_policy + provider = var.network_policy_provider + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } } - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" - - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" - -{% if private_cluster %} - enable_binary_authorization = "${var.enable_binary_authorization}" -{% endif %} - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] - master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = !var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = !var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = !var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 
0 : 1}" + disabled = !var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -97,73 +106,122 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } -{% if private_cluster %} +{% if private_cluster %} private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } {% endif %} - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], 
"disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -174,16 +232,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/autogen/dns.tf b/autogen/dns.tf index 1b0d83eb23..cdfbde7589 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -20,35 +20,48 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config ? 1 : 0}" + count = local.custom_kube_dns_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config ? 1 : 0}" + count = local.custom_kube_dns_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = <
### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | string | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_pools | List of maps containing node pools | list | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements diff --git a/modules/private-cluster/auth.tf b/modules/private-cluster/auth.tf index 0bbafaf4a2..c177eee5a7 100644 --- a/modules/private-cluster/auth.tf +++ b/modules/private-cluster/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google-beta" + provider = google-beta } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/modules/private-cluster/cluster_regional.tf b/modules/private-cluster/cluster_regional.tf index 9630acf411..f320ea08ca 100644 --- a/modules/private-cluster/cluster_regional.tf +++ b/modules/private-cluster/cluster_regional.tf @@ -20,71 +20,86 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "google-beta" - count = "${var.regional ? 1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta + count = var.regional ? 
1 : 0 + name = var.name + description = var.description + project = var.project_id - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] + region = var.region - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) + + network = data.google_compute_network.gke_network.self_link network_policy { - enabled = "${var.network_policy}" - provider = "${var.network_policy_provider}" + enabled = var.network_policy + provider = var.network_policy_provider + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } } - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" - - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" - - enable_binary_authorization = "${var.enable_binary_authorization}" - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] - master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = !var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = !var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = !var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 
0 : 1}" + disabled = !var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -95,71 +110,119 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], 
"disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -170,16 +233,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/modules/private-cluster/cluster_zonal.tf b/modules/private-cluster/cluster_zonal.tf index fc98826190..b447b87d3d 100644 --- a/modules/private-cluster/cluster_zonal.tf +++ b/modules/private-cluster/cluster_zonal.tf @@ -20,71 +20,81 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "google-beta" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta + count = var.regional ? 
0 : 1 + name = var.name + description = var.description + project = var.project_id - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" + network = data.google_compute_network.gke_network.self_link network_policy { - enabled = "${var.network_policy}" - provider = "${var.network_policy_provider}" + enabled = var.network_policy + provider = var.network_policy_provider + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } } - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" - - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" - - enable_binary_authorization = "${var.enable_binary_authorization}" - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] - master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = !var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = !var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = !var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 
0 : 1}" + disabled = !var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -95,71 +105,120 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], 
"disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -170,16 +229,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index 25effe580a..4d37fcef0c 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -20,35 +20,48 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config ? 1 : 0}" + count = local.custom_kube_dns_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config ? 1 : 0}" + count = local.custom_kube_dns_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < /dev/null && terraform validate "{}"' + echo "Running terraform fmt" #shellcheck disable=SC2156 - find . -name "*.tf" -not -path "./autogen/*" -not -path "./test/fixtures/shared/*" -not -path "./test/fixtures/all_examples/*" -exec bash -c 'terraform fmt -check=true -write=false "{}"' \; + find . 
-name "*.tf" \ + -not -path "./autogen/*" \ + -not -path "./test/fixtures/shared/*" \ + -not -path "./test/fixtures/all_examples/*" \ + -exec bash -c 'terraform fmt -check=true -write=false "{}"' \; } # This function runs 'go fmt' and 'go vet' on every file diff --git a/variables.tf b/variables.tf index 2723a39df5..087227c0d1 100644 --- a/variables.tf +++ b/variables.tf @@ -17,58 +17,68 @@ // This file was automatically generated from a template in ./autogen variable "project_id" { + type = string description = "The project ID to host the cluster in (required)" } variable "name" { + type = string description = "The name of the cluster (required)" } variable "description" { + type = string description = "The description of the cluster" default = "" } variable "regional" { + type = bool description = "Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)" default = true } variable "region" { + type = string description = "The region to host the cluster in (required)" } variable "zones" { - type = "list" + type = list(string) description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" - default = [""] + default = [] } variable "network" { + type = string description = "The VPC network to host the cluster in (required)" } variable "network_project_id" { + type = string description = "The project ID of the shared VPC's host (for shared vpc support)" default = "" } variable "subnetwork" { + type = string description = "The subnetwork to host the cluster in (required)" } variable "kubernetes_version" { + type = string description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." default = "latest" } variable "node_version" { + type = string description = "The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation." default = "" } variable "master_authorized_networks_config" { - type = "list" + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) description = <