resource "random_id" "additional_node_pools" {
  for_each    = local.additional_node_pools
  byte_length = 2

  // A new random ID is generated whenever one of these specific node-pool
  // attributes changes. Regenerating the ID before the node pool itself is
  // recreated enables "create_before_destroy" on the pool: its name embeds
  // this ID, so the replacement pool gets a distinct name and can be created
  // alongside the old one.
  keepers = {
    // each.value is the element of local.additional_node_pools keyed by
    // each.key, so the extra map lookup from the original is unnecessary.
    pod_range_subnet_name   = each.value.pod_range_subnet_name
    k8s_max_pods_per_node   = each.value.k8s_max_pods_per_node
    k8s_worker_machine_type = each.value.k8s_worker_machine_type
    k8s_worker_disksize_gb  = each.value.k8s_worker_disksize_gb
    k8s_worker_image_type   = var.k8s_worker_image_type
  }
}
// NOTE(review): the resource label "additonal_node_pools" is misspelled, but
// renaming it would require a `terraform state mv`; kept as-is so existing
// state and any cross-file references keep working.
resource "google_container_node_pool" "additonal_node_pools" {
  // network_config is only available in the beta provider
  provider = google-beta

  for_each = local.additional_node_pools

  cluster  = google_container_cluster.privcluster.name
  project  = var.k8s_project
  location = var.region

  // The pool name embeds the per-pool random_id so "create_before_destroy"
  // can bring up the replacement pool (under a new name) before the old one
  // is destroyed.
  name = "${var.k8s_cluster_name}-pool-${each.key}-${random_id.additional_node_pools[each.key].hex}"

  max_pods_per_node = each.value.k8s_max_pods_per_node

  // With autoscaling enabled only the initial size is set (the autoscaler
  // manages the size afterwards); without autoscaling the node count is
  // managed directly.
  initial_node_count = each.value.k8s_node_pool_autoscaler ? each.value.k8s_node_count_per_zone : null
  node_count         = each.value.k8s_node_pool_autoscaler ? null : each.value.k8s_node_count_per_zone

  // Attach the pods to a dedicated secondary range only when one is configured.
  dynamic "network_config" {
    for_each = each.value.pod_range_subnet_name != "" ? [1] : []
    content {
      pod_range = each.value.pod_range_subnet_name
    }
  }

  dynamic "autoscaling" {
    for_each = each.value.k8s_node_pool_autoscaler ? ["enabled"] : []
    content {
      min_node_count = each.value.k8s_node_count_per_zone
      // When no explicit maximum is configured, cap the pool at its base
      // size (effectively a fixed-size pool with autoscaling enabled).
      max_node_count = each.value.k8s_max_node_count_per_zone == null ? each.value.k8s_node_count_per_zone : each.value.k8s_max_node_count_per_zone
    }
  }

  node_config {
    image_type        = var.k8s_worker_image_type
    machine_type      = each.value.k8s_worker_machine_type
    boot_disk_kms_key = var.k8s_node_boot_disk_encryption_key
    service_account   = google_service_account.sa-opl-gkenodepool.email
    tags              = ["k8s-nodes", "opl-egress-proxy-client"]

    // metadata must be set explicitly to match the default applied by the
    // API (disable-legacy-endpoints = true); otherwise subsequent runs of
    // this module would recreate the pool while trying to remove metadata.
    metadata = {
      disable-legacy-endpoints = "true"
    }

    disk_size_gb = each.value.k8s_worker_disksize_gb
    disk_type    = "pd-ssd"

    // Google recommends custom service accounts that have cloud-platform
    // scope and permissions granted via IAM Roles.
    oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]

    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }

  lifecycle {
    // NOTE(review): presumably initial_node_count drifts once the autoscaler
    // resizes the pool, so it is ignored to avoid spurious diffs — confirm.
    ignore_changes        = [initial_node_count]
    create_before_destroy = true
  }

  timeouts {
    create = "45m"
    update = "45m"
    delete = "45m"
  }
}
variable "additional_node_pools" {
description = "List of additional node pools with optionally new pod range"
type = list(object({
/**
* name - Name that will be included in the node-pool name alongside the cluster name and a random id.
* k8s_max_pods_per_node - Same as the globally declared k8s_max_pods_per_node but per pool; if not set, the global value will be taken.
* k8s_max_node_count_per_zone - Same as the globally declared k8s_max_node_count_per_zone but per pool; if not set, the global value will be taken.
* k8s_node_count_per_zone - Same as the globally declared k8s_node_count_per_zone but per pool; if not set, the global value will be taken.
* k8s_node_pool_autoscaler - Same as the globally declared k8s_node_pool_autoscaler but per pool; if not set, the global value will be taken.
* k8s_worker_machine_type - Same as the globally declared k8s_worker_machine_type but per pool; if not set, the global value will be taken.
* k8s_worker_disksize_gb - Same as the globally declared k8s_worker_disksize_gb but per pool; if not set, the global value will be taken.
* pod_range_subnet_name - Name of the secondary subnet range (IP aliasing) that will be used for this node pool. Needed when we have run out of IPs.
*/
name = string
k8s_max_pods_per_node = optional(number, null)
k8s_max_node_count_per_zone = optional(number, null)
k8s_node_count_per_zone = optional(number, null)
k8s_node_pool_autoscaler = optional(bool, null)
k8s_worker_machine_type = optional(string, "")
k8s_worker_disksize_gb = optional(number, null)
pod_range_subnet_name = optional(string, "")
}))
default = []
}