resource "random_id" "k8s-node-pool-id" {
  byte_length = 2

  keepers = {
    // A new random ID is generated whenever one of the node pool attributes listed below changes.
    // The random_id is then recreated before the node pool itself, which enables "create_before_destroy".
    k8s_max_pods_per_node   = var.k8s_max_pods_per_node
    k8s_worker_machine_type = var.k8s_worker_machine_type
    k8s_worker_disksize_gb  = var.k8s_worker_disksize_gb
    k8s_worker_image_type   = var.k8s_worker_image_type
  }
}

resource "google_container_node_pool" "k8s-node-pool" {

  name     = "${var.k8s_cluster_name}-pool-${random_id.k8s-node-pool-id.hex}"
  provider = google
  project  = var.k8s_project
  location = var.region

  cluster = google_container_cluster.privcluster.name

  max_pods_per_node = var.k8s_max_pods_per_node

  // Only one of "initial_node_count" and "node_count" may be set, never both.
  initial_node_count = var.k8s_node_pool_autoscaler ? var.k8s_node_count_per_zone : null
  node_count         = var.k8s_node_pool_autoscaler ? null : var.k8s_node_count_per_zone
  dynamic "autoscaling" {
    for_each = var.k8s_node_pool_autoscaler ? ["enabled"] : []
    content {
      min_node_count = var.k8s_node_count_per_zone
      max_node_count = var.k8s_max_node_count_per_zone == null ? var.k8s_node_count_per_zone : var.k8s_max_node_count_per_zone
    }
  }

  node_config {
    image_type   = var.k8s_worker_image_type
    machine_type = var.k8s_worker_machine_type

    tags = ["k8s-nodes", "opl-egress-proxy-client"]

    // metadata must include the default value set by the API (disable-legacy-endpoints = "true");
    // otherwise each subsequent run of this module would try to remove that metadata and recreate the node pool.
    metadata = {
      disable-legacy-endpoints = "true"
    }

    disk_size_gb = var.k8s_worker_disksize_gb
    disk_type    = "pd-ssd"

    boot_disk_kms_key = var.k8s_node_boot_disk_encryption_key

    service_account = google_service_account.sa-opl-gkenodepool.email

    // Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
    oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]

    workload_metadata_config {
      // node_metadata = "GKE_METADATA_SERVER" is the pre-4.0 provider spelling; "mode" replaces it
      mode = "GKE_METADATA"
    }
    /* Check whether this is still needed. Note that this resource has no for_each, so the
       original lookup(each.value, ...) expressions would fail here; plain defaults are used instead:

    shielded_instance_config {
      enable_secure_boot          = false
      enable_integrity_monitoring = true
    }
    */
  }

  // Ignore changes to initial_node_count: the autoscaler changes the node count at runtime,
  // and without this the pool would be recreated whenever the live count drifts from initial_node_count.
  lifecycle {
    ignore_changes        = [initial_node_count]
    create_before_destroy = true
  }

  // ---------------------------------------------
  timeouts {
    create = "45m"
    update = "45m"
    delete = "45m"
  }
}
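
// Usage note: because the pool name carries a random suffix, other configuration may want to
// read the name from the resource rather than reconstruct it. A minimal sketch (this output
// is hypothetical, not part of the original module):
output "k8s_node_pool_name" {
  value = google_container_node_pool.k8s-node-pool.name
}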

resource "random_id" "additional_node_pools" {
  for_each    = local.additional_node_pools
  byte_length = 2

  keepers = {
    // A new random ID is generated whenever one of the node pool attributes listed below changes.
    // The random_id is then recreated before the node pool itself, which enables "create_before_destroy".
    pod_range_subnet_name   = each.value.pod_range_subnet_name
    k8s_max_pods_per_node   = each.value.k8s_max_pods_per_node
    k8s_worker_machine_type = each.value.k8s_worker_machine_type
    k8s_worker_disksize_gb  = each.value.k8s_worker_disksize_gb
    k8s_worker_image_type   = var.k8s_worker_image_type
  }
}

resource "google_container_node_pool" "additonal_node_pools" {
  cluster = google_container_cluster.privcluster.name
  // network_config is only available in the beta provider
  provider = google-beta
  project  = var.k8s_project
  location = var.region

  for_each = local.additional_node_pools
  name     = "${var.k8s_cluster_name}-pool-${each.key}-${random_id.additional_node_pools["${each.key}"].hex}"

  max_pods_per_node = each.value.k8s_max_pods_per_node

  initial_node_count = each.value.k8s_node_pool_autoscaler ? each.value.k8s_node_count_per_zone : null
  node_count         = each.value.k8s_node_pool_autoscaler ? null : each.value.k8s_node_count_per_zone

  dynamic "network_config" {
    for_each = each.value.pod_range_subnet_name != "" ? [1] : []
    content {
      pod_range = each.value.pod_range_subnet_name
    }
  }

  dynamic "autoscaling" {
    for_each = each.value.k8s_node_pool_autoscaler ? ["enabled"] : []

    content {
      min_node_count = each.value.k8s_node_count_per_zone
      max_node_count = each.value.k8s_max_node_count_per_zone == null ? each.value.k8s_node_count_per_zone : each.value.k8s_max_node_count_per_zone
    }
  }

  node_config {
    image_type        = var.k8s_worker_image_type
    machine_type      = each.value.k8s_worker_machine_type
    boot_disk_kms_key = var.k8s_node_boot_disk_encryption_key
    service_account   = google_service_account.sa-opl-gkenodepool.email

    tags = ["k8s-nodes", "opl-egress-proxy-client"]

    // metadata must include the default value set by the API (disable-legacy-endpoints = "true");
    // otherwise each subsequent run of this module would try to remove that metadata and recreate the node pool.
    metadata = {
      disable-legacy-endpoints = "true"
    }

    disk_size_gb = each.value.k8s_worker_disksize_gb
    disk_type    = "pd-ssd"

    // Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
    oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]

    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }

  lifecycle {
    ignore_changes        = [initial_node_count]
    create_before_destroy = true
  }

  // ---------------------------------------------
  timeouts {
    create = "45m"
    update = "45m"
    delete = "45m"
  }
}
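
// ---------------------------------------------
// The globally scoped variables referenced above are declared elsewhere in this module.
// A minimal sketch of those declarations, inferred from how they are used here; the
// names and types follow from usage, but everything else is an assumption:

variable "k8s_project" {
  type = string
}

variable "k8s_cluster_name" {
  type = string
}

variable "region" {
  type = string
}

variable "k8s_max_pods_per_node" {
  type = number
}

variable "k8s_worker_machine_type" {
  type = string
}

variable "k8s_worker_disksize_gb" {
  type = number
}

variable "k8s_worker_image_type" {
  type = string
}

variable "k8s_node_pool_autoscaler" {
  type = bool
}

variable "k8s_node_count_per_zone" {
  type = number
}

variable "k8s_max_node_count_per_zone" {
  type    = number
  default = null
}

variable "k8s_node_boot_disk_encryption_key" {
  type = string
}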



variable "additional_node_pools" {
  description = "List of additional node pools with optionally new pod range"
  type = list(object({
    /**
     *    name - Included in the node pool name along with the cluster name and a random ID.
     *    k8s_max_pods_per_node - Same as the globally declared k8s_max_pods_per_node, but per node pool; if not set, the global value is used.
     *    k8s_max_node_count_per_zone - Same as the globally declared k8s_max_node_count_per_zone, but per node pool; if not set, the global value is used.
     *    k8s_node_count_per_zone - Same as the globally declared k8s_node_count_per_zone, but per node pool; if not set, the global value is used.
     *    k8s_node_pool_autoscaler - Same as the globally declared k8s_node_pool_autoscaler, but per node pool; if not set, the global value is used.
     *    k8s_worker_machine_type - Same as the globally declared k8s_worker_machine_type, but per node pool; if not set, the global value is used.
     *    k8s_worker_disksize_gb - Same as the globally declared k8s_worker_disksize_gb, but per node pool; if not set, the global value is used.
     *    pod_range_subnet_name - Name of the secondary range (IP alias subnet) used for this node pool's pods. Needed when the default pod range runs out of IP addresses.
     */
    name                        = string
    k8s_max_pods_per_node       = optional(number, null)
    k8s_max_node_count_per_zone = optional(number, null)
    k8s_node_count_per_zone     = optional(number, null)
    k8s_node_pool_autoscaler    = optional(bool, null)
    k8s_worker_machine_type     = optional(string, "")
    k8s_worker_disksize_gb      = optional(number, null)
    pod_range_subnet_name       = optional(string, "")
  }))
  default = []
}
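
// local.additional_node_pools, iterated with for_each above, is not defined in this paste.
// A minimal sketch of how it could be built from var.additional_node_pools, assuming the
// intent is a map keyed by pool name whose per-pool values fall back to the global
// variables (the coalesce()/ternary fallbacks are assumptions based on the docs above,
// and assume the global variables are set):
locals {
  additional_node_pools = {
    for pool in var.additional_node_pools : pool.name => {
      name                        = pool.name
      k8s_max_pods_per_node       = coalesce(pool.k8s_max_pods_per_node, var.k8s_max_pods_per_node)
      k8s_max_node_count_per_zone = pool.k8s_max_node_count_per_zone != null ? pool.k8s_max_node_count_per_zone : var.k8s_max_node_count_per_zone
      k8s_node_count_per_zone     = coalesce(pool.k8s_node_count_per_zone, var.k8s_node_count_per_zone)
      k8s_node_pool_autoscaler    = pool.k8s_node_pool_autoscaler != null ? pool.k8s_node_pool_autoscaler : var.k8s_node_pool_autoscaler
      k8s_worker_machine_type     = pool.k8s_worker_machine_type != "" ? pool.k8s_worker_machine_type : var.k8s_worker_machine_type
      k8s_worker_disksize_gb      = coalesce(pool.k8s_worker_disksize_gb, var.k8s_worker_disksize_gb)
      pod_range_subnet_name       = pool.pod_range_subnet_name
    }
  }
}

// Example value for var.additional_node_pools, e.g. in terraform.tfvars (the pool and
// subnet names are hypothetical):
//
// additional_node_pools = [
//   {
//     name                    = "highmem"
//     k8s_worker_machine_type = "n2-highmem-8"
//     pod_range_subnet_name   = "pods-secondary-range-2"
//   },
// ]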