From 34297fe4780866a100372c544054540becb9fb4f Mon Sep 17 00:00:00 2001 From: David Quarles Date: Tue, 21 Nov 2017 12:56:29 -0800 Subject: [PATCH] Add support for `google_container_node_pool` management (#669) * add support for `google_container_node_pool` management (sans-tests) * [review] add tests, docs, general cleanup * add docs * [review] amend test to check updates and terraform fmt * test updates, make nested management fields non-computed --- google/resource_container_node_pool.go | 73 +++++++++++++++++++ google/resource_container_node_pool_test.go | 71 ++++++++++++++++++ .../docs/r/container_node_pool.html.markdown | 11 ++- 3 files changed, 154 insertions(+), 1 deletion(-) diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go index 66674daa..7139f135 100644 --- a/google/resource_container_node_pool.go +++ b/google/resource_container_node_pool.go @@ -85,6 +85,28 @@ var schemaNodePool = map[string]*schema.Schema{ Deprecated: "Use node_count instead", }, + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "auto_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "name": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -301,6 +323,19 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, } } + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + np.Management = &container.NodeManagement{} + + if v, ok := managementConfig["auto_repair"]; ok { + np.Management.AutoRepair = v.(bool) + } + + if v, ok := managementConfig["auto_upgrade"]; ok { + np.Management.AutoUpgrade = v.(bool) + } + } + return np, nil } @@ -335,6 +370,13 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np 
*container.NodeP } } + nodePool["management"] = []map[string]interface{}{ + { + "auto_repair": np.Management.AutoRepair, + "auto_upgrade": np.Management.AutoUpgrade, + }, + } + return nodePool, nil } @@ -412,6 +454,37 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi } } + if d.HasChange(prefix + "management") { + management := &container.NodeManagement{} + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + management.AutoRepair = managementConfig["auto_repair"].(bool) + management.AutoUpgrade = managementConfig["auto_upgrade"].(bool) + management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"} + } + req := &container.SetNodePoolManagementRequest{ + Management: management, + } + op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetManagement( + project, zone, clusterName, npName, req).Do() + + if err != nil { + return err + } + + // Wait until it's updated + waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool management", timeoutInMinutes, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] Updated management in Node Pool %s", npName) + + if prefix == "" { + d.SetPartial("management") + } + } + return nil } diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go index 726aae98..ae4eb345 100644 --- a/google/resource_container_node_pool_test.go +++ b/google/resource_container_node_pool_test.go @@ -92,6 +92,51 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) { }) } +func TestAccContainerNodePool_withManagement(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10)) + nodePool := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10)) + management := ` + management { + auto_repair = "true" + auto_upgrade = "true" + }` + + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerNodePoolDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccContainerNodePool_withManagement(cluster, nodePool, ""),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_management"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.#", "1"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.0.auto_repair", "false"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.0.auto_upgrade", "false"),
				),
			},
			resource.TestStep{
				Config: testAccContainerNodePool_withManagement(cluster, nodePool, management),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerNodePoolMatches(
						"google_container_node_pool.np_with_management"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.#", "1"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.0.auto_repair", "true"),
					resource.TestCheckResourceAttr(
						"google_container_node_pool.np_with_management", "management.0.auto_upgrade", "true"),
				),
			},
		},
	})
}

func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
	t.Parallel()

@@ -232,6 +277,8 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {

	nodepoolTests := []nodepoolTestField{
		{"initial_node_count", strconv.FormatInt(nodepool.InitialNodeCount, 10)},
		{"management.0.auto_repair", nodepool.Management.AutoRepair},
		{"management.0.auto_upgrade", nodepool.Management.AutoUpgrade},
		{"node_config.0.machine_type", nodepool.Config.MachineType},
		{"node_config.0.disk_size_gb", strconv.FormatInt(nodepool.Config.DiskSizeGb, 10)},
		{"node_config.0.local_ssd_count", strconv.FormatInt(nodepool.Config.LocalSsdCount, 10)},
@@ 
-416,6 +463,30 @@ resource "google_container_node_pool" "np" { }`, cluster, nodePool) } +func testAccContainerNodePool_withManagement(cluster, nodePool, management string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + zone = "us-central1-a" + initial_node_count = 1 +} + +resource "google_container_node_pool" "np_with_management" { + name = "%s" + zone = "us-central1-a" + cluster = "${google_container_cluster.cluster.name}" + initial_node_count = 1 + + %s + + node_config { + machine_type = "g1-small" + disk_size_gb = 10 + oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] + } +}`, cluster, nodePool, management) +} + func nodepoolCheckMatch(attributes map[string]string, attr string, gcp interface{}) string { if gcpList, ok := gcp.([]string); ok { if _, ok := nodepoolSetFields[attr]; ok { diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown index e0e88f72..07e105be 100644 --- a/website/docs/r/container_node_pool.html.markdown +++ b/website/docs/r/container_node_pool.html.markdown @@ -63,6 +63,9 @@ resource "google_container_cluster" "primary" { * `initial_node_count` - (Deprecated, Optional) The initial node count for the pool. Use `node_count` instead. +* `management` - (Optional) Node management configuration, wherein auto-repair and + auto-upgrade is configured. Structure is documented below. + * `name` - (Optional) The name of the node pool. If left blank, Terraform will auto-generate a unique name. @@ -70,7 +73,7 @@ resource "google_container_cluster" "primary" { with the specified prefix. Conflicts with `name`. * `node_config` - (Optional) The node configuration of the pool. See - [google_container_cluster](container_cluster.html for schema. + [google_container_cluster](container_cluster.html) for schema. * `node_count` - (Optional) The number of nodes per instance group. 
@@ -84,6 +87,12 @@ The `autoscaling` block supports: * `max_node_count` - (Required) Maximum number of nodes in the NodePool. Must be >= min_node_count. +The `management` block supports: + +* `auto_repair` - (Optional) Whether the nodes will be automatically repaired. + +* `auto_upgrade` - (Optional) Whether the nodes will be automatically upgraded. + ## Import Node pools can be imported using the `zone`, `cluster` and `name`, e.g.