From 5b0d8d43ff8da157c3417921834400e85ece6fc0 Mon Sep 17 00:00:00 2001
From: Darren Hague
Date: Sat, 24 Feb 2018 00:55:07 +0000
Subject: [PATCH] Allow specifying accelerators in cluster node_config (#1115)

---
 google/node_config.go                        | 59 ++++++++++++---
 google/resource_container_node_pool_test.go  | 72 +++++++++++++++++++
 .../docs/r/container_cluster.html.markdown   |  9 +++
 .../docs/r/container_node_pool.html.markdown |  4 ++
 4 files changed, 134 insertions(+), 10 deletions(-)

diff --git a/google/node_config.go b/google/node_config.go
index f8b3f46a..60fddc55 100644
--- a/google/node_config.go
+++ b/google/node_config.go
@@ -32,6 +32,28 @@ var schemaNodeConfig = &schema.Schema{
 			ValidateFunc: validation.IntAtLeast(10),
 		},
 
+		"guest_accelerator": &schema.Schema{
+			Type:     schema.TypeList,
+			Optional: true,
+			Computed: true,
+			ForceNew: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"count": &schema.Schema{
+						Type:     schema.TypeInt,
+						Required: true,
+						ForceNew: true,
+					},
+					"type": &schema.Schema{
+						Type:             schema.TypeString,
+						Required:         true,
+						ForceNew:         true,
+						DiffSuppressFunc: linkDiffSuppress,
+					},
+				},
+			},
+		},
+
 		"image_type": {
 			Type:     schema.TypeString,
 			Optional: true,
@@ -128,6 +150,22 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 		nc.MachineType = v.(string)
 	}
 
+	if v, ok := nodeConfig["guest_accelerator"]; ok {
+		accels := v.([]interface{})
+		guestAccelerators := make([]*container.AcceleratorConfig, 0, len(accels))
+		for _, raw := range accels {
+			data := raw.(map[string]interface{})
+			if data["count"].(int) == 0 {
+				continue
+			}
+			guestAccelerators = append(guestAccelerators, &container.AcceleratorConfig{
+				AcceleratorCount: int64(data["count"].(int)),
+				AcceleratorType:  data["type"].(string),
+			})
+		}
+		nc.Accelerators = guestAccelerators
+	}
+
 	if v, ok := nodeConfig["disk_size_gb"]; ok {
 		nc.DiskSizeGb = int64(v.(int))
 	}
@@ -196,16 +234,17 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
 	}
 
 	config = append(config, map[string]interface{}{
-		"machine_type":     c.MachineType,
-		"disk_size_gb":     c.DiskSizeGb,
-		"local_ssd_count":  c.LocalSsdCount,
-		"service_account":  c.ServiceAccount,
-		"metadata":         c.Metadata,
-		"image_type":       c.ImageType,
-		"labels":           c.Labels,
-		"tags":             c.Tags,
-		"preemptible":      c.Preemptible,
-		"min_cpu_platform": c.MinCpuPlatform,
+		"machine_type":      c.MachineType,
+		"disk_size_gb":      c.DiskSizeGb,
+		"guest_accelerator": c.Accelerators,
+		"local_ssd_count":   c.LocalSsdCount,
+		"service_account":   c.ServiceAccount,
+		"metadata":          c.Metadata,
+		"image_type":        c.ImageType,
+		"labels":            c.Labels,
+		"tags":              c.Tags,
+		"preemptible":       c.Preemptible,
+		"min_cpu_platform":  c.MinCpuPlatform,
 	})
 
 	if len(c.OauthScopes) > 0 {
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
index 0e8d4fde..2e41827b 100644
--- a/google/resource_container_node_pool_test.go
+++ b/google/resource_container_node_pool_test.go
@@ -92,6 +92,28 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
 	})
 }
 
+func TestAccContainerNodePool_withGPU(t *testing.T) {
+	t.Parallel()
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerNodePoolDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccContainerNodePool_withGPU(),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_gpu"),
+				),
+			},
+			resource.TestStep{
+				ResourceName: "google_container_node_pool.np_with_gpu",
+				ImportState:  true,
+			},
+		},
+	})
+}
+
 func TestAccContainerNodePool_withManagement(t *testing.T) {
 	t.Parallel()
 
@@ -315,6 +337,18 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
 		}
 
+		// Check that TF state and GCP agree on whether a guest accelerator is attached,
+		// then compare its type and count.
+		tfGA := attributes["node_config.0.guest_accelerator.#"] == "1"
+		if gcpGA := len(nodepool.Config.Accelerators) == 1; tfGA != gcpGA {
+			return fmt.Errorf("Mismatched NodeConfig.Accelerators presence. TF State: %t. GCP State: %t", tfGA, gcpGA)
+		}
+		if tfGA {
+			if tf := attributes["node_config.0.guest_accelerator.0.type"]; nodepool.Config.Accelerators[0].AcceleratorType != tf {
+				return fmt.Errorf("Mismatched NodeConfig.Accelerators type. TF State: %s. GCP State: %s",
+					tf, nodepool.Config.Accelerators[0].AcceleratorType)
+			}
+			if tf := attributes["node_config.0.guest_accelerator.0.count"]; strconv.FormatInt(nodepool.Config.Accelerators[0].AcceleratorCount, 10) != tf {
+				return fmt.Errorf("Mismatched NodeConfig.Accelerators count. TF State: %s. GCP State: %d",
+					tf, nodepool.Config.Accelerators[0].AcceleratorCount)
+			}
+		}
+
 		return nil
 	}
 }
@@ -583,6 +617,44 @@ resource "google_container_node_pool" "np_with_node_config" {
 }`, acctest.RandString(10), acctest.RandString(10))
 }
 
+func testAccContainerNodePool_withGPU() string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "cluster" {
+	name = "tf-cluster-nodepool-test-%s"
+	zone = "us-central1-c"
+	initial_node_count = 1
+	node_version = "1.9.2-gke.1"
+	min_master_version = "1.9.2-gke.1"
+}
+resource "google_container_node_pool" "np_with_gpu" {
+	name = "tf-nodepool-test-%s"
+	zone = "us-central1-c"
+	cluster = "${google_container_cluster.cluster.name}"
+	initial_node_count = 1
+	node_config {
+		machine_type = "n1-standard-1"
+		disk_size_gb = 10
+		oauth_scopes = [
+			"https://www.googleapis.com/auth/devstorage.read_only",
+			"https://www.googleapis.com/auth/logging.write",
+			"https://www.googleapis.com/auth/monitoring",
+			"https://www.googleapis.com/auth/service.management.readonly",
+			"https://www.googleapis.com/auth/servicecontrol",
+			"https://www.googleapis.com/auth/trace.append"
+		]
+		preemptible = true
+		service_account = "default"
+		image_type = "COS"
+		guest_accelerator = [
+			{
+				type  = "nvidia-tesla-k80"
+				count = 1
+			}
+		]
+	}
+}`, acctest.RandString(10), acctest.RandString(10))
+}
+
 func testAccContainerNodePool_withNodeConfigScopeAlias() string {
 	return fmt.Sprintf(`
 resource "google_container_cluster" "cluster" {
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown
index b53baaea..a5ada6a3 100644
--- a/website/docs/r/container_cluster.html.markdown
+++ b/website/docs/r/container_cluster.html.markdown
@@ -235,6 +235,9 @@ The `node_config` block supports:
 * `disk_size_gb` - (Optional) Size of the disk attached to each node, specified
     in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
 
+* `guest_accelerator` - (Optional) List of the type and count of accelerator cards attached to the instance.
+    Structure documented below.
+
 * `image_type` - (Optional) The image type to use for this node.
 
 * `labels` - (Optional) The Kubernetes labels (key/value pairs) to be applied to each node.
@@ -276,6 +279,12 @@ The `node_config` block supports:
 * `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify
     valid sources or targets for network firewalls.
 
+The `guest_accelerator` block supports:
+
+* `type` (Required) - The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`.
+
+* `count` (Required) - The number of the guest accelerator cards exposed to this instance.
+
 ## Attributes Reference
 
 In addition to the arguments listed above, the following computed attributes are
diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown
index 962a306b..601cebf1 100644
--- a/website/docs/r/container_node_pool.html.markdown
+++ b/website/docs/r/container_node_pool.html.markdown
@@ -45,6 +45,10 @@ resource "google_container_cluster" "primary" {
       "https://www.googleapis.com/auth/logging.write",
       "https://www.googleapis.com/auth/monitoring",
     ]
+    guest_accelerator = [{
+      type  = "nvidia-tesla-k80"
+      count = 1
+    }]
   }
 }
 ```
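
The `container_cluster` documentation above describes the new `guest_accelerator` block but does not include a usage example. The sketch below, which is not part of the patch, shows how the block would be written for `google_container_cluster` based on the schema added here; the resource name, cluster name, zone, machine type, and accelerator count are placeholder values rather than anything taken from the change itself:

```hcl
# Illustrative only: names, zone, machine type, and count are placeholders.
resource "google_container_cluster" "gpu_cluster" {
  name               = "example-gpu-cluster"
  zone               = "us-central1-c"
  initial_node_count = 1

  node_config {
    machine_type = "n1-standard-1"

    # Fields added by this patch: a list of accelerator type/count pairs.
    guest_accelerator = [
      {
        type  = "nvidia-tesla-k80"
        count = 1
      }
    ]
  }
}
```

Because `guest_accelerator` and its `type` and `count` fields are marked `ForceNew` in the schema, changing any of them forces the cluster or node pool to be recreated rather than updated in place.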