skip guest accelerators if count is 0. (#866)

* skip guest accelerators if count is 0.

Instances in instance groups on Google will fail to provision, despite
requesting 0 GPUs. This came up for me when trying to provision
a similar instance group in all available regions, but only asking for
GPUs in those that support them, by parameterizing the `count` and
setting it to 0.

This might be a violation of some Terraform principles. For example,
testing locally with this change, `terraform` did not recognize that
indeed my infra needed to be re-deployed (from its point of view, I
assume it believes this because inputs hadn't changed). Additionally,
there may be valid reasons for creating an instance template with 0 GPUs
that can be tuned upwards.

* Add guest accelerator skip test for instances.

* do not leave empty pointers to guest accelerators.

* attempt to clear guest accelerator diff

* conditionally customize diff for guest accels
This commit is contained in:
Jacob Straszynski 2018-01-23 11:51:36 -08:00 committed by Vincent Roseberry
parent 2a69744518
commit 939ba6d365
4 changed files with 139 additions and 16 deletions

View File

@ -8,6 +8,7 @@ import (
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/customdiff"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"github.com/mitchellh/hashstructure"
@ -496,6 +497,7 @@ func resourceComputeInstance() *schema.Resource {
"guest_accelerator": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Computed: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@ -556,6 +558,14 @@ func resourceComputeInstance() *schema.Resource {
Deprecated: "Use timeouts block instead.",
},
},
CustomizeDiff: customdiff.All(
customdiff.If(
func(d *schema.ResourceDiff, meta interface{}) bool {
return d.HasChange("guest_accelerator")
},
suppressEmptyGuestAcceleratorDiff,
),
),
}
}
@ -1216,22 +1226,67 @@ func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([
return nil, nil
}
accels := configs.([]interface{})
guestAccelerators := make([]*computeBeta.AcceleratorConfig, len(accels))
for i, raw := range accels {
guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels))
for _, raw := range accels {
data := raw.(map[string]interface{})
if data["count"].(int) == 0 {
continue
}
at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config)
if err != nil {
return nil, fmt.Errorf("cannot parse accelerator type: %v", err)
}
guestAccelerators[i] = &computeBeta.AcceleratorConfig{
guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{
AcceleratorCount: int64(data["count"].(int)),
AcceleratorType: at.RelativeLink(),
}
})
}
return guestAccelerators, nil
}
// suppressEmptyGuestAcceleratorDiff is used to work around perpetual diff
// issues when a count of `0` guest accelerators is desired. This may occur when
// guest_accelerator support is controlled via a module variable. E.g.:
//
//	guest_accelerator {
//	  count = "${var.enable_gpu ? var.gpu_count : 0}"
//	  ...
//	}
//
// After reconciling the desired and actual state, we would otherwise see a
// perpetual diff resembling:
//
//	[] != [{"count":0, "type": "nvidia-tesla-k80"}]
func suppressEmptyGuestAcceleratorDiff(d *schema.ResourceDiff, meta interface{}) error {
	oldi, newi := d.GetChange("guest_accelerator")

	old, ok := oldi.([]interface{})
	if !ok {
		return fmt.Errorf("Expected old guest accelerator diff to be a slice")
	}

	new, ok := newi.([]interface{})
	if !ok {
		return fmt.Errorf("Expected new guest accelerator diff to be a slice")
	}

	// Only suppress when the prior state had no accelerators and the config
	// declares exactly one guest_accelerator block. Using || here (rather
	// than &&) also guards the new[0] access below: with &&, an empty `new`
	// slice would fall through and panic with an index out of range.
	if len(old) != 0 || len(new) != 1 {
		return nil
	}

	firstAccel, ok := new[0].(map[string]interface{})
	if !ok {
		return fmt.Errorf("Unable to type assert guest accelerator")
	}

	// A single block requesting zero accelerators is equivalent to none at
	// all, so clear the diff to avoid the perpetual mismatch described above.
	if firstAccel["count"].(int) == 0 {
		if err := d.Clear("guest_accelerator"); err != nil {
			return err
		}
	}

	return nil
}
func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)

View File

@ -502,15 +502,18 @@ func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Co
return nil
}
accels := configs.([]interface{})
guestAccelerators := make([]*computeBeta.AcceleratorConfig, len(accels))
for i, raw := range accels {
guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels))
for _, raw := range accels {
data := raw.(map[string]interface{})
guestAccelerators[i] = &computeBeta.AcceleratorConfig{
if data["count"].(int) == 0 {
continue
}
guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{
AcceleratorCount: int64(data["count"].(int)),
// We can't use ParseAcceleratorFieldValue here because an instance
// template does not have a zone we can use.
AcceleratorType: data["type"].(string),
}
})
}
return guestAccelerators

View File

@ -351,7 +351,7 @@ func TestAccComputeInstanceTemplate_guestAccelerator(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_guestAccelerator(acctest.RandString(10)),
Config: testAccComputeInstanceTemplate_guestAccelerator(acctest.RandString(10), 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists("google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateHasGuestAccelerator(&instanceTemplate, "nvidia-tesla-k80", 1),
@ -367,6 +367,28 @@ func TestAccComputeInstanceTemplate_guestAccelerator(t *testing.T) {
}
// TestAccComputeInstanceTemplate_guestAcceleratorSkip verifies that a
// guest_accelerator block with count = 0 produces a template with no
// guest accelerators attached.
func TestAccComputeInstanceTemplate_guestAcceleratorSkip(t *testing.T) {
	t.Parallel()

	var tmpl compute.InstanceTemplate

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccComputeInstanceTemplate_guestAccelerator(acctest.RandString(10), 0),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeInstanceTemplateExists("google_compute_instance_template.foobar", &tmpl),
					testAccCheckComputeInstanceTemplateLacksGuestAccelerator(&tmpl),
				),
			},
		},
	})
}
func TestAccComputeInstanceTemplate_minCpuPlatform(t *testing.T) {
t.Parallel()
@ -664,6 +686,16 @@ func testAccCheckComputeInstanceTemplateHasGuestAccelerator(instanceTemplate *co
}
}
// testAccCheckComputeInstanceTemplateLacksGuestAccelerator returns a check
// that fails when the template's properties carry any guest accelerators.
func testAccCheckComputeInstanceTemplateLacksGuestAccelerator(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if accels := instanceTemplate.Properties.GuestAccelerators; len(accels) > 0 {
			return fmt.Errorf("Expected no guest accelerators")
		}
		return nil
	}
}
func testAccCheckComputeInstanceTemplateHasMinCpuPlatform(instanceTemplate *compute.InstanceTemplate, minCpuPlatform string) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instanceTemplate.Properties.MinCpuPlatform != minCpuPlatform {
@ -1087,7 +1119,7 @@ resource "google_compute_instance_template" "foobar" {
}`, i, i, i)
}
func testAccComputeInstanceTemplate_guestAccelerator(i string) string {
func testAccComputeInstanceTemplate_guestAccelerator(i string, count uint8) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instance-test-%s"
@ -1110,10 +1142,10 @@ resource "google_compute_instance_template" "foobar" {
}
guest_accelerator {
count = 1
count = %d
type = "nvidia-tesla-k80"
}
}`, i)
}`, i, count)
}
func testAccComputeInstanceTemplate_minCpuPlatform(i string) string {

View File

@ -801,7 +801,7 @@ func TestAccComputeInstance_guestAccelerator(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_guestAccelerator(instanceName),
Config: testAccComputeInstance_guestAccelerator(instanceName, 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists("google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceHasGuestAccelerator(&instance, "nvidia-tesla-k80", 1),
@ -817,6 +817,29 @@ func TestAccComputeInstance_guestAccelerator(t *testing.T) {
}
// TestAccComputeInstance_guestAcceleratorSkip verifies that requesting a
// guest_accelerator with count = 0 provisions an instance without GPUs.
func TestAccComputeInstance_guestAcceleratorSkip(t *testing.T) {
	t.Parallel()

	var inst compute.Instance
	name := fmt.Sprintf("terraform-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeInstanceDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccComputeInstance_guestAccelerator(name, 0),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeInstanceExists("google_compute_instance.foobar", &inst),
					testAccCheckComputeInstanceLacksGuestAccelerator(&inst),
				),
			},
		},
	})
}
func TestAccComputeInstance_minCpuPlatform(t *testing.T) {
t.Parallel()
@ -1298,6 +1321,16 @@ func testAccCheckComputeInstanceHasGuestAccelerator(instance *compute.Instance,
}
}
// testAccCheckComputeInstanceLacksGuestAccelerator returns a check that
// fails when the instance reports one or more guest accelerators.
func testAccCheckComputeInstanceLacksGuestAccelerator(instance *compute.Instance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if len(instance.GuestAccelerators) == 0 {
			return nil
		}
		return fmt.Errorf("Expected no guest accelerators")
	}
}
func testAccCheckComputeInstanceHasMinCpuPlatform(instance *compute.Instance, minCpuPlatform string) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instance.MinCpuPlatform != minCpuPlatform {
@ -2282,7 +2315,7 @@ resource "google_compute_subnetwork" "inst-test-subnetwork" {
`, instance, network, subnetwork)
}
func testAccComputeInstance_guestAccelerator(instance string) string {
func testAccComputeInstance_guestAccelerator(instance string, count uint8) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
name = "%s"
@ -2305,10 +2338,10 @@ resource "google_compute_instance" "foobar" {
}
guest_accelerator {
count = 1
count = %d
type = "nvidia-tesla-k80"
}
}`, instance)
}`, instance, count)
}
func testAccComputeInstance_minCpuPlatform(instance string) string {