diff --git a/google/resource_compute_instance_group.go b/google/resource_compute_instance_group.go
index dc529604..6967dd00 100644
--- a/google/resource_compute_instance_group.go
+++ b/google/resource_compute_instance_group.go
@@ -3,6 +3,8 @@ package google
 import (
 	"fmt"
 	"log"
+	"reflect"
+	"sort"
 	"strings"
 
 	"google.golang.org/api/compute/v1"
@@ -94,9 +96,105 @@ func resourceComputeInstanceGroup() *schema.Resource {
 				Computed: true,
 			},
 		},
+
+		CustomizeDiff: customDiffInstanceGroupInstancesField,
 	}
 }
 
+func customDiffInstanceGroupInstancesField(diff *schema.ResourceDiff, meta interface{}) error {
+	// This deals with an interesting problem that deserves some attention.
+	// When an instance is destroyed and recreated, its membership in any
+	// instance groups disappears. However, its recreated `self_link` will
+	// be the same as the `self_link` from before the destroy/recreate.
+	// Therefore, if instances which are set in the `instances` field are
+	// destroyed and recreated, then although in *reality* there is a diff
+	// between the GCP state and the desired state, Terraform cannot *see*
+	// the diff without running a full Read cycle. There is no Read implicit
+	// in the `Apply` stage, so we need to trick Terraform into calling
+	// Update when this happens.
+
+	// This function will be called in 3 different states:
+	// 1) On a new resource which hasn't been created yet. We shouldn't do
+	//    anything interesting in that case.
+	// 2) On an updated resource at "plan" time - that is, before anything
+	//    has actually been done. In that case, we need to show a diff on
+	//    the resource if there's a chance that any of the instances will
+	//    be destroyed and recreated. Fortunately, in this case, the
+	//    upstream logic will show that there is a diff. This will be a
+	//    "Computed" diff - as if we had called diff.SetComputed("instances") -
+	//    and that's a good response.
+	// 3) On an updated resource at "apply" time - that is, right when we're
+	//    about to do something with this resource. This call is designed
+	//    to check whether there really is a diff on the Computed field
+	//    "instances". Here, we have to get tricky. We need to show a diff,
+	//    and it can't be a Computed diff (`apply` skips `Update` if the only
+	//    diffs are Computed). It also can't be a ForceNew diff, because
+	//    Terraform crashes if there's a ForceNew diff at apply time after
+	//    not seeing one at plan time. We're in a pickle - the Terraform
+	//    state matches our desired state, but is *wrong*. We add a fake
+	//    item to the "instances" set, so that Terraform sees a diff
+	//    between the state and the desired state.
+
+	oldI, newI := diff.GetChange("instances")
+	oldInstanceSet := oldI.(*schema.Set)
+	newInstanceSet := newI.(*schema.Set)
+	oldInstances := convertStringArr(oldInstanceSet.List())
+	newInstances := convertStringArr(newInstanceSet.List())
+
+	log.Printf("[DEBUG] InstanceGroup CustomizeDiff old: %#v, new: %#v", oldInstances, newInstances)
+	var memberUrls []string
+	config := meta.(*Config)
+	// We can't use getProject() or getZone(), because we only have a
+	// schema.ResourceDiff, not a schema.ResourceData. We emulate them here.
+	project := diff.Get("project").(string)
+	if project == "" {
+		project = config.Project
+	}
+	zone := diff.Get("zone").(string)
+	if zone == "" {
+		zone = config.Zone
+	}
+
+	// We need to see what instances are present in the instance group.
+	// There are a few possible results:
+	// 1) The instance group doesn't exist. We don't change the diff in
+	//    this case - if the instance group is being created, that's the
+	//    right thing to do.
+	// 2) The instance group exists, and the GCP state matches the
+	//    Terraform state. In this case, we should do nothing.
+	// 3) The instance group exists, and the GCP state does not match the
+	//    Terraform state. In this case, we add the string "FORCE_UPDATE"
+	//    to the list of instances, to convince Terraform to execute an
+	//    update even though there's no diff between the Terraform state
+	//    and the desired state.
+	members, err := config.clientCompute.InstanceGroups.ListInstances(
+		project, zone, diff.Get("name").(string), &compute.InstanceGroupsListInstancesRequest{
+			InstanceState: "ALL",
+		}).Do()
+	if err != nil {
+		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+			// This is where we'll end up at either plan time or apply time
+			// on first creation.
+			return nil
+		} else {
+			// Return any other errors.
+			return fmt.Errorf("Error reading InstanceGroup Members: %s", err)
+		}
+	}
+	for _, member := range members.Items {
+		memberUrls = append(memberUrls, member.Instance)
+	}
+	sort.Strings(memberUrls)
+	sort.Strings(oldInstances)
+	sort.Strings(newInstances)
+	log.Printf("[DEBUG] InstanceGroup members: %#v. OldInstances: %#v", memberUrls, oldInstances)
+	if !reflect.DeepEqual(memberUrls, oldInstances) && reflect.DeepEqual(newInstances, oldInstances) {
+		// This is where we'll end up at apply time, and only if an instance
+		// was somehow removed from the group's membership between refresh
+		// and update.
+		newInstancesList := append(newInstances, "FORCE_UPDATE")
+		return diff.SetNew("instances", newInstancesList)
+	}
+	// This is where we'll end up if the GCP state matches the Terraform state.
+	return nil
+}
+
 func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) {
 	for _, v := range instanceUrls {
 		refs = append(refs, &compute.InstanceReference{
@@ -247,6 +345,10 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{})
 	return nil
 }
 
 func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	err := resourceComputeInstanceGroupRead(d, meta)
+	if err != nil {
+		return err
+	}
 	config := meta.(*Config)
 
 	project, err := getProject(d, config)
@@ -264,7 +366,11 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}
 	if d.HasChange("instances") {
 		// to-do check for no instances
-		from_, to_ := d.GetChange("instances")
+		_, to_ := d.GetChange("instances")
+		// We need to get the current state from d directly because it is
+		// likely to have been changed by the Read() above.
+		from_ := d.Get("instances")
+		to_.(*schema.Set).Remove("FORCE_UPDATE")
 
 		from := convertStringArr(from_.(*schema.Set).List())
 		to := convertStringArr(to_.(*schema.Set).List())
 
@@ -325,7 +431,10 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}
 	if d.HasChange("named_port") {
-		namedPorts := getNamedPorts(d.Get("named_port").([]interface{}))
+		// Important to fetch via GetChange, because the Read() above will
+		// have reset the value retrieved via Get() to its current value.
+		_, namedPorts_ := d.GetChange("named_port")
+		namedPorts := getNamedPorts(namedPorts_.([]interface{}))
 
 		namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{
 			NamedPorts: namedPorts,
diff --git a/google/resource_compute_instance_group_test.go b/google/resource_compute_instance_group_test.go
index ba91f78a..72cd7270 100644
--- a/google/resource_compute_instance_group_test.go
+++ b/google/resource_compute_instance_group_test.go
@@ -40,6 +40,37 @@ func TestAccComputeInstanceGroup_basic(t *testing.T) {
 	})
 }
 
+func TestAccComputeInstanceGroup_recreatedInstances(t *testing.T) {
+	t.Parallel()
+
+	var instanceGroup compute.InstanceGroup
+	var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccComputeInstanceGroup_destroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccComputeInstanceGroup_update(instanceName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccComputeInstanceGroup_exists(
+						"google_compute_instance_group.update", &instanceGroup),
+				),
+			},
+			{
+				Config: testAccComputeInstanceGroup_recreateInstances(instanceName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccComputeInstanceGroup_exists(
+						"google_compute_instance_group.update", &instanceGroup),
+					testAccComputeInstanceGroup_updated(
+						"google_compute_instance_group.update", 2, &instanceGroup),
+				),
+			},
+		},
+	})
+}
+
 func TestAccComputeInstanceGroup_update(t *testing.T) {
 	t.Parallel()
 
@@ -397,6 +428,44 @@ func testAccComputeInstanceGroup_update2(instance string) string {
 	}`, instance, instance)
 }
 
+func testAccComputeInstanceGroup_recreateInstances(instance string) string {
+	return fmt.Sprintf(`
+	resource "google_compute_instance" "ig_instance" {
+		name = "%s-${count.index}"
+		machine_type = "n1-standard-1"
+		can_ip_forward = false
+		zone = "us-central1-c"
+		count = 2
+
+		boot_disk {
+			initialize_params {
+				image = "debian-8-jessie-v20160803"
+			}
+		}
+
+		metadata_startup_script = "echo 'foo'"
+
+		network_interface {
+			network = "default"
+		}
+	}
+
+	resource "google_compute_instance_group" "update" {
+		description = "Terraform test instance group"
+		name = "%s"
+		zone = "us-central1-c"
+		instances = [ "${google_compute_instance.ig_instance.*.self_link}" ]
+		named_port {
+			name = "http"
+			port = "8080"
+		}
+		named_port {
+			name = "https"
+			port = "8443"
+		}
+	}`, instance, instance)
+}
+
 func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string {
 	return fmt.Sprintf(`
 	resource "google_compute_instance" "ig_instance" {
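
The crux of `customDiffInstanceGroupInstancesField` is a pair of comparisons: live group membership against the old Terraform state (drift detection), and old state against new state (confirming Terraform itself sees no diff). Here is a minimal, self-contained sketch of that decision and the `FORCE_UPDATE` sentinel; `stateDrifted` and the placeholder URLs are illustrative only, not provider code:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// stateDrifted reports whether live group membership differs from the
// instances recorded in Terraform state. Both slices are sorted first:
// reflect.DeepEqual is order-sensitive, and neither the ListInstances API
// nor a schema.Set guarantees ordering.
func stateDrifted(liveMembers, stateInstances []string) bool {
	sort.Strings(liveMembers)
	sort.Strings(stateInstances)
	return !reflect.DeepEqual(liveMembers, stateInstances)
}

func main() {
	// Placeholder URLs: instance "a" was destroyed and recreated, so it
	// silently fell out of the group while keeping its old self_link.
	live := []string{".../instances/b"}
	state := []string{".../instances/a", ".../instances/b"}
	desired := []string{".../instances/a", ".../instances/b"} // config unchanged

	if stateDrifted(live, state) && reflect.DeepEqual(state, desired) {
		// Mirrors the CustomizeDiff trick: append a sentinel so Terraform
		// sees a real (non-Computed) diff and runs Update, which strips
		// the sentinel again before calling the API.
		fmt.Println("forcing update with:", append(desired, "FORCE_UPDATE"))
	}
}
```

Sorting both sides before comparing matters because the API may return members in any order, while the state and plan sets hash-order their elements.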
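
On the Update side, `from` is taken from the refreshed state (`d.Get` after the explicit Read) and `to` from the plan with the sentinel stripped; the surrounding update code (context not shown in the hunk) turns those two sets into add and remove membership calls. A sketch of that set arithmetic, assuming plain string slices in place of `*schema.Set` (the `difference` helper is illustrative, not part of the provider):

```go
package main

import "fmt"

// difference returns the elements of a that are not present in b.
func difference(a, b []string) []string {
	seen := make(map[string]bool, len(b))
	for _, s := range b {
		seen[s] = true
	}
	var out []string
	for _, s := range a {
		if !seen[s] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	// from mirrors the refreshed state after the in-Update Read(); the
	// recreated instance "a" has dropped out of the group. to mirrors the
	// planned membership once the FORCE_UPDATE sentinel has been removed.
	from := []string{".../instances/b"}
	to := []string{".../instances/a", ".../instances/b"}

	fmt.Println("add:", difference(to, from))    // re-add recreated instances
	fmt.Println("remove:", difference(from, to)) // drop instances no longer wanted
}
```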