Implement multiple versions in instance group manager (#1499)

Hi there,

Here is an attempt to implement canary releases (#1252). This is the first time I have written Go or contributed to Terraform, so I opened this PR to get feedback and advice; please let me know how I can improve this code!

I also used `make fmt` to format the code but left some lines longer than 80 characters; do I need to split them?

I tested the feature against a project with the following configuration:
```hcl
resource "google_compute_health_check" "mikael-hackathon-healthcheck" {
  name                = "mikael-hackathon-healthcheck"
  check_interval_sec  = 1
  timeout_sec         = 1
  healthy_threshold   = 2
  unhealthy_threshold = 10

  http_health_check {
    request_path = "/"
    port         = "80"
  }
}

resource "google_compute_instance_template" "mikael-hackaton-template" {
  name_prefix = "mikael-hackaton-"
  description = "This template is used to create app server instances."

  tags = ["loadbalanced", "internal-web", "hackaton"]

  labels = {
    environment = "hackaton"
  }

  instance_description = "Hackaton demo rolling upgrade"
  machine_type         = "n1-standard-1"
  can_ip_forward       = false

  scheduling {
    automatic_restart   = true
    on_host_maintenance = "MIGRATE"
  }

  disk {
    source_image = "debian-cloud/debian-9"
    disk_type    = "pd-standard"
    disk_size_gb = 20
    auto_delete  = true
    boot         = true
  }

  network_interface {
    network       = "default"
    access_config = {}
  }

  service_account {
    email  = "${google_service_account.mikael-hackaton.email}"
    scopes = ["cloud-platform"]
  }

  lifecycle {
    create_before_destroy = true
  }

  metadata_startup_script = "apt-get update && apt-get install -y apache2 && echo I am stable version at $(hostname) > /var/www/html/index.html"
}

resource "google_compute_instance_template" "mikael-hackaton-template-canary" {
  name_prefix = "mikael-hackaton-canary"
  description = "This template is used to create app server instances."

  tags = ["loadbalanced", "internal-web", "hackaton"]

  labels = {
    environment = "hackaton"
  }

  instance_description = "Hackaton demo rolling upgrade"
  machine_type         = "n1-standard-1"
  can_ip_forward       = false

  scheduling {
    automatic_restart   = true
    on_host_maintenance = "MIGRATE"
  }

  disk {
    source_image = "debian-cloud/debian-9"
    disk_type    = "pd-standard"
    disk_size_gb = 20
    auto_delete  = true
    boot         = true
  }

  network_interface {
    network       = "default"
    access_config = {}
  }

  service_account {
    email  = "${google_service_account.mikael-hackaton.email}"
    scopes = ["cloud-platform"]
  }

  lifecycle {
    create_before_destroy = true
  }

  metadata_startup_script = "apt-get update && apt-get install -y apache2 && echo I am a canary at $(hostname) > /var/www/html/index.html"
}

resource "google_compute_target_pool" "mikael-hackaton-target-pool" {
  name = "mikael-hackaton-target-pool"
}

resource "google_compute_instance_group_manager" "mikael-hackaton-manager" {
  name = "mikael-hackaton-manager"
  base_instance_name = "mikael-hackaton"
  #instance_template = "${google_compute_instance_template.mikael-hackaton-template.self_link}"
  update_strategy   = "ROLLING_UPDATE"
  zone              = "${var.zone}"
  target_pools = ["${google_compute_target_pool.mikael-hackaton-target-pool.self_link}"]
  target_size  = 5

  version {
    name = "primary"
    instance_template = "${google_compute_instance_template.mikael-hackaton-template.self_link}"
  }

  version {
    name = "canary"
    instance_template = "${google_compute_instance_template.mikael-hackaton-template-canary.self_link}"
    target_size_fixed = 1
  }

  named_port {
    name = "http"
    port = 80
  }

  auto_healing_policies {
    health_check      = "${google_compute_health_check.mikael-hackathon-healthcheck.self_link}"
    initial_delay_sec = 10
  }

  rolling_update_policy {
    type                    = "PROACTIVE"
    minimal_action          = "REPLACE"
    max_surge_percent       = 100
    max_unavailable_percent = 50
    min_ready_sec           = 5
  }
}
```
Commit d60381a693 (parent ac6e30d335) by Mikaël Gibert, 2018-06-05 00:34:48 +02:00; committed by Dana Hoffman.
3 changed files with 399 additions and 79 deletions


@ -33,10 +33,50 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
"instance_template": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DiffSuppressFunc: compareSelfLinkRelativePaths,
},
"version": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"instance_template": &schema.Schema{
Type: schema.TypeString,
Required: true,
DiffSuppressFunc: compareSelfLinkRelativePaths,
},
"target_size": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"fixed": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"percent": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(0, 100),
},
},
},
},
},
},
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
@ -138,6 +178,7 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
},
},
},
"rolling_update_policy": &schema.Schema{
Type: schema.TypeList,
Optional: true,
@ -192,6 +233,7 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
},
},
},
"wait_for_instances": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
@ -254,6 +296,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte
NamedPorts: getNamedPortsBeta(d.Get("named_port").([]interface{})),
TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)),
AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})),
Versions: expandVersions(d.Get("version").([]interface{})),
// Force send TargetSize to allow a value of 0.
ForceSendFields: []string{"TargetSize"},
}
@ -290,6 +333,31 @@ func flattenNamedPortsBeta(namedPorts []*computeBeta.NamedPort) []map[string]int
}
func flattenVersions(versions []*computeBeta.InstanceGroupManagerVersion) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(versions))
for _, version := range versions {
versionMap := make(map[string]interface{})
versionMap["name"] = version.Name
versionMap["instance_template"] = ConvertSelfLinkToV1(version.InstanceTemplate)
versionMap["target_size"] = flattenFixedOrPercent(version.TargetSize)
result = append(result, versionMap)
}
return result
}
func flattenFixedOrPercent(fixedOrPercent *computeBeta.FixedOrPercent) []map[string]interface{} {
// Versions without an explicit target size may come back with a nil TargetSize.
if fixedOrPercent == nil {
return []map[string]interface{}{}
}
result := make(map[string]interface{})
if value := fixedOrPercent.Percent; value > 0 {
result["percent"] = value
} else if value := fixedOrPercent.Fixed; value > 0 {
result["fixed"] = value
} else {
return []map[string]interface{}{}
}
return []map[string]interface{}{result}
}
func getManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) {
config := meta.(*Config)
@ -352,6 +420,9 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
d.Set("base_instance_name", manager.BaseInstanceName)
d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate))
if err := d.Set("version", flattenVersions(manager.Versions)); err != nil {
return err
}
d.Set("name", manager.Name)
d.Set("zone", GetResourceNameFromSelfLink(manager.Zone))
d.Set("description", manager.Description)
@ -385,6 +456,63 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
return nil
}
// Updates an instance group manager by applying an update strategy (RESTART or ROLLING_UPDATE) while respecting a rolling update policy
// (availability settings, interval between updates, and in particular the type of update, PROACTIVE or OPPORTUNISTIC, because updates
// performed through the API are considered OPPORTUNISTIC by default).
func performUpdate(config *Config, id string, updateStrategy string, rollingUpdatePolicy *computeBeta.InstanceGroupManagerUpdatePolicy, versions []*computeBeta.InstanceGroupManagerVersion, project string, zone string) error {
if updateStrategy == "RESTART" {
managedInstances, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances(project, zone, id).Do()
if err != nil {
return fmt.Errorf("Error getting instance group managers instances: %s", err)
}
managedInstanceCount := len(managedInstances.ManagedInstances)
instances := make([]string, managedInstanceCount)
for i, v := range managedInstances.ManagedInstances {
instances[i] = v.Instance
}
recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{
Instances: instances,
}
op, err := config.clientComputeBeta.InstanceGroupManagers.RecreateInstances(project, zone, id, recreateInstances).Do()
if err != nil {
return fmt.Errorf("Error restarting instance group managers instances: %s", err)
}
// Wait for the operation to complete
err = computeSharedOperationWaitTime(config.clientCompute, op, project, managedInstanceCount*4, "Restarting InstanceGroupManagers instances")
if err != nil {
return err
}
}
if updateStrategy == "ROLLING_UPDATE" {
// UpdatePolicy is set for InstanceGroupManager on update only, because it is only relevant for `Patch` calls.
// Other tools (gcloud and the UI) capable of executing the same `ROLLING_UPDATE` call
// expect those values to be provided by the user as part of the call,
// or provide their own defaults without respecting what was previously set on the InstanceGroupManager.
// To follow the same logic, we provide policy values only on the relevant update change.
manager := &computeBeta.InstanceGroupManager{
UpdatePolicy: rollingUpdatePolicy,
Versions: versions,
}
op, err := config.clientComputeBeta.InstanceGroupManagers.Patch(project, zone, id, manager).Do()
if err != nil {
return fmt.Errorf("Error updating managed group instances: %s", err)
}
err = computeSharedOperationWait(config.clientCompute, op, project, "Updating managed group instances")
if err != nil {
return err
}
}
return nil
}
func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
@ -430,81 +558,6 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte
d.SetPartial("target_pools")
}
// If instance_template changes then update
if d.HasChange("instance_template") {
// Build the parameter
setInstanceTemplate := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{
InstanceTemplate: d.Get("instance_template").(string),
}
op, err := config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate(
project, zone, d.Id(), setInstanceTemplate).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
}
// Wait for the operation to complete
err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager")
if err != nil {
return err
}
if d.Get("update_strategy").(string) == "RESTART" {
managedInstances, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances(
project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error getting instance group managers instances: %s", err)
}
managedInstanceCount := len(managedInstances.ManagedInstances)
instances := make([]string, managedInstanceCount)
for i, v := range managedInstances.ManagedInstances {
instances[i] = v.Instance
}
recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{
Instances: instances,
}
op, err = config.clientComputeBeta.InstanceGroupManagers.RecreateInstances(
project, zone, d.Id(), recreateInstances).Do()
if err != nil {
return fmt.Errorf("Error restarting instance group managers instances: %s", err)
}
// Wait for the operation to complete
err = computeSharedOperationWaitTime(config.clientCompute, op, project, managedInstanceCount*4, "Restarting InstanceGroupManagers instances")
if err != nil {
return err
}
}
if d.Get("update_strategy").(string) == "ROLLING_UPDATE" {
// UpdatePolicy is set for InstanceGroupManager on update only, because it is only relevant for `Patch` calls.
// Other tools(gcloud and UI) capable of executing the same `ROLLING UPDATE` call
// expect those values to be provided by user as part of the call
// or provide their own defaults without respecting what was previously set on UpdateManager.
// To follow the same logic, we provide policy values on relevant update change only.
manager := &computeBeta.InstanceGroupManager{
UpdatePolicy: expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{})),
}
op, err = config.clientComputeBeta.InstanceGroupManagers.Patch(
project, zone, d.Id(), manager).Do()
if err != nil {
return fmt.Errorf("Error updating managed group instances: %s", err)
}
err = computeSharedOperationWait(config.clientCompute, op, project, "Updating managed group instances")
if err != nil {
return err
}
}
d.SetPartial("instance_template")
}
// If named_port changes then update:
if d.HasChange("named_port") {
@ -572,6 +625,44 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte
d.SetPartial("auto_healing_policies")
}
// If instance_template changes then update
if d.HasChange("instance_template") {
// Build the parameter
setInstanceTemplate := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{
InstanceTemplate: d.Get("instance_template").(string),
}
op, err := config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate(project, zone, d.Id(), setInstanceTemplate).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
}
// Wait for the operation to complete
err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager")
if err != nil {
return err
}
updateStrategy := d.Get("update_strategy").(string)
rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{}))
err = performUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, nil, project, zone)
if err != nil {
return err
}
d.SetPartial("instance_template")
}
// If version changes then update
if d.HasChange("version") {
updateStrategy := d.Get("update_strategy").(string)
rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{}))
versions := expandVersions(d.Get("version").([]interface{}))
err = performUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, versions, project, zone)
if err != nil {
return err
}
d.SetPartial("version")
}
d.Partial(false)
return resourceComputeInstanceGroupManagerRead(d, meta)
@ -647,6 +738,37 @@ func expandAutoHealingPolicies(configured []interface{}) []*computeBeta.Instance
return autoHealingPolicies
}
func expandVersions(configured []interface{}) []*computeBeta.InstanceGroupManagerVersion {
versions := make([]*computeBeta.InstanceGroupManagerVersion, 0, len(configured))
for _, raw := range configured {
data := raw.(map[string]interface{})
version := computeBeta.InstanceGroupManagerVersion{
Name: data["name"].(string),
InstanceTemplate: data["instance_template"].(string),
TargetSize: expandFixedOrPercent(data["target_size"].([]interface{})),
}
versions = append(versions, &version)
}
return versions
}
func expandFixedOrPercent(configured []interface{}) *computeBeta.FixedOrPercent {
fixedOrPercent := &computeBeta.FixedOrPercent{}
for _, raw := range configured {
data := raw.(map[string]interface{})
if percent := data["percent"]; percent.(int) > 0 {
fixedOrPercent.Percent = int64(percent.(int))
} else {
fixedOrPercent.Fixed = int64(data["fixed"].(int))
fixedOrPercent.ForceSendFields = []string{"Fixed"}
}
}
return fixedOrPercent
}
func expandUpdatePolicy(configured []interface{}) *computeBeta.InstanceGroupManagerUpdatePolicy {
updatePolicy := &computeBeta.InstanceGroupManagerUpdatePolicy{}


@ -269,6 +269,36 @@ func TestAccInstanceGroupManager_separateRegions(t *testing.T) {
})
}
func TestAccInstanceGroupManager_versions(t *testing.T) {
t.Parallel()
var manager computeBeta.InstanceGroupManager
primaryTemplate := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
canaryTemplate := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccInstanceGroupManager_versions(primaryTemplate, canaryTemplate, igm),
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerBetaExists("google_compute_instance_group_manager.igm-basic", &manager),
testAccCheckInstanceGroupManagerVersions("google_compute_instance_group_manager.igm-basic", primaryTemplate, canaryTemplate),
),
},
resource.TestStep{
ResourceName: "google_compute_instance_group_manager.igm-basic",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccInstanceGroupManager_autoHealingPolicies(t *testing.T) {
t.Parallel()
@ -492,6 +522,42 @@ func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, i
}
}
func testAccCheckInstanceGroupManagerVersions(n string, primaryTemplate string, canaryTemplate string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
manager, err := config.clientComputeBeta.InstanceGroupManagers.Get(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if len(manager.Versions) != 2 {
return fmt.Errorf("Expected # of versions to be 2, got %d", len(manager.Versions))
}
primaryVersion := manager.Versions[0]
if !strings.Contains(primaryVersion.InstanceTemplate, primaryTemplate) {
return fmt.Errorf("Expected string \"%s\" to appear in \"%s\"", primaryTemplate, primaryVersion.InstanceTemplate)
}
canaryVersion := manager.Versions[1]
if !strings.Contains(canaryVersion.InstanceTemplate, canaryTemplate) {
return fmt.Errorf("Expected string \"%s\" to appear in \"%s\"", canaryTemplate, canaryVersion.InstanceTemplate)
}
return nil
}
}
func testAccCheckInstanceGroupManagerAutoHealingPolicies(n, hck string, initialDelaySec int64) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@ -1121,6 +1187,73 @@ resource "google_compute_http_health_check" "zero" {
`, template, target, igm, hck)
}
func testAccInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "igm-primary" {
name = "%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-8-jessie-v20160803"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_instance_template" "igm-canary" {
name = "%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-8-jessie-v20160803"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_instance_group_manager" "igm-basic" {
description = "Terraform test instance group manager"
name = "%s"
base_instance_name = "igm-basic"
zone = "us-central1-c"
target_size = 2
version {
name = "primary"
instance_template = "${google_compute_instance_template.igm-primary.self_link}"
}
version {
name = "canary"
instance_template = "${google_compute_instance_template.igm-canary.self_link}"
target_size {
fixed = 1
}
}
}
`, primaryTemplate, canaryTemplate, igm)
}
// This test is to make sure that a single version resource can link to a versioned resource
// without perpetual diffs because the self links mismatch.
// Once auto_healing_policies is no longer beta, we will need to use a new field or resource


@ -15,7 +15,7 @@ and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroupMa
~> **Note:** Use [google_compute_region_instance_group_manager](/docs/providers/google/r/compute_region_instance_group_manager.html) to create a regional (multi-zone) instance group manager.
## Example Usage with top level instance template
```hcl
resource "google_compute_health_check" "autohealing" {
@ -54,6 +54,30 @@ resource "google_compute_instance_group_manager" "appserver" {
}
```
## Example Usage with multiple versions
```hcl
resource "google_compute_instance_group_manager" "appserver" {
name = "appserver-igm"
base_instance_name = "app"
update_strategy = "NONE"
zone = "us-central1-a"
target_size = 5
version {
instance_template = "${google_compute_instance_template.appserver.self_link}"
}
version {
instance_template = "${google_compute_instance_template.appserver-canary.self_link}"
target_size {
fixed = 1
}
}
}
```
## Argument Reference
The following arguments are supported:
@ -65,8 +89,15 @@ The following arguments are supported:
appending a hyphen and a random four-character string to the base instance
name.
* `instance_template` - (Optional) The full URL to an instance template from
which all new instances will be created. Conflicts with `version` (see the
[documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups#relationship_between_instancetemplate_properties_for_a_managed_instance_group)).
* `version` - (Optional) Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Conflicts with `instance_template`. Structure is documented below. Note that exactly
one version must omit its target size: each version with a target size manages that
many instances, while the single version without one manages all remaining instances
(the top-level `target_size` minus the sum of the other versions' target sizes). A
worked sketch follows this list.
* `name` - (Required) The name of the instance group manager. Must be 1-63
characters long and comply with
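
As a worked sketch of the remainder rule described above, assume a top-level `target_size` of 5 and a canary pinned to one instance; the version without a `target_size` then manages the remaining 5 - 1 = 4 instances:

```hcl
resource "google_compute_instance_group_manager" "appserver" {
  name               = "appserver-igm"
  base_instance_name = "app"
  zone               = "us-central1-a"
  target_size        = 5

  version {
    name              = "primary"
    instance_template = "${google_compute_instance_template.appserver.self_link}"
    # No target_size: this version manages the remaining 5 - 1 = 4 instances.
  }

  version {
    name              = "canary"
    instance_template = "${google_compute_instance_template.appserver-canary.self_link}"

    target_size {
      fixed = 1 # Exactly one canary instance, regardless of group size.
    }
  }
}
```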
@ -154,6 +185,40 @@ The **auto_healing_policies** block supports:
* `initial_delay_sec` - (Required) The number of seconds that the managed instance group waits before
it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600.
The **version** block supports:
```hcl
version {
name = "appserver-canary"
instance_template = "${google_compute_instance_template.appserver-canary.self_link}"
target_size {
fixed = 1
}
}
```
```hcl
version {
name = "appserver-canary"
instance_template = "${google_compute_instance_template.appserver-canary.self_link}"
target_size {
percent = 20
}
}
```
* `name` - (Required) Version name.
* `instance_template` - (Required) The full URL to an instance template from which all new instances of this version will be created.
* `target_size` - (Optional) The number of instances for this version, expressed either as a fixed number or as a percentage of the group. Structure is documented below.
The **target_size** block supports:
* `fixed` - (Optional) The number of instances managed by this version. Conflicts with `percent`.
* `percent` - (Optional) The number of instances managed by this version, calculated as a percentage of the group's target size. Conflicts with `fixed`.
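
The difference shows up when the group is resized. A sketch of the two settings side by side, assuming a top-level `target_size` of 10 (only one of the two may appear in a given version):

```hcl
# Pinned: always 2 instances, even if the group grows or shrinks.
target_size {
  fixed = 2
}

# Proportional: 20% of the group, so 2 of 10 instances, 4 of 20, and so on.
target_size {
  percent = 20
}
```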
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are