Fix autoscaler - was unnecessarily forcing to a single metric (#966)

Fix for autoscalers - we were unnecessarily restricting the autoscaling policy to a single custom metric, even though multiple metrics are supported.
Nathan McKinley 2018-01-16 17:10:13 -08:00 committed by GitHub
parent 950e55a94c
commit 719bbb40aa
2 changed files with 118 additions and 69 deletions
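
For context, the kind of configuration this change is meant to support looks roughly like the following - a single autoscaling_policy that combines a CPU target with more than one custom metric. This is a trimmed-down sketch of the new acceptance test added in this commit; the metric names and targets come from that test, while the resource names are illustrative:

    resource "google_compute_autoscaler" "example" {
      name   = "example-autoscaler"
      zone   = "us-central1-a"
      target = "${google_compute_instance_group_manager.example.self_link}"

      autoscaling_policy = {
        max_replicas    = 10
        min_replicas    = 1
        cooldown_period = 60

        cpu_utilization = {
          target = 0.5
        }

        metric {
          name   = "compute.googleapis.com/instance/network/received_bytes_count"
          target = 75
          type   = "GAUGE"
        }

        metric {
          name   = "compute.googleapis.com/instance/network/sent_bytes_count"
          target = 50
          type   = "GAUGE"
        }
      }
    }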

@@ -34,6 +34,7 @@ var autoscalingPolicy *schema.Schema = &schema.Schema{
"cpu_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
@@ -69,6 +70,7 @@ var autoscalingPolicy *schema.Schema = &schema.Schema{
"load_balancing_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
@@ -145,6 +147,8 @@ func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
scaler.Description = v.(string)
}
// You can only have 0 or 1 autoscaling policy per autoscaler, but HCL can't easily express
// "optional object", so instead we have "list of maximum size 1".
prefix := "autoscaling_policy.0."
scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{
@@ -153,39 +157,29 @@ func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)),
}
// Check that only one autoscaling policy is defined
policyCounter := 0
// This list is MaxItems = 1 as well - you can only have 0 or 1 cpu utilization target per autoscaler.
if _, ok := d.GetOk(prefix + "cpu_utilization"); ok {
if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 {
cpuUtilCount := d.Get(prefix + "cpu_utilization.#").(int)
if cpuUtilCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount)
}
policyCounter++
scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{
UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64),
}
}
}
if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok {
if d.Get(prefix+"metric.0.name") != "" {
policyCounter++
metricCount := d.Get(prefix + "metric.#").(int)
if metricCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount)
}
scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{
{
Metric: d.Get(prefix + "metric.0.name").(string),
UtilizationTarget: d.Get(prefix + "metric.0.target").(float64),
UtilizationTargetType: d.Get(prefix + "metric.0.type").(string),
},
var customMetrics []*compute.AutoscalingPolicyCustomMetricUtilization
if metricCount, ok := d.GetOk(prefix + "metric.#"); ok {
for m := 0; m < metricCount.(int); m++ {
if d.Get(fmt.Sprintf("%smetric.%d.name", prefix, m)) != "" {
customMetrics = append(customMetrics, &compute.AutoscalingPolicyCustomMetricUtilization{
Metric: d.Get(fmt.Sprintf("%smetric.%d.name", prefix, m)).(string),
UtilizationTarget: d.Get(fmt.Sprintf("%smetric.%d.target", prefix, m)).(float64),
UtilizationTargetType: d.Get(fmt.Sprintf("%smetric.%d.type", prefix, m)).(string),
})
}
}
}
scaler.AutoscalingPolicy.CustomMetricUtilizations = customMetrics
if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok {
if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 {
policyCounter++
lbuCount := d.Get(prefix + "load_balancing_utilization.#").(int)
if lbuCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount)
@@ -196,10 +190,6 @@ func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
}
}
if policyCounter != 1 {
return nil, fmt.Errorf("One policy must be defined for an autoscaler.")
}
return scaler, nil
}
@@ -271,6 +261,8 @@ func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]interface{} {
for _, customMetricUtilization := range policy.CustomMetricUtilizations {
metricUtil := make(map[string]interface{})
metricUtil["target"] = customMetricUtilization.UtilizationTarget
metricUtil["name"] = customMetricUtilization.Metric
metricUtil["type"] = customMetricUtilization.UtilizationTargetType
metricUtils = append(metricUtils, metricUtil)
}
policyMap["metric"] = metricUtils
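
For readers skimming the schema changes above: the new MaxItems: 1 entries and the "list of maximum size 1" comment reflect how these nested objects are written in configuration - each block appears at most once inside autoscaling_policy, which is why buildAutoscaler reads them with a fixed .0. index. A fragment of the acceptance test config added below shows the shape:

    cpu_utilization = {
      target = 0.5
    }

    load_balancing_utilization = {
      target = 0.5
    }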

@@ -77,6 +77,32 @@ func TestAccComputeAutoscaler_update(t *testing.T) {
})
}
func TestAccComputeAutoscaler_multicondition(t *testing.T) {
t.Parallel()
var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeAutoscalerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeAutoscaler_multicondition(it_name, tp_name, igm_name, autoscaler_name),
Check: testAccCheckComputeAutoscalerMultifunction("google_compute_autoscaler.foobar"),
},
resource.TestStep{
ResourceName: "google_compute_autoscaler.foobar",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@@ -124,6 +150,39 @@ func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
}
}
func testAccCheckComputeAutoscalerMultifunction(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Autoscaler not found")
}
if found.AutoscalingPolicy.CpuUtilization.UtilizationTarget == 0.5 && found.AutoscalingPolicy.LoadBalancingUtilization.UtilizationTarget == 0.5 {
return nil
}
return fmt.Errorf("Util target for CPU: %d, for LB: %d - should have been 0.5 for each.",
found.AutoscalingPolicy.CpuUtilization.UtilizationTarget,
found.AutoscalingPolicy.LoadBalancingUtilization.UtilizationTarget)
}
}
func testAccCheckComputeAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -151,7 +210,7 @@ func testAccCheckComputeAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
}
}
func testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name string) string {
func testAccComputeAutoscaler_scaffolding(it_name, tp_name, igm_name string) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "%s"
@@ -192,7 +251,12 @@ resource "google_compute_instance_group_manager" "foobar" {
base_instance_name = "foobar"
zone = "us-central1-a"
}
`, it_name, tp_name, igm_name)
}
func testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name string) string {
return testAccComputeAutoscaler_scaffolding(it_name, tp_name, igm_name) + fmt.Sprintf(`
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
@@ -208,51 +272,11 @@ resource "google_compute_autoscaler" "foobar" {
}
}
`, it_name, tp_name, igm_name, autoscaler_name)
`, autoscaler_name)
}
func testAccComputeAutoscaler_update(it_name, tp_name, igm_name, autoscaler_name string) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-8-jessie-v20160803"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "%s"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
}
return testAccComputeAutoscaler_scaffolding(it_name, tp_name, igm_name) + fmt.Sprintf(`
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
@@ -268,5 +292,38 @@ resource "google_compute_autoscaler" "foobar" {
}
}
`, it_name, tp_name, igm_name, autoscaler_name)
`, autoscaler_name)
}
func testAccComputeAutoscaler_multicondition(it_name, tp_name, igm_name, autoscaler_name string) string {
return testAccComputeAutoscaler_scaffolding(it_name, tp_name, igm_name) + fmt.Sprintf(`
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
zone = "us-central1-a"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 10
min_replicas = 1
cooldown_period = 60
cpu_utilization = {
target = 0.5
}
load_balancing_utilization = {
target = 0.5
}
metric {
name = "compute.googleapis.com/instance/network/received_bytes_count"
target = 75
type = "GAUGE"
}
metric {
name = "compute.googleapis.com/instance/network/sent_bytes_count"
target = 50
type = "GAUGE"
}
}
}
`, autoscaler_name)
}