Add generated google_compute_autoscaler and google_compute_region_autoscaler resources. Because this adds the ability to import by more than just the name, we can get rid of all the code in resource_compute_autoscaler.go that tries to find the autoscaler in the provider-specified region. (#1700)

Authored by The Magician on 2018-06-27 12:49:34 -07:00; committed by Dana Hoffman
parent 97b8419ea5
commit 5976152ad9
7 changed files with 1458 additions and 477 deletions
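The generated resources register a custom import function (resourceComputeAutoscalerImport, shown in the diff below) instead of ImportStatePassthrough, so an autoscaler can be imported by several ID formats rather than only its name. A minimal standalone sketch of that matching behavior, reusing the regex patterns the generated importer registers — the project, zone, and autoscaler names are hypothetical, and this is illustration only, not code from the commit:

```go
package main

import (
	"fmt"
	"regexp"
)

// The same ID patterns the generated importer passes to parseImportId, in the
// same order. Whichever pattern matches supplies project, zone, and name for
// the imported resource.
var importIDFormats = []string{
	"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/autoscalers/(?P<name>[^/]+)",
	"(?P<zone>[^/]+)/(?P<name>[^/]+)",
	"(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<name>[^/]+)",
	"(?P<name>[^/]+)",
}

func main() {
	// Hypothetical import ID; any of the four formats above would be accepted.
	id := "projects/my-project/zones/us-central1-a/autoscalers/my-autoscaler"
	for _, format := range importIDFormats {
		re := regexp.MustCompile("^" + format + "$")
		match := re.FindStringSubmatch(id)
		if match == nil {
			continue
		}
		for i, field := range re.SubexpNames() {
			if i > 0 && field != "" {
				fmt.Printf("%s = %s\n", field, match[i])
			}
		}
		break
	}
}
```

Because the resource ID now carries the zone, the read function can fetch the autoscaler directly instead of falling back to the removed getZonalResourceFromRegion search.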

View File

@ -18,12 +18,14 @@ import "github.com/hashicorp/terraform/helper/schema"
var GeneratedComputeResourcesMap = map[string]*schema.Resource{
"google_compute_address": resourceComputeAddress(),
"google_compute_autoscaler": resourceComputeAutoscaler(),
"google_compute_backend_bucket": resourceComputeBackendBucket(),
"google_compute_disk": resourceComputeDisk(),
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
"google_compute_global_address": resourceComputeGlobalAddress(),
"google_compute_http_health_check": resourceComputeHttpHealthCheck(),
"google_compute_https_health_check": resourceComputeHttpsHealthCheck(),
"google_compute_region_autoscaler": resourceComputeRegionAutoscaler(),
"google_compute_route": resourceComputeRoute(),
"google_compute_ssl_policy": resourceComputeSslPolicy(),
"google_compute_subnetwork": resourceComputeSubnetwork(),

View File

@ -1,133 +1,152 @@
// ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------
package google
import (
"fmt"
"log"
"google.golang.org/api/compute/v1"
"reflect"
"strconv"
"time"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
compute "google.golang.org/api/compute/v1"
)
var autoscalingPolicy *schema.Schema = &schema.Schema{
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_replicas": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"max_replicas": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"cooldown_period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"cpu_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
},
},
},
"metric": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
},
},
},
"load_balancing_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
},
},
},
},
},
}
func resourceComputeAutoscaler() *schema.Resource {
return &schema.Resource{
Create: resourceComputeAutoscalerCreate,
Read: resourceComputeAutoscalerRead,
Update: resourceComputeAutoscalerUpdate,
Delete: resourceComputeAutoscalerDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
State: resourceComputeAutoscalerImport,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(240 * time.Second),
Update: schema.DefaultTimeout(240 * time.Second),
Delete: schema.DefaultTimeout(240 * time.Second),
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
"autoscaling_policy": {
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_replicas": {
Type: schema.TypeInt,
Required: true,
},
"min_replicas": {
Type: schema.TypeInt,
Required: true,
},
"cooldown_period": {
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"cpu_utilization": {
Type: schema.TypeList,
Computed: true,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": {
Type: schema.TypeFloat,
Required: true,
},
},
},
},
"metric": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"target": {
Type: schema.TypeFloat,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"}, false),
},
},
},
},
"load_balancing_utilization": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": {
Type: schema.TypeFloat,
Required: true,
},
},
},
},
},
},
},
"target": &schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateGCPName,
},
"target": {
Type: schema.TypeString,
Required: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"description": {
Type: schema.TypeString,
Required: true,
Optional: true,
},
"zone": &schema.Schema{
"zone": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"creation_timestamp": {
Type: schema.TypeString,
Computed: true,
},
"project": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"autoscaling_policy": autoscalingPolicy,
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"self_link": &schema.Schema{
"self_link": {
Type: schema.TypeString,
Computed: true,
},
@ -135,64 +154,6 @@ func resourceComputeAutoscaler() *schema.Resource {
}
}
func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
// Build the parameter
scaler := &compute.Autoscaler{
Name: d.Get("name").(string),
Target: d.Get("target").(string),
}
// Optional fields
if v, ok := d.GetOk("description"); ok {
scaler.Description = v.(string)
}
// You can only have 0 or 1 autoscaling policy per autoscaler, but HCL can't easily express
// "optional object", so instead we have "list of maximum size 1".
prefix := "autoscaling_policy.0."
scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{
MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)),
MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)),
CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)),
}
// This list is MaxItems = 1 as well - you can only have 0 or 1 cpu utilization target per autoscaler.
if _, ok := d.GetOk(prefix + "cpu_utilization"); ok {
if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 {
scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{
UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64),
}
}
}
var customMetrics []*compute.AutoscalingPolicyCustomMetricUtilization
if metricCount, ok := d.GetOk(prefix + "metric.#"); ok {
for m := 0; m < metricCount.(int); m++ {
if d.Get(fmt.Sprintf("%smetric.%d.name", prefix, m)) != "" {
customMetrics = append(customMetrics, &compute.AutoscalingPolicyCustomMetricUtilization{
Metric: d.Get(fmt.Sprintf("%smetric.%d.name", prefix, m)).(string),
UtilizationTarget: d.Get(fmt.Sprintf("%smetric.%d.target", prefix, m)).(float64),
UtilizationTargetType: d.Get(fmt.Sprintf("%smetric.%d.type", prefix, m)).(string),
})
}
}
}
scaler.AutoscalingPolicy.CustomMetricUtilizations = customMetrics
if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok {
if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 {
lbuCount := d.Get(prefix + "load_balancing_utilization.#").(int)
if lbuCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount)
}
scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{
UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64),
}
}
}
return scaler, nil
}
func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
@ -201,74 +162,75 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e
return err
}
// Get the zone
z, err := getZone(d, config)
obj := make(map[string]interface{})
nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
obj["name"] = nameProp
}
log.Printf("[DEBUG] Loading zone: %s", z)
zone, err := config.clientCompute.Zones.Get(
project, z).Do()
descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config)
if err != nil {
return fmt.Errorf(
"Error loading zone '%s': %s", z, err)
return err
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) {
obj["autoscalingPolicy"] = autoscalingPolicyProp
}
targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) {
obj["target"] = targetProp
}
zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) {
obj["zone"] = zoneProp
}
scaler, err := buildAutoscaler(d)
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers")
if err != nil {
return err
}
op, err := config.clientCompute.Autoscalers.Insert(
project, zone.Name, scaler).Do()
log.Printf("[DEBUG] Creating new Autoscaler: %#v", obj)
res, err := Post(config, url, obj)
if err != nil {
return fmt.Errorf("Error creating Autoscaler: %s", err)
}
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
// Store the ID now
id, err := replaceVars(d, config, "{{zone}}/{{name}}")
if err != nil {
return fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
err = computeOperationWait(config.clientCompute, op, project, "Creating Autoscaler")
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
return resourceComputeAutoscalerRead(d, meta)
}
waitErr := computeOperationWaitTime(
config.clientCompute, op, project, "Creating Autoscaler",
int(d.Timeout(schema.TimeoutCreate).Minutes()))
func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]interface{} {
result := make([]map[string]interface{}, 0, 1)
policyMap := make(map[string]interface{})
policyMap["max_replicas"] = policy.MaxNumReplicas
policyMap["min_replicas"] = policy.MinNumReplicas
policyMap["cooldown_period"] = policy.CoolDownPeriodSec
if policy.CpuUtilization != nil {
cpuUtils := make([]map[string]interface{}, 0, 1)
cpuUtil := make(map[string]interface{})
cpuUtil["target"] = policy.CpuUtilization.UtilizationTarget
cpuUtils = append(cpuUtils, cpuUtil)
policyMap["cpu_utilization"] = cpuUtils
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return fmt.Errorf("Error waiting to create Autoscaler: %s", waitErr)
}
if policy.LoadBalancingUtilization != nil {
loadBalancingUtils := make([]map[string]interface{}, 0, 1)
loadBalancingUtil := make(map[string]interface{})
loadBalancingUtil["target"] = policy.LoadBalancingUtilization.UtilizationTarget
loadBalancingUtils = append(loadBalancingUtils, loadBalancingUtil)
policyMap["load_balancing_utilization"] = loadBalancingUtils
}
if policy.CustomMetricUtilizations != nil {
metricUtils := make([]map[string]interface{}, 0, len(policy.CustomMetricUtilizations))
for _, customMetricUtilization := range policy.CustomMetricUtilizations {
metricUtil := make(map[string]interface{})
metricUtil["target"] = customMetricUtilization.UtilizationTarget
metricUtil["name"] = customMetricUtilization.Metric
metricUtil["type"] = customMetricUtilization.UtilizationTargetType
metricUtils = append(metricUtils, metricUtil)
}
policyMap["metric"] = metricUtils
}
result = append(result, policyMap)
return result
log.Printf("[DEBUG] Finished creating Autoscaler %q: %#v", d.Id(), res)
return resourceComputeAutoscalerRead(d, meta)
}
func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
@ -279,49 +241,39 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err
return err
}
region, err := getRegion(d, config)
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}")
if err != nil {
return err
}
var getAutoscaler = func(zone string) (interface{}, error) {
return config.clientCompute.Autoscalers.Get(project, zone, d.Id()).Do()
res, err := Get(config, url)
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("ComputeAutoscaler %q", d.Id()))
}
var scaler *compute.Autoscaler
var e error
if zone, _ := getZone(d, config); zone != "" {
scaler, e = config.clientCompute.Autoscalers.Get(project, zone, d.Id()).Do()
if e != nil {
return handleNotFoundError(e, d, fmt.Sprintf("Autoscaler %q", d.Id()))
}
} else {
// If the resource was imported, the only info we have is the ID. Try to find the resource
// by searching in the region of the project.
var resource interface{}
resource, e = getZonalResourceFromRegion(getAutoscaler, region, config.clientCompute, project)
if e != nil {
return e
}
scaler = resource.(*compute.Autoscaler)
if err := d.Set("creation_timestamp", flattenComputeAutoscalerCreationTimestamp(res["creationTimestamp"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if scaler == nil {
log.Printf("[WARN] Removing Autoscaler %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
if err := d.Set("name", flattenComputeAutoscalerName(res["name"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
d.Set("project", project)
d.Set("self_link", scaler.SelfLink)
d.Set("name", scaler.Name)
d.Set("target", scaler.Target)
d.Set("zone", GetResourceNameFromSelfLink(scaler.Zone))
d.Set("description", scaler.Description)
if scaler.AutoscalingPolicy != nil {
d.Set("autoscaling_policy", flattenAutoscalingPolicy(scaler.AutoscalingPolicy))
if err := d.Set("description", flattenComputeAutoscalerDescription(res["description"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if err := d.Set("autoscaling_policy", flattenComputeAutoscalerAutoscalingPolicy(res["autoscalingPolicy"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if err := d.Set("target", flattenComputeAutoscalerTarget(res["target"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if err := d.Set("zone", flattenComputeAutoscalerZone(res["zone"])); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
if err := d.Set("project", project); err != nil {
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
return nil
@ -335,26 +287,60 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e
return err
}
zone, err := getZone(d, config)
obj := make(map[string]interface{})
nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) {
obj["name"] = nameProp
}
descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) {
obj["autoscalingPolicy"] = autoscalingPolicyProp
}
targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) {
obj["target"] = targetProp
}
zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, zoneProp)) {
obj["zone"] = zoneProp
}
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers?autoscaler={{name}}")
if err != nil {
return err
}
scaler, err := buildAutoscaler(d)
log.Printf("[DEBUG] Updating Autoscaler %q: %#v", d.Id(), obj)
res, err := sendRequest(config, "PUT", url, obj)
if err != nil {
return fmt.Errorf("Error updating Autoscaler %q: %s", d.Id(), err)
}
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
op, err := config.clientCompute.Autoscalers.Update(
project, zone, scaler).Do()
if err != nil {
return fmt.Errorf("Error updating Autoscaler: %s", err)
}
err = computeOperationWaitTime(
config.clientCompute, op, project, "Updating Autoscaler",
int(d.Timeout(schema.TimeoutUpdate).Minutes()))
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
err = computeOperationWait(config.clientCompute, op, project, "Updating Autoscaler")
if err != nil {
return err
}
@ -370,21 +356,334 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e
return err
}
zone, err := getZone(d, config)
if err != nil {
return err
}
op, err := config.clientCompute.Autoscalers.Delete(
project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting autoscaler: %s", err)
}
err = computeOperationWait(config.clientCompute, op, project, "Deleting Autoscaler")
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}")
if err != nil {
return err
}
d.SetId("")
log.Printf("[DEBUG] Deleting Autoscaler %q", d.Id())
res, err := Delete(config, url)
if err != nil {
return handleNotFoundError(err, d, "Autoscaler")
}
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
err = computeOperationWaitTime(
config.clientCompute, op, project, "Deleting Autoscaler",
int(d.Timeout(schema.TimeoutDelete).Minutes()))
if err != nil {
return err
}
log.Printf("[DEBUG] Finished deleting Autoscaler %q: %#v", d.Id(), res)
return nil
}
func resourceComputeAutoscalerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)
parseImportId([]string{"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/autoscalers/(?P<name>[^/]+)", "(?P<zone>[^/]+)/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config)
// Replace import id for the resource id
id, err := replaceVars(d, config, "{{zone}}/{{name}}")
if err != nil {
return nil, fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
return []*schema.ResourceData{d}, nil
}
func flattenComputeAutoscalerCreationTimestamp(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerName(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerDescription(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerAutoscalingPolicy(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["min_replicas"] =
flattenComputeAutoscalerAutoscalingPolicyMinReplicas(original["minNumReplicas"])
transformed["max_replicas"] =
flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(original["maxNumReplicas"])
transformed["cooldown_period"] =
flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(original["coolDownPeriodSec"])
transformed["cpu_utilization"] =
flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(original["cpuUtilization"])
transformed["metric"] =
flattenComputeAutoscalerAutoscalingPolicyMetric(original["customMetricUtilizations"])
transformed["load_balancing_utilization"] =
flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["loadBalancingUtilization"])
return []interface{}{transformed}
}
func flattenComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["target"] =
flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["utilizationTarget"])
return []interface{}{transformed}
}
func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerAutoscalingPolicyMetric(v interface{}) interface{} {
if v == nil {
return v
}
l := v.([]interface{})
transformed := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed = append(transformed, map[string]interface{}{
"name": flattenComputeAutoscalerAutoscalingPolicyMetricName(original["metric"]),
"target": flattenComputeAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"]),
"type": flattenComputeAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"]),
})
}
return transformed
}
func flattenComputeAutoscalerAutoscalingPolicyMetricName(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerAutoscalingPolicyMetricType(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["target"] =
flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"])
return []interface{}{transformed}
}
func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerTarget(v interface{}) interface{} {
return v
}
func flattenComputeAutoscalerZone(v interface{}) interface{} {
return v
}
func expandComputeAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedMinReplicas, err := expandComputeAutoscalerAutoscalingPolicyMinReplicas(original["min_replicas"], d, config)
if err != nil {
return nil, err
}
transformed["minNumReplicas"] = transformedMinReplicas
transformedMaxReplicas, err := expandComputeAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config)
if err != nil {
return nil, err
}
transformed["maxNumReplicas"] = transformedMaxReplicas
transformedCooldownPeriod, err := expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config)
if err != nil {
return nil, err
}
transformed["coolDownPeriodSec"] = transformedCooldownPeriod
transformedCpuUtilization, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config)
if err != nil {
return nil, err
}
transformed["cpuUtilization"] = transformedCpuUtilization
transformedMetric, err := expandComputeAutoscalerAutoscalingPolicyMetric(original["metric"], d, config)
if err != nil {
return nil, err
}
transformed["customMetricUtilizations"] = transformedMetric
transformedLoadBalancingUtilization, err := expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config)
if err != nil {
return nil, err
}
transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization
req = append(req, transformed)
}
return req, nil
}
func expandComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
req = append(req, transformed)
}
return req, nil
}
func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedName, err := expandComputeAutoscalerAutoscalingPolicyMetricName(original["name"], d, config)
if err != nil {
return nil, err
}
transformed["metric"] = transformedName
transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
transformedType, err := expandComputeAutoscalerAutoscalingPolicyMetricType(original["type"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTargetType"] = transformedType
req = append(req, transformed)
}
return req, nil
}
func expandComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
req = append(req, transformed)
}
return req, nil
}
func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeAutoscalerTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
f, err := parseZonalFieldValue("instanceGroupManagers", v.(string), "project", "zone", d, config, true)
if err != nil {
return nil, fmt.Errorf("Invalid value for target: %s", err)
}
return "https://www.googleapis.com/compute/v1/" + f.RelativeLink(), nil
}
func expandComputeAutoscalerZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true)
if err != nil {
return nil, fmt.Errorf("Invalid value for zone: %s", err)
}
return f.RelativeLink(), nil
}

View File

@ -2,6 +2,7 @@ package google
import (
"fmt"
"strings"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@ -111,8 +112,10 @@ func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error {
continue
}
idParts := strings.Split(rs.Primary.ID, "/")
zone, name := idParts[0], idParts[1]
_, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
config.Project, zone, name).Do()
if err == nil {
return fmt.Errorf("Autoscaler still exists")
}
@ -134,13 +137,15 @@ func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler)
config := testAccProvider.Meta().(*Config)
idParts := strings.Split(rs.Primary.ID, "/")
zone, name := idParts[0], idParts[1]
found, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
config.Project, zone, name).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
if found.Name != name {
return fmt.Errorf("Autoscaler not found")
}
@ -163,13 +168,15 @@ func testAccCheckComputeAutoscalerMultifunction(n string) resource.TestCheckFunc
config := testAccProvider.Meta().(*Config)
idParts := strings.Split(rs.Primary.ID, "/")
zone, name := idParts[0], idParts[1]
found, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
config.Project, zone, name).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
if found.Name != name {
return fmt.Errorf("Autoscaler not found")
}
@ -196,8 +203,10 @@ func testAccCheckComputeAutoscalerUpdated(n string, max int64) resource.TestChec
config := testAccProvider.Meta().(*Config)
idParts := strings.Split(rs.Primary.ID, "/")
zone, name := idParts[0], idParts[1]
ascaler, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
config.Project, zone, name).Do()
if err != nil {
return err
}

View File

@ -1,10 +1,29 @@
// ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------
package google
import (
"fmt"
"log"
"reflect"
"strconv"
"time"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
compute "google.golang.org/api/compute/v1"
)
func resourceComputeRegionAutoscaler() *schema.Resource {
@ -13,43 +32,120 @@ func resourceComputeRegionAutoscaler() *schema.Resource {
Read: resourceComputeRegionAutoscalerRead,
Update: resourceComputeRegionAutoscalerUpdate,
Delete: resourceComputeRegionAutoscalerDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
State: resourceComputeRegionAutoscalerImport,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(240 * time.Second),
Update: schema.DefaultTimeout(240 * time.Second),
Delete: schema.DefaultTimeout(240 * time.Second),
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
"autoscaling_policy": {
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_replicas": {
Type: schema.TypeInt,
Required: true,
},
"min_replicas": {
Type: schema.TypeInt,
Required: true,
},
"cooldown_period": {
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"cpu_utilization": {
Type: schema.TypeList,
Computed: true,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": {
Type: schema.TypeFloat,
Required: true,
},
},
},
},
"metric": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"target": {
Type: schema.TypeFloat,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"}, false),
},
},
},
},
"load_balancing_utilization": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": {
Type: schema.TypeFloat,
Required: true,
},
},
},
},
},
},
},
"target": &schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateGCPName,
},
"target": {
Type: schema.TypeString,
Required: true,
},
"region": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"autoscaling_policy": autoscalingPolicy,
"description": &schema.Schema{
"description": {
Type: schema.TypeString,
Optional: true,
},
"project": &schema.Schema{
"region": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"creation_timestamp": {
Type: schema.TypeString,
Computed: true,
},
"project": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"self_link": &schema.Schema{
"self_link": {
Type: schema.TypeString,
Computed: true,
},
@ -65,34 +161,74 @@ func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interfac
return err
}
// Get the region
log.Printf("[DEBUG] Loading region: %s", d.Get("region").(string))
region, err := config.clientCompute.Regions.Get(
project, d.Get("region").(string)).Do()
obj := make(map[string]interface{})
nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config)
if err != nil {
return fmt.Errorf(
"Error loading region '%s': %s", d.Get("region").(string), err)
return err
} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
obj["name"] = nameProp
}
descriptionProp, err := expandComputeRegionAutoscalerDescription(d.Get("description"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) {
obj["autoscalingPolicy"] = autoscalingPolicyProp
}
targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) {
obj["target"] = targetProp
}
regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) {
obj["region"] = regionProp
}
scaler, err := buildAutoscaler(d)
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers")
if err != nil {
return err
}
op, err := config.clientCompute.RegionAutoscalers.Insert(
project, region.Name, scaler).Do()
log.Printf("[DEBUG] Creating new RegionAutoscaler: %#v", obj)
res, err := Post(config, url, obj)
if err != nil {
return fmt.Errorf("Error creating Autoscaler: %s", err)
return fmt.Errorf("Error creating RegionAutoscaler: %s", err)
}
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
// Store the ID now
id, err := replaceVars(d, config, "{{region}}/{{name}}")
if err != nil {
return fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
err = computeOperationWait(config.clientCompute, op, project, "Creating Autoscaler")
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
waitErr := computeOperationWaitTime(
config.clientCompute, op, project, "Creating RegionAutoscaler",
int(d.Timeout(schema.TimeoutCreate).Minutes()))
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return fmt.Errorf("Error waiting to create RegionAutoscaler: %s", waitErr)
}
log.Printf("[DEBUG] Finished creating RegionAutoscaler %q: %#v", d.Id(), res)
return resourceComputeRegionAutoscalerRead(d, meta)
}
@ -104,31 +240,39 @@ func resourceComputeRegionAutoscalerRead(d *schema.ResourceData, meta interface{
return err
}
region, err := getRegion(d, config)
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers/{{name}}")
if err != nil {
return err
}
scaler, err := config.clientCompute.RegionAutoscalers.Get(
project, region, d.Id()).Do()
res, err := Get(config, url)
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("Autoscaler %q", d.Id()))
return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionAutoscaler %q", d.Id()))
}
if scaler == nil {
log.Printf("[WARN] Removing Autoscaler %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
if err := d.Set("creation_timestamp", flattenComputeRegionAutoscalerCreationTimestamp(res["creationTimestamp"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
d.Set("self_link", scaler.SelfLink)
d.Set("name", scaler.Name)
d.Set("target", scaler.Target)
d.Set("region", GetResourceNameFromSelfLink(scaler.Region))
d.Set("description", scaler.Description)
d.Set("project", project)
if scaler.AutoscalingPolicy != nil {
d.Set("autoscaling_policy", flattenAutoscalingPolicy(scaler.AutoscalingPolicy))
if err := d.Set("name", flattenComputeRegionAutoscalerName(res["name"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("description", flattenComputeRegionAutoscalerDescription(res["description"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("autoscaling_policy", flattenComputeRegionAutoscalerAutoscalingPolicy(res["autoscalingPolicy"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("target", flattenComputeRegionAutoscalerTarget(res["target"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("region", flattenComputeRegionAutoscalerRegion(res["region"])); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
if err := d.Set("project", project); err != nil {
return fmt.Errorf("Error reading RegionAutoscaler: %s", err)
}
return nil
@ -142,23 +286,60 @@ func resourceComputeRegionAutoscalerUpdate(d *schema.ResourceData, meta interfac
return err
}
region := d.Get("region").(string)
obj := make(map[string]interface{})
nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) {
obj["name"] = nameProp
}
descriptionProp, err := expandComputeRegionAutoscalerDescription(d.Get("description"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) {
obj["autoscalingPolicy"] = autoscalingPolicyProp
}
targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) {
obj["target"] = targetProp
}
regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) {
obj["region"] = regionProp
}
scaler, err := buildAutoscaler(d)
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers?autoscaler={{name}}")
if err != nil {
return err
}
op, err := config.clientCompute.RegionAutoscalers.Update(
project, region, scaler).Do()
log.Printf("[DEBUG] Updating RegionAutoscaler %q: %#v", d.Id(), obj)
res, err := sendRequest(config, "PUT", url, obj)
if err != nil {
return fmt.Errorf("Error updating Autoscaler: %s", err)
return fmt.Errorf("Error updating RegionAutoscaler %q: %s", d.Id(), err)
}
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
err = computeOperationWaitTime(
config.clientCompute, op, project, "Updating RegionAutoscaler",
int(d.Timeout(schema.TimeoutUpdate).Minutes()))
err = computeOperationWait(config.clientCompute, op, project, "Updating Autoscaler")
if err != nil {
return err
}
@ -174,18 +355,330 @@ func resourceComputeRegionAutoscalerDelete(d *schema.ResourceData, meta interfac
return err
}
region := d.Get("region").(string)
op, err := config.clientCompute.RegionAutoscalers.Delete(
project, region, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting autoscaler: %s", err)
}
err = computeOperationWait(config.clientCompute, op, project, "Deleting Autoscaler")
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers/{{name}}")
if err != nil {
return err
}
d.SetId("")
log.Printf("[DEBUG] Deleting RegionAutoscaler %q", d.Id())
res, err := Delete(config, url)
if err != nil {
return handleNotFoundError(err, d, "RegionAutoscaler")
}
op := &compute.Operation{}
err = Convert(res, op)
if err != nil {
return err
}
err = computeOperationWaitTime(
config.clientCompute, op, project, "Deleting RegionAutoscaler",
int(d.Timeout(schema.TimeoutDelete).Minutes()))
if err != nil {
return err
}
log.Printf("[DEBUG] Finished deleting RegionAutoscaler %q: %#v", d.Id(), res)
return nil
}
func resourceComputeRegionAutoscalerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)
parseImportId([]string{"projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/autoscalers/(?P<name>[^/]+)", "(?P<region>[^/]+)/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config)
// Replace import id for the resource id
id, err := replaceVars(d, config, "{{region}}/{{name}}")
if err != nil {
return nil, fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
return []*schema.ResourceData{d}, nil
}
func flattenComputeRegionAutoscalerCreationTimestamp(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerName(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerDescription(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicy(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["min_replicas"] =
flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(original["minNumReplicas"])
transformed["max_replicas"] =
flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(original["maxNumReplicas"])
transformed["cooldown_period"] =
flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(original["coolDownPeriodSec"])
transformed["cpu_utilization"] =
flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(original["cpuUtilization"])
transformed["metric"] =
flattenComputeRegionAutoscalerAutoscalingPolicyMetric(original["customMetricUtilizations"])
transformed["load_balancing_utilization"] =
flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["loadBalancingUtilization"])
return []interface{}{transformed}
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
return intVal
} // let terraform core handle it if we can't convert the string to an int.
}
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["target"] =
flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["utilizationTarget"])
return []interface{}{transformed}
}
func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}) interface{} {
if v == nil {
return v
}
l := v.([]interface{})
transformed := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed = append(transformed, map[string]interface{}{
"name": flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(original["metric"]),
"target": flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"]),
"type": flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"]),
})
}
return transformed
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["target"] =
flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"])
return []interface{}{transformed}
}
func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerTarget(v interface{}) interface{} {
return v
}
func flattenComputeRegionAutoscalerRegion(v interface{}) interface{} {
return v
}
func expandComputeRegionAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedMinReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(original["min_replicas"], d, config)
if err != nil {
return nil, err
}
transformed["minNumReplicas"] = transformedMinReplicas
transformedMaxReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config)
if err != nil {
return nil, err
}
transformed["maxNumReplicas"] = transformedMaxReplicas
transformedCooldownPeriod, err := expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config)
if err != nil {
return nil, err
}
transformed["coolDownPeriodSec"] = transformedCooldownPeriod
transformedCpuUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config)
if err != nil {
return nil, err
}
transformed["cpuUtilization"] = transformedCpuUtilization
transformedMetric, err := expandComputeRegionAutoscalerAutoscalingPolicyMetric(original["metric"], d, config)
if err != nil {
return nil, err
}
transformed["customMetricUtilizations"] = transformedMetric
transformedLoadBalancingUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config)
if err != nil {
return nil, err
}
transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization
req = append(req, transformed)
}
return req, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
req = append(req, transformed)
}
return req, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedName, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricName(original["name"], d, config)
if err != nil {
return nil, err
}
transformed["metric"] = transformedName
transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
transformedType, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricType(original["type"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTargetType"] = transformedType
req = append(req, transformed)
}
return req, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config)
if err != nil {
return nil, err
}
transformed["utilizationTarget"] = transformedTarget
req = append(req, transformed)
}
return req, nil
}
func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerTarget(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeRegionAutoscalerRegion(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true)
if err != nil {
return nil, fmt.Errorf("Invalid value for region: %s", err)
}
return f.RelativeLink(), nil
}

View File

@ -2,6 +2,7 @@ package google
import (
"fmt"
"strings"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@ -80,8 +81,9 @@ func testAccCheckComputeRegionAutoscalerDestroy(s *terraform.State) error {
continue
}
_, err := config.clientCompute.RegionAutoscalers.Get(
config.Project, rs.Primary.Attributes["region"], rs.Primary.ID).Do()
idParts := strings.Split(rs.Primary.ID, "/")
region, name := idParts[0], idParts[1]
_, err := config.clientCompute.RegionAutoscalers.Get(config.Project, region, name).Do()
if err == nil {
return fmt.Errorf("Autoscaler still exists")
}
@ -103,13 +105,14 @@ func testAccCheckComputeRegionAutoscalerExists(n string, ascaler *compute.Autosc
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.RegionAutoscalers.Get(
config.Project, rs.Primary.Attributes["region"], rs.Primary.ID).Do()
idParts := strings.Split(rs.Primary.ID, "/")
region, name := idParts[0], idParts[1]
found, err := config.clientCompute.RegionAutoscalers.Get(config.Project, region, name).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
if found.Name != name {
return fmt.Errorf("Autoscaler not found")
}
@ -132,8 +135,9 @@ func testAccCheckComputeRegionAutoscalerUpdated(n string, max int64) resource.Te
config := testAccProvider.Meta().(*Config)
ascaler, err := config.clientCompute.RegionAutoscalers.Get(
config.Project, rs.Primary.Attributes["region"], rs.Primary.ID).Do()
idParts := strings.Split(rs.Primary.ID, "/")
region, name := idParts[0], idParts[1]
ascaler, err := config.clientCompute.RegionAutoscalers.Get(config.Project, region, name).Do()
if err != nil {
return err
}

View File

@ -1,22 +1,37 @@
---
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file in
# .github/CONTRIBUTING.md.
#
# ----------------------------------------------------------------------------
layout: "google"
page_title: "Google: google_compute_autoscaler"
sidebar_current: "docs-google-compute-autoscaler"
description: |-
Manages an Autoscaler within GCE.
Represents an Autoscaler resource.
---
# google\_compute\_autoscaler
A Compute Engine Autoscaler automatically adds or removes virtual machines from
a managed instance group based on increases or decreases in load. This allows
your applications to gracefully handle increases in traffic and reduces cost
when the need for resources is lower. You just define the autoscaling policy and
the autoscaler performs automatic scaling based on the measured load. For more
information, see [the official
documentation](https://cloud.google.com/compute/docs/autoscaler/) and
[API](https://cloud.google.com/compute/docs/reference/latest/autoscalers)
Represents an Autoscaler resource.
Autoscalers allow you to automatically scale virtual machine instances in
managed instance groups according to an autoscaling policy that you
define.
To get more information about Autoscaler, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers)
* How-to Guides
* [Autoscaling Groups of Instances](https://cloud.google.com/compute/docs/autoscaler/)
## Example Usage
@ -79,77 +94,149 @@ resource "google_compute_autoscaler" "foobar" {
The following arguments are supported:
* `name` - (Required) The name of the autoscaler.
* `name` -
(Required)
Name of the resource. The name must be 1-63 characters long and match
the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
* `autoscaling_policy` -
(Required)
The configuration parameters for the autoscaling algorithm. You can
define one or more of the policies for an autoscaler: cpuUtilization,
customMetricUtilizations, and loadBalancingUtilization.
* `target` - (Required) The full URL to the instance group manager whose size we
control.
If none of these are specified, the default will be to autoscale based
on cpuUtilization to 0.6 or 60%. Structure is documented below.
* `target` -
(Required)
URL of the managed instance group that this autoscaler will scale.
* `zone` - (Required) The zone of the target.
The `autoscaling_policy` block supports:
* `min_replicas` -
(Required)
The minimum number of replicas that the autoscaler can scale down
to. This cannot be less than 0. If not provided, the autoscaler will
choose a default value depending on the maximum number of instances
allowed.
* `max_replicas` -
(Required)
The maximum number of instances that the autoscaler can scale up
to. This is required when creating or updating an autoscaler. The
maximum number of replicas should not be lower than the minimum
number of replicas.
* `cooldown_period` -
(Optional)
The number of seconds that the autoscaler should wait before it
starts collecting information from a new instance. This prevents
the autoscaler from collecting information while the instance is
initializing, during which the collected usage would not be
reliable. The default time the autoscaler waits is 60 seconds.
* `autoscaling_policy` - (Required) The parameters of the autoscaling
algorithm. Structure is documented below.
Virtual machine initialization times might vary because of
numerous factors. We recommend that you test how long an
instance may take to initialize. To do this, create an instance
and time the startup process.
* `cpu_utilization` -
(Optional)
Defines the CPU utilization policy that allows the autoscaler to
scale based on the average CPU utilization of a managed instance
group. Structure is documented below.
* `metric` -
(Optional)
Defines a custom metric utilization policy that allows the autoscaler
to scale based on a Stackdriver Monitoring metric exported by instances
in the managed instance group. Structure is documented below.
* `load_balancing_utilization` -
(Optional)
Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
The `cpu_utilization` block supports:
* `target` -
(Required)
The target CPU utilization that the autoscaler should maintain.
Must be a float value in the range (0, 1]. If not specified, the
default is 0.6.
If the CPU level is below the target utilization, the autoscaler
scales down the number of instances until it reaches the minimum
number of instances you specified or until the average CPU of
your instances reaches the target utilization.
If the average CPU is above the target utilization, the autoscaler
scales up until it reaches the maximum number of instances you
specified or until the average utilization reaches the target
utilization.
The `metric` block supports:
* `name` -
(Required)
The identifier (type) of the Stackdriver Monitoring metric.
The metric cannot have negative values.
The metric must have a value type of INT64 or DOUBLE.
* `target` -
(Required)
The target value of the metric that the autoscaler should
maintain. This must be a positive value. A utilization
metric scales the number of virtual machines handling requests
up or down in proportion to the metric.
For example, a good metric to use as a `utilizationTarget` is
`www.googleapis.com/compute/instance/network/received_bytes_count`.
The autoscaler will work to keep this value constant for each
of the instances.
* `type` -
(Required)
Defines how the target utilization value is expressed for a
Stackdriver Monitoring metric. One of `GAUGE`, `DELTA_PER_SECOND`,
or `DELTA_PER_MINUTE`.
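As a sketch of how these fields fit together (values are illustrative only; the enclosing resource is assumed to define `name`, `target`, and the rest of `autoscaling_policy`):

```
metric {
  name   = "www.googleapis.com/compute/instance/network/received_bytes_count"
  target = 200
  type   = "DELTA_PER_SECOND"
}
```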
The `load_balancing_utilization` block supports:
* `target` -
(Required)
Fraction of backend capacity utilization (set in HTTP(S) load
balancing configuration) that the autoscaler should maintain. Must
be a positive float value. If not defined, the default is 0.8.
- - -
* `description` - (Optional) An optional textual description of the instance
group manager.
* `project` - (Optional) The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
The `autoscaling_policy` block contains:
* `max_replicas` - (Required) The group will never be larger than this.
* `min_replicas` - (Required) The group will never be smaller than this.
* `cooldown_period` - (Optional) Period to wait between changes. This should be
at least double the time your instances take to start up.
* `cpu_utilization` - (Optional) A policy that scales when the cluster's average
CPU is above or below a given threshold. Structure is documented below.
* `metric` - (Optional) A policy that scales according to Google Cloud
Monitoring metrics Structure is documented below.
* `load_balancing_utilization` - (Optional) A policy that scales when the load
reaches a proportion of a limit defined in the HTTP load balancer. Structure
is documented below.
The `cpu_utilization` block contains:
* `target` - The floating point threshold where CPU utilization should be. E.g.
for 50% one would specify 0.5.
The `metric` block contains (more documentation
[here](https://cloud.google.com/monitoring/api/metrics)):
* `name` - The name of the Google Cloud Monitoring metric to follow, e.g.
`compute.googleapis.com/instance/network/received_bytes_count`
* `type` - Either "cumulative", "delta", or "gauge".
* `target` - The desired metric value per instance. Must be a positive value.
The `load_balancing_utilization` block contains:
* `target` - The floating point threshold where load balancing utilization
should be. E.g. if the load balancer's `maxRatePerInstance` is 10 requests
per second (RPS) then setting this to 0.5 would cause the group to be scaled
such that each instance receives 5 RPS.
* `description` -
(Optional)
An optional description of this resource.
* `zone` -
(Optional)
URL of the zone where the instance group resides.
* `project` - (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
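Taken together, a minimal configuration sketch (the managed instance group reference is hypothetical, and only a subset of the arguments above is shown):

```
resource "google_compute_autoscaler" "default" {
  name   = "my-autoscaler"
  zone   = "us-central1-f"
  target = "${google_compute_instance_group_manager.default.self_link}"

  autoscaling_policy {
    min_replicas    = 1
    max_replicas    = 5
    cooldown_period = 60

    cpu_utilization {
      target = 0.5
    }
  }
}
```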
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are
exported:
In addition to the arguments listed above, the following computed attributes are exported:
* `self_link` - The URL of the created resource.
* `creation_timestamp` -
Creation timestamp in RFC3339 text format.
* `self_link` - The URI of the created resource.
## Timeouts
This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
- `create` - Default is 4 minutes.
- `update` - Default is 4 minutes.
- `delete` - Default is 4 minutes.
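These defaults can be overridden with a standard `timeouts` block inside the resource; for example (values are arbitrary):

```
resource "google_compute_autoscaler" "default" {
  # ... arguments as above ...

  timeouts {
    create = "10m"
    delete = "10m"
  }
}
```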
## Import
Autoscalers can be imported using the `name`, e.g.
Autoscaler can be imported using any of these accepted formats:
```
$ terraform import google_compute_autoscaler.foobar scaler
$ terraform import google_compute_autoscaler.default projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}
$ terraform import google_compute_autoscaler.default {{zone}}/{{name}}
$ terraform import google_compute_autoscaler.default {{project}}/{{zone}}/{{name}}
$ terraform import google_compute_autoscaler.default {{name}}
```

View File

@ -1,22 +1,37 @@
---
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file in
# .github/CONTRIBUTING.md.
#
# ----------------------------------------------------------------------------
layout: "google"
page_title: "Google: google_compute_region_autoscaler"
sidebar_current: "docs-google-compute-region-autoscaler"
description: |-
Manages a Regional Autoscaler within GCE.
Represents an Autoscaler resource.
---
# google\_compute\_region\_autoscaler
A Compute Engine Regional Autoscaler automatically adds or removes virtual machines from
a managed instance group based on increases or decreases in load. This allows
your applications to gracefully handle increases in traffic and reduces cost
when the need for resources is lower. You just define the autoscaling policy and
the autoscaler performs automatic scaling based on the measured load. For more
information, see [the official
documentation](https://cloud.google.com/compute/docs/autoscaler/) and
[API](https://cloud.google.com/compute/docs/reference/latest/regionAutoscalers)
Represents an Autoscaler resource.
Autoscalers allow you to automatically scale virtual machine instances in
managed instance groups according to an autoscaling policy that you
define.
To get more information about RegionAutoscaler, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/regionAutoscalers)
* How-to Guides
* [Autoscaling Groups of Instances](https://cloud.google.com/compute/docs/autoscaler/)
## Example Usage
@ -79,77 +94,149 @@ resource "google_compute_region_autoscaler" "foobar" {
The following arguments are supported:
* `name` - (Required) The name of the autoscaler.
* `name` -
(Required)
Name of the resource. The name must be 1-63 characters long and match
the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
* `autoscaling_policy` -
(Required)
The configuration parameters for the autoscaling algorithm. You can
define one or more of the policies for an autoscaler: cpuUtilization,
customMetricUtilizations, and loadBalancingUtilization.
* `target` - (Required) The full URL to the instance group manager whose size we
control.
If none of these are specified, the default will be to autoscale based
on cpuUtilization to 0.6 or 60%. Structure is documented below.
* `target` -
(Required)
URL of the managed instance group that this autoscaler will scale.
* `region` - (Required) The region of the target.
The `autoscaling_policy` block supports:
* `min_replicas` -
(Required)
The minimum number of replicas that the autoscaler can scale down
to. This cannot be less than 0. If not provided, the autoscaler will
choose a default value depending on the maximum number of instances
allowed.
* `max_replicas` -
(Required)
The maximum number of instances that the autoscaler can scale up
to. This is required when creating or updating an autoscaler. The
maximum number of replicas should not be lower than the minimum
number of replicas.
* `cooldown_period` -
(Optional)
The number of seconds that the autoscaler should wait before it
starts collecting information from a new instance. This prevents
the autoscaler from collecting information while the instance is
initializing, during which the collected usage would not be
reliable. The default time the autoscaler waits is 60 seconds.
* `autoscaling_policy` - (Required) The parameters of the autoscaling
algorithm. Structure is documented below.
Virtual machine initialization times might vary because of
numerous factors. We recommend that you test how long an
instance may take to initialize. To do this, create an instance
and time the startup process.
* `cpu_utilization` -
(Optional)
Defines the CPU utilization policy that allows the autoscaler to
scale based on the average CPU utilization of a managed instance
group. Structure is documented below.
* `metric` -
(Optional)
Defines a custom metric utilization policy that allows the autoscaler
to scale based on a Stackdriver Monitoring metric exported by instances
in the managed instance group. Structure is documented below.
* `load_balancing_utilization` -
(Optional)
Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
The `cpu_utilization` block supports:
* `target` -
(Required)
The target CPU utilization that the autoscaler should maintain.
Must be a float value in the range (0, 1]. If not specified, the
default is 0.6.
If the CPU level is below the target utilization, the autoscaler
scales down the number of instances until it reaches the minimum
number of instances you specified or until the average CPU of
your instances reaches the target utilization.
If the average CPU is above the target utilization, the autoscaler
scales up until it reaches the maximum number of instances you
specified or until the average utilization reaches the target
utilization.
The `metric` block supports:
* `name` -
(Required)
The identifier (type) of the Stackdriver Monitoring metric.
The metric cannot have negative values.
The metric must have a value type of INT64 or DOUBLE.
* `target` -
(Required)
The target value of the metric that the autoscaler should
maintain. This must be a positive value. A utilization
metric scales the number of virtual machines handling requests
up or down in proportion to the metric.
For example, a good metric to use as a `utilizationTarget` is
`www.googleapis.com/compute/instance/network/received_bytes_count`.
The autoscaler will work to keep this value constant for each
of the instances.
* `type` -
(Required)
Defines how the target utilization value is expressed for a
Stackdriver Monitoring metric. One of `GAUGE`, `DELTA_PER_SECOND`,
or `DELTA_PER_MINUTE`.
The `load_balancing_utilization` block supports:
* `target` -
(Required)
Fraction of backend capacity utilization (set in HTTP(S) load
balancing configuration) that the autoscaler should maintain. Must
be a positive float value. If not defined, the default is 0.8.
- - -
* `description` - (Optional) An optional textual description of the instance
group manager.
* `project` - (Optional) The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
The `autoscaling_policy` block contains:
* `max_replicas` - (Required) The group will never be larger than this.
* `min_replicas` - (Required) The group will never be smaller than this.
* `cooldown_period` - (Optional) Period to wait between changes. This should be
at least double the time your instances take to start up.
* `cpu_utilization` - (Optional) A policy that scales when the cluster's average
CPU is above or below a given threshold. Structure is documented below.
* `metric` - (Optional) A policy that scales according to Google Cloud
Monitoring metrics Structure is documented below.
* `load_balancing_utilization` - (Optional) A policy that scales when the load
reaches a proportion of a limit defined in the HTTP load balancer. Structure
is documented below.
The `cpu_utilization` block contains:
* `target` - The floating point threshold where CPU utilization should be. E.g.
for 50% one would specify 0.5.
The `metric` block contains (more documentation
[here](https://cloud.google.com/monitoring/api/metrics)):
* `name` - The name of the Google Cloud Monitoring metric to follow, e.g.
`compute.googleapis.com/instance/network/received_bytes_count`
* `type` - Either "cumulative", "delta", or "gauge".
* `target` - The desired metric value per instance. Must be a positive value.
The `load_balancing_utilization` block contains:
* `target` - The floating point threshold where load balancing utilization
should be. E.g. if the load balancer's `maxRatePerInstance` is 10 requests
per second (RPS) then setting this to 0.5 would cause the group to be scaled
such that each instance receives 5 RPS.
* `description` -
(Optional)
An optional description of this resource.
* `region` -
(Optional)
URL of the region where the instance group resides.
* `project` - (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
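Taken together, a minimal regional sketch (the regional managed instance group reference is hypothetical, and only a subset of the arguments above is shown):

```
resource "google_compute_region_autoscaler" "default" {
  name   = "my-region-autoscaler"
  region = "us-central1"
  target = "${google_compute_region_instance_group_manager.default.self_link}"

  autoscaling_policy {
    min_replicas    = 1
    max_replicas    = 5
    cooldown_period = 60

    load_balancing_utilization {
      target = 0.8
    }
  }
}
```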
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are
exported:
In addition to the arguments listed above, the following computed attributes are exported:
* `self_link` - The URL of the created resource.
* `creation_timestamp` -
Creation timestamp in RFC3339 text format.
* `self_link` - The URI of the created resource.
## Timeouts
This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
- `create` - Default is 4 minutes.
- `update` - Default is 4 minutes.
- `delete` - Default is 4 minutes.
## Import
Autoscalers can be imported using the `name`, e.g.
RegionAutoscaler can be imported using any of these accepted formats:
```
$ terraform import google_compute_region_autoscaler.foobar scaler
$ terraform import google_compute_region_autoscaler.default projects/{{project}}/regions/{{region}}/autoscalers/{{name}}
$ terraform import google_compute_region_autoscaler.default {{region}}/{{name}}
$ terraform import google_compute_region_autoscaler.default {{project}}/{{region}}/{{name}}
$ terraform import google_compute_region_autoscaler.default {{name}}
```