diff --git a/google/provider_monitoring_gen.go b/google/provider_monitoring_gen.go new file mode 100644 index 00000000..484e473a --- /dev/null +++ b/google/provider_monitoring_gen.go @@ -0,0 +1,21 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import "github.com/hashicorp/terraform/helper/schema" + +var GeneratedMonitoringResourcesMap = map[string]*schema.Resource{ + "google_monitoring_alert_policy": resourceMonitoringAlertPolicy(), +} diff --git a/google/resource_binaryauthorization_attestor.go b/google/resource_binaryauthorization_attestor.go index 7c845735..0e7f6e01 100644 --- a/google/resource_binaryauthorization_attestor.go +++ b/google/resource_binaryauthorization_attestor.go @@ -363,6 +363,9 @@ func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v inter l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_binaryauthorization_policy.go b/google/resource_binaryauthorization_policy.go index 4b466fe7..cf60f5fb 100644 --- a/google/resource_binaryauthorization_policy.go +++ b/google/resource_binaryauthorization_policy.go @@ -411,6 +411,9 @@ func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := 
raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_compute_autoscaler.go b/google/resource_compute_autoscaler.go index d3186217..43f0672a 100644 --- a/google/resource_compute_autoscaler.go +++ b/google/resource_compute_autoscaler.go @@ -629,6 +629,9 @@ func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.Res l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_compute_firewall.go b/google/resource_compute_firewall.go index b34c0e9d..f871214c 100644 --- a/google/resource_compute_firewall.go +++ b/google/resource_compute_firewall.go @@ -744,6 +744,9 @@ func expandComputeFirewallAllow(v interface{}, d *schema.ResourceData, config *C l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) @@ -779,6 +782,9 @@ func expandComputeFirewallDeny(v interface{}, d *schema.ResourceData, config *Co l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_compute_region_autoscaler.go b/google/resource_compute_region_autoscaler.go index 05046492..93a14c7f 100644 --- a/google/resource_compute_region_autoscaler.go +++ b/google/resource_compute_region_autoscaler.go @@ -625,6 +625,9 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *sche l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_compute_router.go 
b/google/resource_compute_router.go index 861ac52b..f043388e 100644 --- a/google/resource_compute_router.go +++ b/google/resource_compute_router.go @@ -545,6 +545,9 @@ func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.ResourceD l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_compute_subnetwork.go b/google/resource_compute_subnetwork.go index 03102430..79d83980 100644 --- a/google/resource_compute_subnetwork.go +++ b/google/resource_compute_subnetwork.go @@ -639,6 +639,9 @@ func expandComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceDa l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_filestore_instance.go b/google/resource_filestore_instance.go index a746a145..8feb58d9 100644 --- a/google/resource_filestore_instance.go +++ b/google/resource_filestore_instance.go @@ -524,6 +524,9 @@ func expandFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, co l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) @@ -558,6 +561,9 @@ func expandFilestoreInstanceNetworks(v interface{}, d *schema.ResourceData, conf l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { + if raw == nil { + continue + } original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/resource_monitoring_alert_policy.go b/google/resource_monitoring_alert_policy.go new file mode 100644 index 00000000..cc28792b --- /dev/null +++ b/google/resource_monitoring_alert_policy.go @@ 
-0,0 +1,1245 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceMonitoringAlertPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringAlertPolicyCreate, + Read: resourceMonitoringAlertPolicyRead, + Update: resourceMonitoringAlertPolicyUpdate, + Delete: resourceMonitoringAlertPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringAlertPolicyImport, + }, + + Schema: map[string]*schema.Schema{ + "combiner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"AND", "OR", "AND_WITH_MATCHING_RESOURCE"}, false), + }, + "conditions": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + }, + "condition_absent": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration": { + Type: schema.TypeString, + Required: true, + }, + "aggregations": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + }, + "cross_series_reducer": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), + }, + }, + }, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + }, + "trigger": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + }, + "percent": { + Type: schema.TypeFloat, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "condition_threshold": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "comparison": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"}, false), + }, + "duration": { + Type: schema.TypeString, + Required: true, + }, + "aggregations": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + }, + "cross_series_reducer": { + Type: 
schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), + }, + }, + }, + }, + "denominator_aggregations": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + }, + "cross_series_reducer": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", 
"ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), + }, + }, + }, + }, + "denominator_filter": { + Type: schema.TypeString, + Optional: true, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + }, + "threshold_value": { + Type: schema.TypeFloat, + Optional: true, + }, + "trigger": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + }, + "percent": { + Type: schema.TypeFloat, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "labels": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "notification_channels": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "creation_record": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mutate_time": { + Type: schema.TypeString, + Computed: true, + }, + "mutated_by": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceMonitoringAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(reflect.ValueOf(combinerProp)) && (ok || !reflect.DeepEqual(v, combinerProp)) { + obj["combiner"] = combinerProp + } + enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(reflect.ValueOf(conditionsProp)) && (ok || !reflect.DeepEqual(v, conditionsProp)) { + obj["conditions"] = conditionsProp + } + notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(reflect.ValueOf(notificationChannelsProp)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { + obj["notificationChannels"] = notificationChannelsProp + } + labelsProp, err := expandMonitoringAlertPolicyLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + mutexKV.Lock(lockName) + defer mutexKV.Unlock(lockName) + + url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/projects/{{project}}/alertPolicies") + if err 
!= nil { + return err + } + + log.Printf("[DEBUG] Creating new AlertPolicy: %#v", obj) + res, err := sendRequest(config, "POST", url, obj) + if err != nil { + return fmt.Errorf("Error creating AlertPolicy: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AlertPolicy %q: %#v", d.Id(), res) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + d.Set("name", name.(string)) + d.SetId(name.(string)) + + return resourceMonitoringAlertPolicyRead(d, meta) +} + +func resourceMonitoringAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}") + if err != nil { + return err + } + + res, err := sendRequest(config, "GET", url, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("MonitoringAlertPolicy %q", d.Id())) + } + + if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("display_name", flattenMonitoringAlertPolicyDisplayName(res["displayName"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("combiner", flattenMonitoringAlertPolicyCombiner(res["combiner"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("creation_record", flattenMonitoringAlertPolicyCreationRecord(res["creationRecord"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("enabled", flattenMonitoringAlertPolicyEnabled(res["enabled"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: 
%s", err) + } + if err := d.Set("conditions", flattenMonitoringAlertPolicyConditions(res["conditions"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("notification_channels", flattenMonitoringAlertPolicyNotificationChannels(res["notificationChannels"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("labels", flattenMonitoringAlertPolicyLabels(res["labels"])); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + project, err := getProject(d, config) + if err != nil { + return err + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + + return nil +} + +func resourceMonitoringAlertPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, combinerProp)) { + obj["combiner"] = combinerProp + } + enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
conditionsProp)) { + obj["conditions"] = conditionsProp + } + notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { + obj["notificationChannels"] = notificationChannelsProp + } + labelsProp, err := expandMonitoringAlertPolicyLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + mutexKV.Lock(lockName) + defer mutexKV.Unlock(lockName) + + url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AlertPolicy %q: %#v", d.Id(), obj) + _, err = sendRequest(config, "PATCH", url, obj) + + if err != nil { + return fmt.Errorf("Error updating AlertPolicy %q: %s", d.Id(), err) + } + + return resourceMonitoringAlertPolicyRead(d, meta) +} + +func resourceMonitoringAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + mutexKV.Lock(lockName) + defer mutexKV.Unlock(lockName) + + url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AlertPolicy %q", d.Id()) + res, err := sendRequest(config, "DELETE", url, obj) + if err != nil { + return handleNotFoundError(err, d, "AlertPolicy") + } + + log.Printf("[DEBUG] Finished deleting AlertPolicy %q: %#v", d.Id(), res) + return nil +} + +func 
resourceMonitoringAlertPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*Config) + + // current import_formats can't import id's with forward slashes in them. + parseImportId([]string{"(?P<name>.+)"}, d, config) + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringAlertPolicyName(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyDisplayName(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCombiner(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCreationRecord(v interface{}) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["mutate_time"] = + flattenMonitoringAlertPolicyCreationRecordMutateTime(original["mutateTime"]) + transformed["mutated_by"] = + flattenMonitoringAlertPolicyCreationRecordMutatedBy(original["mutatedBy"]) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyCreationRecordMutateTime(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCreationRecordMutatedBy(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyEnabled(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditions(v interface{}) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "condition_absent": flattenMonitoringAlertPolicyConditionsConditionAbsent(original["conditionAbsent"]), + "name": flattenMonitoringAlertPolicyConditionsName(original["name"]), + "condition_threshold": flattenMonitoringAlertPolicyConditionsConditionThreshold(original["conditionThreshold"]), + "display_name": 
flattenMonitoringAlertPolicyConditionsDisplayName(original["displayName"]), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionAbsent(v interface{}) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"]) + transformed["trigger"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"]) + transformed["duration"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"]) + transformed["filter"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"]) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["perSeriesAligner"]), + "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["groupByFields"]), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignmentPeriod"]), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["crossSeriesReducer"]), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}) interface{} { + return v +} + +func 
flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["percent"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"]) + transformed["count"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"]) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. 
+ } + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsName(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["threshold_value"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["thresholdValue"]) + transformed["denominator_filter"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominatorFilter"]) + transformed["denominator_aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominatorAggregations"]) + transformed["duration"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"]) + transformed["comparison"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"]) + transformed["trigger"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"]) + transformed["aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"]) + transformed["filter"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"]) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}) interface{} { + if v == 
nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["perSeriesAligner"]), + "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["groupByFields"]), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignmentPeriod"]), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["crossSeriesReducer"]), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["percent"] = + 
flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"]) + transformed["count"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"]) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["perSeriesAligner"]), + "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["groupByFields"]), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignmentPeriod"]), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["crossSeriesReducer"]), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}) interface{} { + return v +} + +func 
flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyNotificationChannels(v interface{}) interface{} { + return v +} + +func flattenMonitoringAlertPolicyLabels(v interface{}) interface{} { + return v +} + +func expandMonitoringAlertPolicyDisplayName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyCombiner(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyEnabled(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditions(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConditionAbsent, err := expandMonitoringAlertPolicyConditionsConditionAbsent(original["condition_absent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionAbsent); val.IsValid() && !isEmptyValue(val) { + transformed["conditionAbsent"] = transformedConditionAbsent + } + + transformedName, err := expandMonitoringAlertPolicyConditionsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + 
transformed["name"] = transformedName + } + + transformedConditionThreshold, err := expandMonitoringAlertPolicyConditionsConditionThreshold(original["condition_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionThreshold); val.IsValid() && !isEmptyValue(val) { + transformed["conditionThreshold"] = transformedConditionThreshold + } + + transformedDisplayName, err := expandMonitoringAlertPolicyConditionsDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { + transformed["aggregations"] = transformedAggregations + } + + transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { + transformed["duration"] = 
transformedDuration + } + + transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { + transformed["perSeriesAligner"] = transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedCount, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func 
expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedThresholdValue, err := expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["threshold_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedThresholdValue); val.IsValid() && !isEmptyValue(val) { + transformed["thresholdValue"] = transformedThresholdValue + } + + transformedDenominatorFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominator_filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDenominatorFilter); val.IsValid() && !isEmptyValue(val) { + transformed["denominatorFilter"] = transformedDenominatorFilter + } + + transformedDenominatorAggregations, err := 
expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominator_aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDenominatorAggregations); val.IsValid() && !isEmptyValue(val) { + transformed["denominatorAggregations"] = transformedDenominatorAggregations + } + + transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedComparison, err := expandMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComparison); val.IsValid() && !isEmptyValue(val) { + transformed["comparison"] = transformedComparison + } + + transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { + transformed["aggregations"] = transformedAggregations + } + + transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func 
expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { + transformed["perSeriesAligner"] = transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := 
expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedCount, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { + transformed["perSeriesAligner"] = transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, 
err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyNotificationChannels(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return 
v, nil +} + +func expandMonitoringAlertPolicyLabels(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_monitoring_alert_policy_test.go b/google/resource_monitoring_alert_policy_test.go new file mode 100644 index 00000000..56d561b9 --- /dev/null +++ b/google/resource_monitoring_alert_policy_test.go @@ -0,0 +1,211 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +// Stackdriver tests cannot be run in parallel otherwise they will error out with: +// Error 503: Too many concurrent edits to the project configuration. Please try again. + +func TestAccMonitoringAlertPolicy_basic(t *testing.T) { + + alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + filter := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"` + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlertPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, "ALIGN_RATE", filter), + }, + resource.TestStep{ + ResourceName: "google_monitoring_alert_policy.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccMonitoringAlertPolicy_update(t *testing.T) { + + alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + filter1 := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"` + aligner1 := "ALIGN_RATE" + filter2 := `metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"` + 
aligner2 := "ALIGN_MAX" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlertPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner1, filter1), + }, + resource.TestStep{ + ResourceName: "google_monitoring_alert_policy.basic", + ImportState: true, + ImportStateVerify: true, + }, + resource.TestStep{ + Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner2, filter2), + }, + resource.TestStep{ + ResourceName: "google_monitoring_alert_policy.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccMonitoringAlertPolicy_full(t *testing.T) { + + alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + conditionName1 := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + conditionName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlertPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2), + }, + resource.TestStep{ + ResourceName: "google_monitoring_alert_policy.full", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAlertPolicyDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_monitoring_alert_policy" { + continue + } + + name := rs.Primary.Attributes["name"] + + url := fmt.Sprintf("https://monitoring.googleapis.com/v3/%s", name) + _, err := sendRequest(config, "GET", url, nil) + + if err == nil { + return fmt.Errorf("Error, alert policy %s still exists", name) + } + } + + return nil +} + +func testAccMonitoringAlertPolicy_basic(alertName, 
conditionName, aligner, filter string) string { + return fmt.Sprintf(` +resource "google_monitoring_alert_policy" "basic" { + display_name = "%s" + enabled = true + combiner = "OR" + + conditions = [ + { + display_name = "%s" + + condition_threshold = { + aggregations = [ + { + alignment_period = "60s" + per_series_aligner = "%s" + }, + ] + + duration = "60s" + comparison = "COMPARISON_GT" + filter = "%s" + } + }, + ] +} +`, alertName, conditionName, aligner, filter) +} + +func testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2 string) string { + return fmt.Sprintf(` +resource "google_monitoring_alert_policy" "full" { + display_name = "%s" + combiner = "OR" + enabled = true + + conditions = [ + { + display_name = "%s" + + condition_threshold = { + threshold_value = 50 + + aggregations = [ + { + alignment_period = "60s" + per_series_aligner = "ALIGN_RATE" + cross_series_reducer = "REDUCE_MEAN" + + group_by_fields = [ + "metric.label.device_name", + "project", + "resource.label.instance_id", + "resource.label.zone", + ] + }, + ] + + duration = "60s" + comparison = "COMPARISON_GT" + + trigger = { + percent = 10 + } + + filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" + } + }, + { + condition_absent { + duration = "3600s" + filter = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"" + + aggregations { + alignment_period = "60s" + cross_series_reducer = "REDUCE_MEAN" + per_series_aligner = "ALIGN_MEAN" + + group_by_fields = [ + "project", + "resource.label.instance_id", + "resource.label.zone", + ] + } + + trigger { + count = 1 + } + } + + display_name = "%s" + }, + ] +} +`, alertName, conditionName1, conditionName2) +} diff --git a/website/docs/r/monitoring_alert_policy.html.markdown b/website/docs/r/monitoring_alert_policy.html.markdown new file mode 100644 index 00000000..c707f487 --- /dev/null +++ 
b/website/docs/r/monitoring_alert_policy.html.markdown @@ -0,0 +1,622 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +layout: "google" +page_title: "Google: google_monitoring_alert_policy" +sidebar_current: "docs-google-monitoring-alert-policy" +description: |- + A description of the conditions under which some aspect of your system is + considered to be "unhealthy" and the ways to notify people or services + about this state. +--- + +# google\_monitoring\_alert\_policy + +A description of the conditions under which some aspect of your system is +considered to be "unhealthy" and the ways to notify people or services +about this state. 
+ +To get more information about AlertPolicy, see: + +* [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies) +* How-to Guides + * [Official Documentation](https://cloud.google.com/monitoring/alerts/) + +## Example Usage + +### Basic Usage +```hcl +resource "google_monitoring_alert_policy" "basic" { + display_name = "Test Policy Basic" + combiner = "OR" + conditions = [ + { + display_name = "test condition" + condition_threshold { + filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" + duration = "60s" + comparison = "COMPARISON_GT" + aggregations = [ + { + alignment_period = "60s" + per_series_aligner = "ALIGN_RATE" + } + ] + } + } + ] +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `display_name` - + (Required) + A short name or phrase used to identify the policy in + dashboards, notifications, and incidents. To avoid confusion, don't use + the same display name for multiple policies in the same project. The + name is limited to 512 Unicode characters. + +* `combiner` - + (Required) + How to combine the results of multiple conditions to + determine if an incident should be opened. + +* `enabled` - + (Required) + Whether or not the policy is enabled. + +* `conditions` - + (Required) + A list of conditions for the policy. The conditions are combined by + AND or OR according to the combiner field. If the combined conditions + evaluate to true, then an incident is created. A policy can have from + one to six conditions. Structure is documented below. + + +The `conditions` block supports: + +* `condition_absent` - + (Optional) + A condition that checks that a time series + continues to receive new data points. Structure is documented below. + +* `name` - + The unique resource name for this condition. 
+ Its syntax is: + projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + [CONDITION_ID] is assigned by Stackdriver Monitoring when + the condition is created as part of a new or updated alerting + policy. + +* `condition_threshold` - + (Optional) + A condition that compares a time series against a + threshold. Structure is documented below. + +* `display_name` - + (Required) + A short name or phrase used to identify the + condition in dashboards, notifications, and + incidents. To avoid confusion, don't use the same + display name for multiple conditions in the same + policy. + + +The `condition_absent` block supports: + +* `aggregations` - + (Optional) + Specifies the alignment of data points in + individual time series as well as how to + combine the retrieved time series together + (such as when aggregating multiple streams + on each resource to a single stream for each + resource or when aggregating streams across + all members of a group of resources). + Multiple aggregations are applied in the + order specified. Structure is documented below. + +* `trigger` - + (Optional) + The number/percent of time series for which + the comparison must hold in order for the + condition to trigger. If unspecified, then + the condition will trigger if the comparison + is true for any of the time series that have + been identified by filter and aggregations. Structure is documented below. + +* `duration` - + (Required) + The amount of time that a time series must + fail to report new data to be considered + failing. Currently, only values that are a + multiple of a minute--e.g. 60s, 120s, or 300s + --are supported. 
+ +* `filter` - + (Optional) + A filter that identifies which time series + should be compared with the threshold.The + filter is similar to the one that is + specified in the + MetricService.ListTimeSeries request (that + call is useful to verify the time series + that will be retrieved / processed) and must + specify the metric type and optionally may + contain restrictions on resource type, + resource labels, and metric labels. This + field may not exceed 2048 Unicode characters + in length. + + +The `aggregations` block supports: + +* `per_series_aligner` - + (Optional) + The approach to be used to align + individual time series. Not all + alignment functions may be applied + to all time series, depending on + the metric type and value type of + the original time series. + Alignment may change the metric + type or the value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +* `group_by_fields` - + (Optional) + The set of fields to preserve when + crossSeriesReducer is specified. + The groupByFields determine how + the time series are partitioned + into subsets prior to applying the + aggregation function. Each subset + contains time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of exactly + one subset. The crossSeriesReducer + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains resource.type. Fields not + specified in groupByFields are + aggregated away. If groupByFields + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. 
If crossSeriesReducer + is not defined, this field is + ignored. + +* `alignment_period` - + (Optional) + The alignment period for per-time + series alignment. If present, + alignmentPeriod must be at least + 60 seconds. After per-time series + alignment, each time series will + contain data points only on the + period boundaries. If + perSeriesAligner is not specified + or equals ALIGN_NONE, then this + field is ignored. If + perSeriesAligner is specified and + does not equal ALIGN_NONE, then + this field must be defined; + otherwise an error is returned. + +* `cross_series_reducer` - + (Optional) + The approach to be used to combine + time series. Not all reducer + functions may be applied to all + time series, depending on the + metric type and the value type of + the original time series. + Reduction may change the metric + type or value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +The `trigger` block supports: + +* `percent` - + (Optional) + The percentage of time series that + must fail the predicate for the + condition to be triggered. + +* `count` - + (Optional) + The absolute number of time series + that must fail the predicate for the + condition to be triggered. + +The `condition_threshold` block supports: + +* `threshold_value` - + (Optional) + A value against which to compare the time + series. + +* `denominator_filter` - + (Optional) + A filter that identifies a time series that + should be used as the denominator of a ratio + that will be compared with the threshold. 
If + a denominator_filter is specified, the time + series specified by the filter field will be + used as the numerator.The filter is similar + to the one that is specified in the + MetricService.ListTimeSeries request (that + call is useful to verify the time series + that will be retrieved / processed) and must + specify the metric type and optionally may + contain restrictions on resource type, + resource labels, and metric labels. This + field may not exceed 2048 Unicode characters + in length. + +* `denominator_aggregations` - + (Optional) + Specifies the alignment of data points in + individual time series selected by + denominatorFilter as well as how to combine + the retrieved time series together (such as + when aggregating multiple streams on each + resource to a single stream for each + resource or when aggregating streams across + all members of a group of resources).When + computing ratios, the aggregations and + denominator_aggregations fields must use the + same alignment period and produce time + series that have the same periodicity and + labels.This field is similar to the one in + the MetricService.ListTimeSeries request. It + is advisable to use the ListTimeSeries + method when debugging this field. Structure is documented below. + +* `duration` - + (Required) + The amount of time that a time series must + violate the threshold to be considered + failing. Currently, only values that are a + multiple of a minute--e.g., 0, 60, 120, or + 300 seconds--are supported. If an invalid + value is given, an error will be returned. + When choosing a duration, it is useful to + keep in mind the frequency of the underlying + time series data (which may also be affected + by any alignments specified in the + aggregations field); a good duration is long + enough so that a single outlier does not + generate spurious alerts, but short enough + that unhealthy states are detected and + alerted on quickly. 
+ +* `comparison` - + (Required) + The comparison to apply between the time + series (indicated by filter and aggregation) + and the threshold (indicated by + threshold_value). The comparison is applied + on each time series, with the time series on + the left-hand side and the threshold on the + right-hand side. Only COMPARISON_LT and + COMPARISON_GT are supported currently. + +* `trigger` - + (Optional) + The number/percent of time series for which + the comparison must hold in order for the + condition to trigger. If unspecified, then + the condition will trigger if the comparison + is true for any of the time series that have + been identified by filter and aggregations, + or by the ratio, if denominator_filter and + denominator_aggregations are specified. Structure is documented below. + +* `aggregations` - + (Optional) + Specifies the alignment of data points in + individual time series as well as how to + combine the retrieved time series together + (such as when aggregating multiple streams + on each resource to a single stream for each + resource or when aggregating streams across + all members of a group of resources). + Multiple aggregations are applied in the + order specified.This field is similar to the + one in the MetricService.ListTimeSeries + request. It is advisable to use the + ListTimeSeries method when debugging this + field. Structure is documented below. + +* `filter` - + (Optional) + A filter that identifies which time series + should be compared with the threshold.The + filter is similar to the one that is + specified in the + MetricService.ListTimeSeries request (that + call is useful to verify the time series + that will be retrieved / processed) and must + specify the metric type and optionally may + contain restrictions on resource type, + resource labels, and metric labels. This + field may not exceed 2048 Unicode characters + in length. 
+ + +The `denominator_aggregations` block supports: + +* `per_series_aligner` - + (Optional) + The approach to be used to align + individual time series. Not all + alignment functions may be applied + to all time series, depending on + the metric type and value type of + the original time series. + Alignment may change the metric + type or the value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +* `group_by_fields` - + (Optional) + The set of fields to preserve when + crossSeriesReducer is specified. + The groupByFields determine how + the time series are partitioned + into subsets prior to applying the + aggregation function. Each subset + contains time series that have the + same value for each of the + grouping fields. Each individual + time series is a member of exactly + one subset. The crossSeriesReducer + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains resource.type. Fields not + specified in groupByFields are + aggregated away. If groupByFields + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If crossSeriesReducer + is not defined, this field is + ignored. + +* `alignment_period` - + (Optional) + The alignment period for per-time + series alignment. If present, + alignmentPeriod must be at least + 60 seconds. After per-time series + alignment, each time series will + contain data points only on the + period boundaries. If + perSeriesAligner is not specified + or equals ALIGN_NONE, then this + field is ignored. 
+ If + perSeriesAligner is specified and + does not equal ALIGN_NONE, then + this field must be defined; + otherwise an error is returned. + +* `cross_series_reducer` - + (Optional) + The approach to be used to combine + time series. Not all reducer + functions may be applied to all + time series, depending on the + metric type and the value type of + the original time series. + Reduction may change the metric + type or value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +The `trigger` block supports: + +* `percent` - + (Optional) + The percentage of time series that + must fail the predicate for the + condition to be triggered. + +* `count` - + (Optional) + The absolute number of time series + that must fail the predicate for the + condition to be triggered. + +The `aggregations` block supports: + +* `per_series_aligner` - + (Optional) + The approach to be used to align + individual time series. Not all + alignment functions may be applied + to all time series, depending on + the metric type and value type of + the original time series. + Alignment may change the metric + type or the value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +* `group_by_fields` - + (Optional) + The set of fields to preserve when + crossSeriesReducer is specified. + The groupByFields determine how + the time series are partitioned + into subsets prior to applying the + aggregation function. Each subset + contains time series that have the + same value for each of the + grouping fields. 
+ Each individual + time series is a member of exactly + one subset. The crossSeriesReducer + is applied to each subset of time + series. It is not possible to + reduce across different resource + types, so this field implicitly + contains resource.type. Fields not + specified in groupByFields are + aggregated away. If groupByFields + is not specified and all the time + series have the same resource + type, then the time series are + aggregated into a single output + time series. If crossSeriesReducer + is not defined, this field is + ignored. + +* `alignment_period` - + (Optional) + The alignment period for per-time + series alignment. If present, + alignmentPeriod must be at least + 60 seconds. After per-time series + alignment, each time series will + contain data points only on the + period boundaries. If + perSeriesAligner is not specified + or equals ALIGN_NONE, then this + field is ignored. If + perSeriesAligner is specified and + does not equal ALIGN_NONE, then + this field must be defined; + otherwise an error is returned. + +* `cross_series_reducer` - + (Optional) + The approach to be used to combine + time series. Not all reducer + functions may be applied to all + time series, depending on the + metric type and the value type of + the original time series. + Reduction may change the metric + type or value type of the time + series.Time series data must be + aligned in order to perform cross- + time series reduction. If + crossSeriesReducer is specified, + then perSeriesAligner must be + specified and not equal ALIGN_NONE + and alignmentPeriod must be + specified; otherwise, an error is + returned. + +- - - + + +* `notification_channels` - + (Optional) + Identifies the notification channels to which notifications should be + sent when incidents are opened or closed or when new violations occur + on an already opened incident. 
Each element of this array corresponds + to the name field in each of the NotificationChannel objects that are + returned from the notificationChannels.list method. The syntax of the + entries in this field is + `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` + +* `labels` - + (Optional) + User-supplied key/value data to be used for organizing AlertPolicy objects. +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + + +* `name` - + The unique resource name for this policy. + Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + +* `creation_record` - + A read-only record of the creation of the alerting policy. + If provided in a call to create or update, this field will + be ignored. Structure is documented below. + + +The `creation_record` block contains: + +* `mutate_time` - + When the change occurred. + +* `mutated_by` - + The email address of the user making the change. + + +## Import + +AlertPolicy can be imported using any of these accepted formats: + +``` +$ terraform import google_monitoring_alert_policy.default {{name}} +```