mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-07-03 08:42:39 +00:00
Stackdriver monitoring AlertPolicy resource (#2120)
This commit is contained in:
parent
d45626a479
commit
1f76d44ec1
21
google/provider_monitoring_gen.go
Normal file
21
google/provider_monitoring_gen.go
Normal file
|
@ -0,0 +1,21 @@
|
|||
// ----------------------------------------------------------------------------
//
//     *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
//     This file is automatically generated by Magic Modules and manual
//     changes will be clobbered when the file is regenerated.
//
//     Please read more about how to change this file in
//     .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------

package google

import "github.com/hashicorp/terraform/helper/schema"

// GeneratedMonitoringResourcesMap maps Terraform resource type names to the
// auto-generated Stackdriver Monitoring resource implementations. It is merged
// into the provider's overall resource map elsewhere in the package.
var GeneratedMonitoringResourcesMap = map[string]*schema.Resource{
	"google_monitoring_alert_policy": resourceMonitoringAlertPolicy(),
}
|
|
@ -363,6 +363,9 @@ func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v inter
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -411,6 +411,9 @@ func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -629,6 +629,9 @@ func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.Res
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -744,6 +744,9 @@ func expandComputeFirewallAllow(v interface{}, d *schema.ResourceData, config *C
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
@ -779,6 +782,9 @@ func expandComputeFirewallDeny(v interface{}, d *schema.ResourceData, config *Co
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -625,6 +625,9 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *sche
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -545,6 +545,9 @@ func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.ResourceD
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -639,6 +639,9 @@ func expandComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceDa
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
|
@ -524,6 +524,9 @@ func expandFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, co
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
@ -558,6 +561,9 @@ func expandFilestoreInstanceNetworks(v interface{}, d *schema.ResourceData, conf
|
|||
l := v.([]interface{})
|
||||
req := make([]interface{}, 0, len(l))
|
||||
for _, raw := range l {
|
||||
if raw == nil {
|
||||
continue
|
||||
}
|
||||
original := raw.(map[string]interface{})
|
||||
transformed := make(map[string]interface{})
|
||||
|
||||
|
|
1245
google/resource_monitoring_alert_policy.go
Normal file
1245
google/resource_monitoring_alert_policy.go
Normal file
File diff suppressed because it is too large
Load Diff
211
google/resource_monitoring_alert_policy_test.go
Normal file
211
google/resource_monitoring_alert_policy_test.go
Normal file
|
@ -0,0 +1,211 @@
|
|||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// Stackdriver tests cannot be run in parallel otherwise they will error out with:
|
||||
// Error 503: Too many concurrent edits to the project configuration. Please try again.
|
||||
|
||||
func TestAccMonitoringAlertPolicy_basic(t *testing.T) {
|
||||
|
||||
alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
filter := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAlertPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, "ALIGN_RATE", filter),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: "google_monitoring_alert_policy.basic",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccMonitoringAlertPolicy_update(t *testing.T) {
|
||||
|
||||
alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
filter1 := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`
|
||||
aligner1 := "ALIGN_RATE"
|
||||
filter2 := `metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"`
|
||||
aligner2 := "ALIGN_MAX"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAlertPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner1, filter1),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: "google_monitoring_alert_policy.basic",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner2, filter2),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: "google_monitoring_alert_policy.basic",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccMonitoringAlertPolicy_full(t *testing.T) {
|
||||
|
||||
alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
conditionName1 := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
conditionName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAlertPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: "google_monitoring_alert_policy.full",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAlertPolicyDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_monitoring_alert_policy" {
|
||||
continue
|
||||
}
|
||||
|
||||
name := rs.Primary.Attributes["name"]
|
||||
|
||||
url := fmt.Sprintf("https://monitoring.googleapis.com/v3/%s", name)
|
||||
_, err := sendRequest(config, "GET", url, nil)
|
||||
|
||||
if err == nil {
|
||||
return fmt.Errorf("Error, alert policy %s still exists", name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccMonitoringAlertPolicy_basic returns an HCL configuration for a
// minimal alert policy with one threshold condition.
//
// alertName and conditionName become display names; aligner is the
// per-series aligner (e.g. "ALIGN_RATE"); filter is the time-series filter,
// pre-escaped by the caller for embedding in a quoted HCL string.
func testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner, filter string) string {
	return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "basic" {
  display_name = "%s"
  enabled      = true
  combiner     = "OR"

  conditions = [
    {
      display_name = "%s"

      condition_threshold = {
        aggregations = [
          {
            alignment_period   = "60s"
            per_series_aligner = "%s"
          },
        ]

        duration   = "60s"
        comparison = "COMPARISON_GT"
        filter     = "%s"
      }
    },
  ]
}
`, alertName, conditionName, aligner, filter)
}
|
||||
|
||||
// testAccMonitoringAlertPolicy_full returns an HCL configuration for an alert
// policy exercising the wider schema: a threshold condition (with
// threshold_value, cross-series reduction, group-by fields, and a percent
// trigger) plus an absence condition (with a count trigger).
//
// alertName is the policy display name; conditionName1/conditionName2 are the
// display names of the two conditions.
func testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2 string) string {
	return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "full" {
  display_name = "%s"
  combiner     = "OR"
  enabled      = true

  conditions = [
    {
      display_name = "%s"

      condition_threshold = {
        threshold_value = 50

        aggregations = [
          {
            alignment_period     = "60s"
            per_series_aligner   = "ALIGN_RATE"
            cross_series_reducer = "REDUCE_MEAN"

            group_by_fields = [
              "metric.label.device_name",
              "project",
              "resource.label.instance_id",
              "resource.label.zone",
            ]
          },
        ]

        duration   = "60s"
        comparison = "COMPARISON_GT"

        trigger = {
          percent = 10
        }

        filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""
      }
    },
    {
      condition_absent {
        duration = "3600s"
        filter   = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\""

        aggregations {
          alignment_period     = "60s"
          cross_series_reducer = "REDUCE_MEAN"
          per_series_aligner   = "ALIGN_MEAN"

          group_by_fields = [
            "project",
            "resource.label.instance_id",
            "resource.label.zone",
          ]
        }

        trigger {
          count = 1
        }
      }

      display_name = "%s"
    },
  ]
}
`, alertName, conditionName1, conditionName2)
}
|
622
website/docs/r/monitoring_alert_policy.html.markdown
Normal file
622
website/docs/r/monitoring_alert_policy.html.markdown
Normal file
|
@ -0,0 +1,622 @@
|
|||
---
|
||||
# ----------------------------------------------------------------------------
|
||||
#
|
||||
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
||||
#
|
||||
# ----------------------------------------------------------------------------
|
||||
#
|
||||
# This file is automatically generated by Magic Modules and manual
|
||||
# changes will be clobbered when the file is regenerated.
|
||||
#
|
||||
# Please read more about how to change this file in
|
||||
# .github/CONTRIBUTING.md.
|
||||
#
|
||||
# ----------------------------------------------------------------------------
|
||||
layout: "google"
|
||||
page_title: "Google: google_monitoring_alert_policy"
|
||||
sidebar_current: "docs-google-monitoring-alert-policy"
|
||||
description: |-
|
||||
A description of the conditions under which some aspect of your system is
|
||||
considered to be "unhealthy" and the ways to notify people or services
|
||||
about this state.
|
||||
---
|
||||
|
||||
# google\_monitoring\_alert\_policy
|
||||
|
||||
A description of the conditions under which some aspect of your system is
|
||||
considered to be "unhealthy" and the ways to notify people or services
|
||||
about this state.
|
||||
|
||||
To get more information about AlertPolicy, see:
|
||||
|
||||
* [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
|
||||
* How-to Guides
|
||||
* [Official Documentation](https://cloud.google.com/monitoring/alerts/)
|
||||
|
||||
## Example Usage
|
||||
|
||||
### Basic Usage
|
||||
```hcl
|
||||
resource "google_monitoring_alert_policy" "basic" {
|
||||
display_name = "Test Policy Basic"
|
||||
combiner = "OR"
|
||||
conditions = [
|
||||
{
|
||||
display_name = "test condition"
|
||||
condition_threshold {
|
||||
filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""
|
||||
duration = "60s"
|
||||
comparison = "COMPARISON_GT"
|
||||
aggregations = [
|
||||
{
|
||||
alignment_period = "60s"
|
||||
per_series_aligner = "ALIGN_RATE"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Argument Reference
|
||||
|
||||
The following arguments are supported:
|
||||
|
||||
|
||||
* `display_name` -
|
||||
(Required)
|
||||
A short name or phrase used to identify the policy in
|
||||
dashboards, notifications, and incidents. To avoid confusion, don't use
|
||||
the same display name for multiple policies in the same project. The
|
||||
name is limited to 512 Unicode characters.
|
||||
|
||||
* `combiner` -
|
||||
(Required)
|
||||
How to combine the results of multiple conditions to
|
||||
determine if an incident should be opened.
|
||||
|
||||
* `enabled` -
|
||||
(Required)
|
||||
Whether or not the policy is enabled.
|
||||
|
||||
* `conditions` -
|
||||
(Required)
|
||||
A list of conditions for the policy. The conditions are combined by
|
||||
AND or OR according to the combiner field. If the combined conditions
|
||||
evaluate to true, then an incident is created. A policy can have from
|
||||
one to six conditions. Structure is documented below.
|
||||
|
||||
|
||||
The `conditions` block supports:
|
||||
|
||||
* `condition_absent` -
|
||||
(Optional)
|
||||
A condition that checks that a time series
|
||||
continues to receive new data points. Structure is documented below.
|
||||
|
||||
* `name` -
|
||||
The unique resource name for this condition.
|
||||
Its syntax is:
|
||||
projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
|
||||
[CONDITION_ID] is assigned by Stackdriver Monitoring when
|
||||
the condition is created as part of a new or updated alerting
|
||||
policy.
|
||||
|
||||
* `condition_threshold` -
|
||||
(Optional)
|
||||
A condition that compares a time series against a
|
||||
threshold. Structure is documented below.
|
||||
|
||||
* `display_name` -
|
||||
(Required)
|
||||
A short name or phrase used to identify the
|
||||
condition in dashboards, notifications, and
|
||||
incidents. To avoid confusion, don't use the same
|
||||
display name for multiple conditions in the same
|
||||
policy.
|
||||
|
||||
|
||||
The `condition_absent` block supports:
|
||||
|
||||
* `aggregations` -
|
||||
(Optional)
|
||||
Specifies the alignment of data points in
|
||||
individual time series as well as how to
|
||||
combine the retrieved time series together
|
||||
(such as when aggregating multiple streams
|
||||
on each resource to a single stream for each
|
||||
resource or when aggregating streams across
|
||||
all members of a group of resources).
|
||||
Multiple aggregations are applied in the
|
||||
order specified. Structure is documented below.
|
||||
|
||||
* `trigger` -
|
||||
(Optional)
|
||||
The number/percent of time series for which
|
||||
the comparison must hold in order for the
|
||||
condition to trigger. If unspecified, then
|
||||
the condition will trigger if the comparison
|
||||
is true for any of the time series that have
|
||||
been identified by filter and aggregations. Structure is documented below.
|
||||
|
||||
* `duration` -
|
||||
(Required)
|
||||
The amount of time that a time series must
|
||||
fail to report new data to be considered
|
||||
failing. Currently, only values that are a
|
||||
multiple of a minute--e.g. 60s, 120s, or 300s
|
||||
--are supported.
|
||||
|
||||
* `filter` -
|
||||
(Optional)
|
||||
A filter that identifies which time series
|
||||
should be compared with the threshold.The
|
||||
filter is similar to the one that is
|
||||
specified in the
|
||||
MetricService.ListTimeSeries request (that
|
||||
call is useful to verify the time series
|
||||
that will be retrieved / processed) and must
|
||||
specify the metric type and optionally may
|
||||
contain restrictions on resource type,
|
||||
resource labels, and metric labels. This
|
||||
field may not exceed 2048 Unicode characters
|
||||
in length.
|
||||
|
||||
|
||||
The `aggregations` block supports:
|
||||
|
||||
* `per_series_aligner` -
|
||||
(Optional)
|
||||
The approach to be used to align
|
||||
individual time series. Not all
|
||||
alignment functions may be applied
|
||||
to all time series, depending on
|
||||
the metric type and value type of
|
||||
the original time series.
|
||||
Alignment may change the metric
|
||||
type or the value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
* `group_by_fields` -
|
||||
(Optional)
|
||||
The set of fields to preserve when
|
||||
crossSeriesReducer is specified.
|
||||
The groupByFields determine how
|
||||
the time series are partitioned
|
||||
into subsets prior to applying the
|
||||
aggregation function. Each subset
|
||||
contains time series that have the
|
||||
same value for each of the
|
||||
grouping fields. Each individual
|
||||
time series is a member of exactly
|
||||
one subset. The crossSeriesReducer
|
||||
is applied to each subset of time
|
||||
series. It is not possible to
|
||||
reduce across different resource
|
||||
types, so this field implicitly
|
||||
contains resource.type. Fields not
|
||||
specified in groupByFields are
|
||||
aggregated away. If groupByFields
|
||||
is not specified and all the time
|
||||
series have the same resource
|
||||
type, then the time series are
|
||||
aggregated into a single output
|
||||
time series. If crossSeriesReducer
|
||||
is not defined, this field is
|
||||
ignored.
|
||||
|
||||
* `alignment_period` -
|
||||
(Optional)
|
||||
The alignment period for per-time
|
||||
series alignment. If present,
|
||||
alignmentPeriod must be at least
|
||||
60 seconds. After per-time series
|
||||
alignment, each time series will
|
||||
contain data points only on the
|
||||
period boundaries. If
|
||||
perSeriesAligner is not specified
|
||||
or equals ALIGN_NONE, then this
|
||||
field is ignored. If
|
||||
perSeriesAligner is specified and
|
||||
does not equal ALIGN_NONE, then
|
||||
this field must be defined;
|
||||
otherwise an error is returned.
|
||||
|
||||
* `cross_series_reducer` -
|
||||
(Optional)
|
||||
The approach to be used to combine
|
||||
time series. Not all reducer
|
||||
functions may be applied to all
|
||||
time series, depending on the
|
||||
metric type and the value type of
|
||||
the original time series.
|
||||
Reduction may change the metric
|
||||
type or value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
The `trigger` block supports:
|
||||
|
||||
* `percent` -
|
||||
(Optional)
|
||||
The percentage of time series that
|
||||
must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
* `count` -
|
||||
(Optional)
|
||||
The absolute number of time series
|
||||
that must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
The `condition_threshold` block supports:
|
||||
|
||||
* `threshold_value` -
|
||||
(Optional)
|
||||
A value against which to compare the time
|
||||
series.
|
||||
|
||||
* `denominator_filter` -
|
||||
(Optional)
|
||||
A filter that identifies a time series that
|
||||
should be used as the denominator of a ratio
|
||||
that will be compared with the threshold. If
|
||||
a denominator_filter is specified, the time
|
||||
series specified by the filter field will be
|
||||
used as the numerator.The filter is similar
|
||||
to the one that is specified in the
|
||||
MetricService.ListTimeSeries request (that
|
||||
call is useful to verify the time series
|
||||
that will be retrieved / processed) and must
|
||||
specify the metric type and optionally may
|
||||
contain restrictions on resource type,
|
||||
resource labels, and metric labels. This
|
||||
field may not exceed 2048 Unicode characters
|
||||
in length.
|
||||
|
||||
* `denominator_aggregations` -
|
||||
(Optional)
|
||||
Specifies the alignment of data points in
|
||||
individual time series selected by
|
||||
denominatorFilter as well as how to combine
|
||||
the retrieved time series together (such as
|
||||
when aggregating multiple streams on each
|
||||
resource to a single stream for each
|
||||
resource or when aggregating streams across
|
||||
all members of a group of resources).When
|
||||
computing ratios, the aggregations and
|
||||
denominator_aggregations fields must use the
|
||||
same alignment period and produce time
|
||||
series that have the same periodicity and
|
||||
labels.This field is similar to the one in
|
||||
the MetricService.ListTimeSeries request. It
|
||||
is advisable to use the ListTimeSeries
|
||||
method when debugging this field. Structure is documented below.
|
||||
|
||||
* `duration` -
|
||||
(Required)
|
||||
The amount of time that a time series must
|
||||
violate the threshold to be considered
|
||||
failing. Currently, only values that are a
|
||||
multiple of a minute--e.g., 0, 60, 120, or
|
||||
300 seconds--are supported. If an invalid
|
||||
value is given, an error will be returned.
|
||||
When choosing a duration, it is useful to
|
||||
keep in mind the frequency of the underlying
|
||||
time series data (which may also be affected
|
||||
by any alignments specified in the
|
||||
aggregations field); a good duration is long
|
||||
enough so that a single outlier does not
|
||||
generate spurious alerts, but short enough
|
||||
that unhealthy states are detected and
|
||||
alerted on quickly.
|
||||
|
||||
* `comparison` -
|
||||
(Required)
|
||||
The comparison to apply between the time
|
||||
series (indicated by filter and aggregation)
|
||||
and the threshold (indicated by
|
||||
threshold_value). The comparison is applied
|
||||
on each time series, with the time series on
|
||||
the left-hand side and the threshold on the
|
||||
right-hand side. Only COMPARISON_LT and
|
||||
COMPARISON_GT are supported currently.
|
||||
|
||||
* `trigger` -
|
||||
(Optional)
|
||||
The number/percent of time series for which
|
||||
the comparison must hold in order for the
|
||||
condition to trigger. If unspecified, then
|
||||
the condition will trigger if the comparison
|
||||
is true for any of the time series that have
|
||||
been identified by filter and aggregations,
|
||||
or by the ratio, if denominator_filter and
|
||||
denominator_aggregations are specified. Structure is documented below.
|
||||
|
||||
* `aggregations` -
|
||||
(Optional)
|
||||
Specifies the alignment of data points in
|
||||
individual time series as well as how to
|
||||
combine the retrieved time series together
|
||||
(such as when aggregating multiple streams
|
||||
on each resource to a single stream for each
|
||||
resource or when aggregating streams across
|
||||
all members of a group of resources).
|
||||
Multiple aggregations are applied in the
|
||||
order specified.This field is similar to the
|
||||
one in the MetricService.ListTimeSeries
|
||||
request. It is advisable to use the
|
||||
ListTimeSeries method when debugging this
|
||||
field. Structure is documented below.
|
||||
|
||||
* `filter` -
|
||||
(Optional)
|
||||
A filter that identifies which time series
|
||||
should be compared with the threshold.The
|
||||
filter is similar to the one that is
|
||||
specified in the
|
||||
MetricService.ListTimeSeries request (that
|
||||
call is useful to verify the time series
|
||||
that will be retrieved / processed) and must
|
||||
specify the metric type and optionally may
|
||||
contain restrictions on resource type,
|
||||
resource labels, and metric labels. This
|
||||
field may not exceed 2048 Unicode characters
|
||||
in length.
|
||||
|
||||
|
||||
The `denominator_aggregations` block supports:
|
||||
|
||||
* `per_series_aligner` -
|
||||
(Optional)
|
||||
The approach to be used to align
|
||||
individual time series. Not all
|
||||
alignment functions may be applied
|
||||
to all time series, depending on
|
||||
the metric type and value type of
|
||||
the original time series.
|
||||
Alignment may change the metric
|
||||
type or the value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
* `group_by_fields` -
|
||||
(Optional)
|
||||
The set of fields to preserve when
|
||||
crossSeriesReducer is specified.
|
||||
The groupByFields determine how
|
||||
the time series are partitioned
|
||||
into subsets prior to applying the
|
||||
aggregation function. Each subset
|
||||
contains time series that have the
|
||||
same value for each of the
|
||||
grouping fields. Each individual
|
||||
time series is a member of exactly
|
||||
one subset. The crossSeriesReducer
|
||||
is applied to each subset of time
|
||||
series. It is not possible to
|
||||
reduce across different resource
|
||||
types, so this field implicitly
|
||||
contains resource.type. Fields not
|
||||
specified in groupByFields are
|
||||
aggregated away. If groupByFields
|
||||
is not specified and all the time
|
||||
series have the same resource
|
||||
type, then the time series are
|
||||
aggregated into a single output
|
||||
time series. If crossSeriesReducer
|
||||
is not defined, this field is
|
||||
ignored.
|
||||
|
||||
* `alignment_period` -
|
||||
(Optional)
|
||||
The alignment period for per-time
|
||||
series alignment. If present,
|
||||
alignmentPeriod must be at least
|
||||
60 seconds. After per-time series
|
||||
alignment, each time series will
|
||||
contain data points only on the
|
||||
period boundaries. If
|
||||
perSeriesAligner is not specified
|
||||
or equals ALIGN_NONE, then this
|
||||
field is ignored. If
|
||||
perSeriesAligner is specified and
|
||||
does not equal ALIGN_NONE, then
|
||||
this field must be defined;
|
||||
otherwise an error is returned.
|
||||
|
||||
* `cross_series_reducer` -
|
||||
(Optional)
|
||||
The approach to be used to combine
|
||||
time series. Not all reducer
|
||||
functions may be applied to all
|
||||
time series, depending on the
|
||||
metric type and the value type of
|
||||
the original time series.
|
||||
Reduction may change the metric
|
||||
type or value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
The `trigger` block supports:
|
||||
|
||||
* `percent` -
|
||||
(Optional)
|
||||
The percentage of time series that
|
||||
must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
* `count` -
|
||||
(Optional)
|
||||
The absolute number of time series
|
||||
that must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
The `aggregations` block supports:
|
||||
|
||||
* `per_series_aligner` -
|
||||
(Optional)
|
||||
The approach to be used to align
|
||||
individual time series. Not all
|
||||
alignment functions may be applied
|
||||
to all time series, depending on
|
||||
the metric type and value type of
|
||||
the original time series.
|
||||
Alignment may change the metric
|
||||
type or the value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
* `group_by_fields` -
|
||||
(Optional)
|
||||
The set of fields to preserve when
|
||||
crossSeriesReducer is specified.
|
||||
The groupByFields determine how
|
||||
the time series are partitioned
|
||||
into subsets prior to applying the
|
||||
aggregation function. Each subset
|
||||
contains time series that have the
|
||||
same value for each of the
|
||||
grouping fields. Each individual
|
||||
time series is a member of exactly
|
||||
one subset. The crossSeriesReducer
|
||||
is applied to each subset of time
|
||||
series. It is not possible to
|
||||
reduce across different resource
|
||||
types, so this field implicitly
|
||||
contains resource.type. Fields not
|
||||
specified in groupByFields are
|
||||
aggregated away. If groupByFields
|
||||
is not specified and all the time
|
||||
series have the same resource
|
||||
type, then the time series are
|
||||
aggregated into a single output
|
||||
time series. If crossSeriesReducer
|
||||
is not defined, this field is
|
||||
ignored.
|
||||
|
||||
* `alignment_period` -
|
||||
(Optional)
|
||||
The alignment period for per-time
|
||||
series alignment. If present,
|
||||
alignmentPeriod must be at least
|
||||
60 seconds. After per-time series
|
||||
alignment, each time series will
|
||||
contain data points only on the
|
||||
period boundaries. If
|
||||
perSeriesAligner is not specified
|
||||
or equals ALIGN_NONE, then this
|
||||
field is ignored. If
|
||||
perSeriesAligner is specified and
|
||||
does not equal ALIGN_NONE, then
|
||||
this field must be defined;
|
||||
otherwise an error is returned.
|
||||
|
||||
* `cross_series_reducer` -
|
||||
(Optional)
|
||||
The approach to be used to combine
|
||||
time series. Not all reducer
|
||||
functions may be applied to all
|
||||
time series, depending on the
|
||||
metric type and the value type of
|
||||
the original time series.
|
||||
Reduction may change the metric
|
||||
type of value type of the time
|
||||
series.Time series data must be
|
||||
aligned in order to perform cross-
|
||||
time series reduction. If
|
||||
crossSeriesReducer is specified,
|
||||
then perSeriesAligner must be
|
||||
specified and not equal ALIGN_NONE
|
||||
and alignmentPeriod must be
|
||||
specified; otherwise, an error is
|
||||
returned.
|
||||
|
||||
- - -
|
||||
|
||||
|
||||
* `notification_channels` -
|
||||
(Optional)
|
||||
Identifies the notification channels to which notifications should be
|
||||
sent when incidents are opened or closed or when new violations occur
|
||||
on an already opened incident. Each element of this array corresponds
|
||||
to the name field in each of the NotificationChannel objects that are
|
||||
returned from the notificationChannels.list method. The syntax of the
|
||||
entries in this field is
|
||||
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
|
||||
|
||||
* `labels` -
|
||||
(Optional)
|
||||
User-supplied key/value data to be used for organizing AlertPolicy objects.
|
||||
* `project` - (Optional) The ID of the project in which the resource belongs.
|
||||
If it is not provided, the provider project is used.
|
||||
|
||||
|
||||
## Attributes Reference
|
||||
|
||||
In addition to the arguments listed above, the following computed attributes are exported:
|
||||
|
||||
|
||||
* `name` -
|
||||
The unique resource name for this policy.
|
||||
Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
|
||||
|
||||
* `creation_record` -
|
||||
A read-only record of the creation of the alerting policy.
|
||||
If provided in a call to create or update, this field will
|
||||
be ignored. Structure is documented below.
|
||||
|
||||
|
||||
The `creation_record` block contains:
|
||||
|
||||
* `mutate_time` -
|
||||
When the change occurred.
|
||||
|
||||
* `mutated_by` -
|
||||
The email address of the user making the change.
|
||||
|
||||
|
||||
## Import
|
||||
|
||||
AlertPolicy can be imported using any of these accepted formats:
|
||||
|
||||
```
|
||||
$ terraform import google_monitoring_alert_policy.default {{name}}
|
||||
```
|
Loading…
Reference in New Issue
Block a user