Mirror of https://github.com/letic/terraform-provider-google.git, synced 2024-10-01 16:21:06 +00:00

Merge branch 'master' into paddy_igm_deprecation

commit 8e7347af4e
@ -1,16 +1,23 @@
## 1.19.0 (Unreleased)

BACKWARDS INCOMPATIBILITIES:

* all: beta fields have been deprecated in favor of the new `google-beta` provider. See https://terraform.io/docs/providers/google/provider_versions.html for more info, and the provider sketch after this list. [GH-2152]
* bigtable: `google_bigtable_instance` deprecated the `cluster_id`, `zone`, `num_nodes`, and `storage_type` fields, creating a `cluster` block containing those fields instead (a migration sketch follows this list). [GH-2161]
* cloudfunctions: `google_cloudfunctions_function` deprecated `trigger_bucket` and `trigger_topic` in favor of the new `event_trigger` field, and deprecated `retry_on_failure` in favor of the `event_trigger.failure_policy.retry` field (a migration sketch follows this list). [GH-2158]
* compute: `google_compute_instance`, `google_compute_instance_template`, and `google_compute_instance_from_template` have had the `network_interface.address` field deprecated and the `network_interface.network_ip` field undeprecated to better match the API. Terraform configurations should migrate from `network_interface.address` to `network_interface.network_ip` (a sketch follows this list). [GH-2096]
* compute: `google_compute_instance` and `google_compute_instance_from_template` have had the `network_interface.0.access_config.0.assigned_nat_ip` field deprecated. Please use `network_interface.0.access_config.0.nat_ip` instead.
* project: `google_project`'s `app_engine` sub-block has been deprecated. Please use the `google_app_engine_application` resource instead. Changing between the two should not force project re-creation. [GH-2147]
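As a hedged illustration of the provider split (the resource and values below are examples, not taken from this changelog), a configuration that relies on beta fields would declare the `google-beta` provider and point the affected resources at it:

```hcl
# Declare the beta provider alongside the GA google provider.
provider "google-beta" {
  project = "my-project"    # hypothetical project ID
  region  = "us-central1"
}

# Resources that use beta-only fields select the beta provider explicitly.
resource "google_compute_instance_group_manager" "igm" {
  provider = "google-beta"

  name               = "my-igm"    # illustrative values
  base_instance_name = "igm"
  zone               = "us-central1-a"
  instance_template  = "${google_compute_instance_template.default.self_link}"  # hypothetical template
}
```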
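A before/after sketch of the `google_bigtable_instance` change, mirroring the updated acceptance-test fixtures further down in this diff:

```hcl
# Before: deprecated top-level fields.
resource "google_bigtable_instance" "instance" {
  name         = "tf-instance"
  cluster_id   = "tf-instance-cluster"
  zone         = "us-central1-b"
  num_nodes    = 3
  storage_type = "HDD"
}

# After: the same settings move into a cluster block.
resource "google_bigtable_instance" "instance" {
  name = "tf-instance"

  cluster {
    cluster_id   = "tf-instance-cluster"
    zone         = "us-central1-b"
    num_nodes    = 3
    storage_type = "HDD"
  }
}
```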
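A before/after sketch of the `google_cloudfunctions_function` trigger migration, assuming a Pub/Sub-triggered function; it mirrors the updated test fixtures later in this diff:

```hcl
# Before: deprecated trigger fields.
resource "google_cloudfunctions_function" "function" {
  # ... source and runtime settings unchanged ...
  trigger_topic    = "${google_pubsub_topic.sub.name}"
  retry_on_failure = true
}

# After: an event_trigger block carries the same information.
resource "google_cloudfunctions_function" "function" {
  # ... source and runtime settings unchanged ...

  event_trigger {
    event_type = "providers/cloud.pubsub/eventTypes/topic.publish"
    resource   = "${google_pubsub_topic.sub.name}"

    failure_policy {
      retry = true
    }
  }
}
```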
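The two compute IP changes are one-for-one field renames; a sketch assuming an instance with a static internal address (values illustrative):

```hcl
resource "google_compute_instance" "vm" {
  # ... machine_type, boot disk, etc. unchanged ...

  network_interface {
    network    = "default"
    network_ip = "10.128.0.10"   # previously: address = "10.128.0.10"

    access_config {}
  }
}

# Read the external IP from nat_ip rather than the deprecated assigned_nat_ip, e.g.
# "${google_compute_instance.vm.network_interface.0.access_config.0.nat_ip}"
```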
FEATURES:

* **New Datasource**: `google_compute_instance` (a lookup sketch follows this list) [GH-1906]
* **New Resource**: `google_compute_interconnect_attachment` [GH-1140]
* **New Resource**: `google_filestore_instance` [GH-2088]
* **New Resource**: `google_app_engine_application` [GH-2147]
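A minimal sketch of the new datasource, assuming an existing instance; the exported attributes follow the resource of the same name:

```hcl
data "google_compute_instance" "existing" {
  name = "my-instance"     # hypothetical instance name
  zone = "us-central1-a"
}

# e.g. reuse its machine type elsewhere:
# "${data.google_compute_instance.existing.machine_type}"
```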
ENHANCEMENTS:

* container: Add `enable_tpu` flag to `google_container_cluster` (a sketch follows this list) [GH-1974]
* dns: `google_dns_managed_zone` is now importable [GH-1944]
* dns: `google_dns_managed_zone` is now entirely GA [GH-2154]
* runtimeconfig: `google_runtimeconfig_config` and `google_runtimeconfig_variable` are now importable. [GH-2054]
* services: containeranalysis.googleapis.com can now be enabled [GH-2095]
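A sketch of the new flag on a minimal cluster; `enable_tpu` is a beta-era field, so per the deprecation notes above it may require the `google-beta` provider:

```hcl
resource "google_container_cluster" "primary" {
  name               = "tpu-cluster"    # illustrative values
  zone               = "us-central1-b"
  initial_node_count = 1

  enable_tpu = true
}
```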
@@ -20,6 +20,7 @@ func dataSourceGoogleContainerEngineVersions() *schema.Resource {
                Optional: true,
            },
            "region": {
                Deprecated:    "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
                Type:          schema.TypeString,
                Optional:      true,
                ConflictsWith: []string{"zone"},
@@ -12,23 +12,26 @@ import (

var IamComputeSubnetworkSchema = map[string]*schema.Schema{
    "subnetwork": {
        Deprecated: "This resource is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
        Type:       schema.TypeString,
        Required:   true,
        ForceNew:   true,
    },

    "project": {
        Deprecated: "This resource is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
        Type:       schema.TypeString,
        Optional:   true,
        Computed:   true,
        ForceNew:   true,
    },

    "region": {
        Deprecated: "This resource is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
        Type:       schema.TypeString,
        Optional:   true,
        Computed:   true,
        ForceNew:   true,
    },
}
@@ -142,6 +142,7 @@ var schemaNodeConfig = &schema.Schema{
            },

            "taint": {
                Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
                Type:       schema.TypeList,
                Optional:   true,
                ForceNew:   true,

@@ -169,10 +170,11 @@ var schemaNodeConfig = &schema.Schema{
            },

            "workload_metadata_config": {
                Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
                Type:       schema.TypeList,
                Optional:   true,
                ForceNew:   true,
                MaxItems:   1,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "node_metadata": {
@@ -106,7 +106,9 @@ func Provider() terraform.ResourceProvider {
        GeneratedFilestoreResourcesMap,
        GeneratedRedisResourcesMap,
        GeneratedResourceManagerResourcesMap,
        GeneratedMonitoringResourcesMap,
        map[string]*schema.Resource{
            "google_app_engine_application": resourceAppEngineApplication(),
            "google_bigquery_dataset":       resourceBigQueryDataset(),
            "google_bigquery_table":         resourceBigQueryTable(),
            "google_bigtable_instance":      resourceBigtableInstance(),
google/provider_monitoring_gen.go (new file, 21 lines)
@@ -0,0 +1,21 @@
// ----------------------------------------------------------------------------
//
//     *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
//     This file is automatically generated by Magic Modules and manual
//     changes will be clobbered when the file is regenerated.
//
//     Please read more about how to change this file in
//     .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------

package google

import "github.com/hashicorp/terraform/helper/schema"

var GeneratedMonitoringResourcesMap = map[string]*schema.Resource{
    "google_monitoring_alert_policy": resourceMonitoringAlertPolicy(),
}
google/resource_app_engine_application.go (new file, 285 lines)
@@ -0,0 +1,285 @@
package google

import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/helper/customdiff"
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/helper/validation"
    appengine "google.golang.org/api/appengine/v1"
)

func resourceAppEngineApplication() *schema.Resource {
    return &schema.Resource{
        Create: resourceAppEngineApplicationCreate,
        Read:   resourceAppEngineApplicationRead,
        Update: resourceAppEngineApplicationUpdate,
        Delete: resourceAppEngineApplicationDelete,

        Importer: &schema.ResourceImporter{
            State: schema.ImportStatePassthrough,
        },

        // location_id cannot change after the app exists; surface that as a plan-time error.
        CustomizeDiff: customdiff.All(
            appEngineApplicationLocationIDCustomizeDiff,
        ),

        Schema: map[string]*schema.Schema{
            "project": &schema.Schema{
                Type:         schema.TypeString,
                Optional:     true,
                Computed:     true,
                ForceNew:     true,
                ValidateFunc: validateProjectID(),
            },
            "auth_domain": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                Computed: true,
            },
            "location_id": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
                ValidateFunc: validation.StringInSlice([]string{
                    "northamerica-northeast1",
                    "us-central",
                    "us-west2",
                    "us-east1",
                    "us-east4",
                    "southamerica-east1",
                    "europe-west",
                    "europe-west2",
                    "europe-west3",
                    "asia-northeast1",
                    "asia-south1",
                    "australia-southeast1",
                }, false),
            },
            "serving_status": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ValidateFunc: validation.StringInSlice([]string{
                    "UNSPECIFIED",
                    "SERVING",
                    "USER_DISABLED",
                    "SYSTEM_DISABLED",
                }, false),
                Computed: true,
            },
            "feature_settings": &schema.Schema{
                Type:     schema.TypeList,
                Optional: true,
                Computed: true,
                MaxItems: 1,
                Elem:     appEngineApplicationFeatureSettingsResource(),
            },
            "name": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "url_dispatch_rule": &schema.Schema{
                Type:     schema.TypeList,
                Computed: true,
                Elem:     appEngineApplicationURLDispatchRuleResource(),
            },
            "code_bucket": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "default_hostname": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "default_bucket": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "gcr_domain": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
        },
    }
}

func appEngineApplicationURLDispatchRuleResource() *schema.Resource {
    return &schema.Resource{
        Schema: map[string]*schema.Schema{
            "domain": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "path": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
            "service": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
        },
    }
}

func appEngineApplicationFeatureSettingsResource() *schema.Resource {
    return &schema.Resource{
        Schema: map[string]*schema.Schema{
            "split_health_checks": &schema.Schema{
                Type:     schema.TypeBool,
                Optional: true,
            },
        },
    }
}

func appEngineApplicationLocationIDCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
    old, new := d.GetChange("location_id")
    if old != "" && old != new {
        return fmt.Errorf("Cannot change location_id once the resource is created.")
    }
    return nil
}

func resourceAppEngineApplicationCreate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

    project, err := getProject(d, config)
    if err != nil {
        return err
    }
    app, err := expandAppEngineApplication(d, project)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] Creating App Engine App")
    op, err := config.clientAppEngine.Apps.Create(app).Do()
    if err != nil {
        return fmt.Errorf("Error creating App Engine application: %s", err.Error())
    }

    // There is one application per project, so the project ID is the resource ID.
    d.SetId(project)

    // Wait for the operation to complete
    waitErr := appEngineOperationWait(config.clientAppEngine, op, project, "App Engine app to create")
    if waitErr != nil {
        d.SetId("")
        return waitErr
    }
    log.Printf("[DEBUG] Created App Engine App")

    return resourceAppEngineApplicationRead(d, meta)
}

func resourceAppEngineApplicationRead(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)
    pid := d.Id()

    app, err := config.clientAppEngine.Apps.Get(pid).Do()
    if err != nil {
        return handleNotFoundError(err, d, fmt.Sprintf("App Engine Application %q", pid))
    }
    d.Set("auth_domain", app.AuthDomain)
    d.Set("code_bucket", app.CodeBucket)
    d.Set("default_bucket", app.DefaultBucket)
    d.Set("default_hostname", app.DefaultHostname)
    d.Set("location_id", app.LocationId)
    d.Set("name", app.Name)
    d.Set("serving_status", app.ServingStatus)
    d.Set("project", pid)
    dispatchRules, err := flattenAppEngineApplicationDispatchRules(app.DispatchRules)
    if err != nil {
        return err
    }
    err = d.Set("url_dispatch_rule", dispatchRules)
    if err != nil {
        return fmt.Errorf("Error setting dispatch rules in state. This is a bug, please report it at https://github.com/terraform-providers/terraform-provider-google/issues. Error is:\n%s", err.Error())
    }
    featureSettings, err := flattenAppEngineApplicationFeatureSettings(app.FeatureSettings)
    if err != nil {
        return err
    }
    err = d.Set("feature_settings", featureSettings)
    if err != nil {
        return fmt.Errorf("Error setting feature settings in state. This is a bug, please report it at https://github.com/terraform-providers/terraform-provider-google/issues. Error is:\n%s", err.Error())
    }
    return nil
}

func resourceAppEngineApplicationUpdate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)
    pid := d.Id()
    app, err := expandAppEngineApplication(d, pid)
    if err != nil {
        return err
    }
    log.Printf("[DEBUG] Updating App Engine App")
    op, err := config.clientAppEngine.Apps.Patch(pid, app).UpdateMask("authDomain,servingStatus,featureSettings.splitHealthChecks").Do()
    if err != nil {
        return fmt.Errorf("Error updating App Engine application: %s", err.Error())
    }

    // Wait for the operation to complete
    waitErr := appEngineOperationWait(config.clientAppEngine, op, pid, "App Engine app to update")
    if waitErr != nil {
        return waitErr
    }
    log.Printf("[DEBUG] Updated App Engine App")

    return resourceAppEngineApplicationRead(d, meta)
}

func resourceAppEngineApplicationDelete(d *schema.ResourceData, meta interface{}) error {
    log.Println("[WARN] App Engine applications cannot be destroyed once created. The project must be deleted to delete the application.")
    return nil
}

func expandAppEngineApplication(d *schema.ResourceData, project string) (*appengine.Application, error) {
    result := &appengine.Application{
        AuthDomain:    d.Get("auth_domain").(string),
        LocationId:    d.Get("location_id").(string),
        Id:            project,
        GcrDomain:     d.Get("gcr_domain").(string),
        ServingStatus: d.Get("serving_status").(string),
    }
    featureSettings, err := expandAppEngineApplicationFeatureSettings(d)
    if err != nil {
        return nil, err
    }
    result.FeatureSettings = featureSettings
    return result, nil
}

func expandAppEngineApplicationFeatureSettings(d *schema.ResourceData) (*appengine.FeatureSettings, error) {
    blocks := d.Get("feature_settings").([]interface{})
    if len(blocks) < 1 {
        return nil, nil
    }
    return &appengine.FeatureSettings{
        SplitHealthChecks: d.Get("feature_settings.0.split_health_checks").(bool),
        // force send SplitHealthChecks, so if it's set to false it still gets disabled
        ForceSendFields: []string{"SplitHealthChecks"},
    }, nil
}

func flattenAppEngineApplicationFeatureSettings(settings *appengine.FeatureSettings) ([]map[string]interface{}, error) {
    if settings == nil {
        return []map[string]interface{}{}, nil
    }
    result := map[string]interface{}{
        "split_health_checks": settings.SplitHealthChecks,
    }
    return []map[string]interface{}{result}, nil
}

func flattenAppEngineApplicationDispatchRules(rules []*appengine.UrlDispatchRule) ([]map[string]interface{}, error) {
    results := make([]map[string]interface{}, 0, len(rules))
    for _, rule := range rules {
        results = append(results, map[string]interface{}{
            "domain":  rule.Domain,
            "path":    rule.Path,
            "service": rule.Service,
        })
    }
    return results, nil
}
google/resource_app_engine_application_test.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package google

import (
    "fmt"
    "testing"

    "github.com/hashicorp/terraform/helper/acctest"
    "github.com/hashicorp/terraform/helper/resource"
)

func TestAccAppEngineApplication_basic(t *testing.T) {
    t.Parallel()

    org := getTestOrgFromEnv(t)
    pid := acctest.RandomWithPrefix("tf-test")
    resource.Test(t, resource.TestCase{
        PreCheck:  func() { testAccPreCheck(t) },
        Providers: testAccProviders,
        Steps: []resource.TestStep{
            {
                Config: testAccAppEngineApplication_basic(pid, org),
                Check: resource.ComposeTestCheckFunc(
                    resource.TestCheckResourceAttrSet("google_app_engine_application.acceptance", "url_dispatch_rule.#"),
                    resource.TestCheckResourceAttrSet("google_app_engine_application.acceptance", "name"),
                    resource.TestCheckResourceAttrSet("google_app_engine_application.acceptance", "code_bucket"),
                    resource.TestCheckResourceAttrSet("google_app_engine_application.acceptance", "default_hostname"),
                    resource.TestCheckResourceAttrSet("google_app_engine_application.acceptance", "default_bucket"),
                ),
            },
            {
                ResourceName:      "google_app_engine_application.acceptance",
                ImportState:       true,
                ImportStateVerify: true,
            },
            {
                Config: testAccAppEngineApplication_update(pid, org),
            },
            {
                ResourceName:      "google_app_engine_application.acceptance",
                ImportState:       true,
                ImportStateVerify: true,
            },
        },
    })
}

func testAccAppEngineApplication_basic(pid, org string) string {
    return fmt.Sprintf(`
resource "google_project" "acceptance" {
  project_id = "%s"
  name       = "%s"
  org_id     = "%s"
}

resource "google_app_engine_application" "acceptance" {
  project        = "${google_project.acceptance.project_id}"
  auth_domain    = "hashicorptest.com"
  location_id    = "us-central"
  serving_status = "SERVING"
}`, pid, pid, org)
}

func testAccAppEngineApplication_update(pid, org string) string {
    return fmt.Sprintf(`
resource "google_project" "acceptance" {
  project_id = "%s"
  name       = "%s"
  org_id     = "%s"
}

resource "google_app_engine_application" "acceptance" {
  project        = "${google_project.acceptance.project_id}"
  auth_domain    = "tf-test.club"
  location_id    = "us-central"
  serving_status = "USER_DISABLED"
}`, pid, pid, org)
}
@@ -5,6 +5,7 @@ import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/helper/customdiff"
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/helper/validation"

@@ -15,7 +16,13 @@ func resourceBigtableInstance() *schema.Resource {
    return &schema.Resource{
        Create: resourceBigtableInstanceCreate,
        Read:   resourceBigtableInstanceRead,
        // TODO: Update is only needed because we're doing ForceNew in CustomizeDiff;
        // when we're done with the deprecation, we can drop CustomizeDiff and make cluster ForceNew.
        Update: schema.Noop,
        Delete: resourceBigtableInstanceDestroy,
        CustomizeDiff: customdiff.All(
            resourceBigTableInstanceClusterCustomizeDiff,
        ),

        Schema: map[string]*schema.Schema{
            "name": {

@@ -25,16 +32,49 @@ func resourceBigtableInstance() *schema.Resource {
            },

            "cluster_id": {
                Type:          schema.TypeString,
                Optional:      true,
                Deprecated:    "Use cluster instead.",
                ConflictsWith: []string{"cluster"},
            },

            "cluster": {
                Type:          schema.TypeSet,
                Optional:      true,
                MaxItems:      1,
                ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "cluster_id": {
                            Type:     schema.TypeString,
                            Optional: true,
                        },
                        "zone": {
                            Type:     schema.TypeString,
                            Optional: true,
                            Computed: true,
                        },
                        "num_nodes": {
                            Type:     schema.TypeInt,
                            Optional: true,
                        },
                        "storage_type": {
                            Type:         schema.TypeString,
                            Optional:     true,
                            Default:      "SSD",
                            ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
                        },
                    },
                },
            },

            "zone": {
                Type:          schema.TypeString,
                Optional:      true,
                Computed:      true,
                ForceNew:      true,
                Deprecated:    "Use cluster instead.",
                ConflictsWith: []string{"cluster"},
            },

            "display_name": {

@@ -45,9 +85,10 @@ func resourceBigtableInstance() *schema.Resource {
            },

            "num_nodes": {
                Type:          schema.TypeInt,
                Optional:      true,
                Deprecated:    "Use cluster instead.",
                ConflictsWith: []string{"cluster"},
            },

            "instance_type": {

@@ -59,11 +100,12 @@ func resourceBigtableInstance() *schema.Resource {
            },

            "storage_type": {
                Type:          schema.TypeString,
                Optional:      true,
                Default:       "SSD",
                ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
                Deprecated:    "Use cluster instead.",
                ConflictsWith: []string{"cluster"},
            },

            "project": {

@@ -76,6 +118,50 @@ func resourceBigtableInstance() *schema.Resource {
    }
}

func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
    if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
        return fmt.Errorf("At least one cluster must be set.")
    }
    if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
        !d.HasChange("storage_type") && !d.HasChange("cluster") {
        return nil
    }
    if d.Get("cluster.#").(int) == 1 {
        // if we have exactly one cluster, and it has the same values as the old top-level
        // values, we can assume the user is trying to go from the deprecated values to the
        // new values, and we shouldn't ForceNew. We know that the top-level values aren't
        // set, because they ConflictWith cluster.
        oldID, _ := d.GetChange("cluster_id")
        oldNodes, _ := d.GetChange("num_nodes")
        oldZone, _ := d.GetChange("zone")
        oldStorageType, _ := d.GetChange("storage_type")
        new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})

        if oldID.(string) == new["cluster_id"].(string) &&
            oldNodes.(int) == new["num_nodes"].(int) &&
            oldZone.(string) == new["zone"].(string) &&
            oldStorageType.(string) == new["storage_type"].(string) {
            return nil
        }
    }
    if d.HasChange("cluster_id") {
        d.ForceNew("cluster_id")
    }
    if d.HasChange("cluster") {
        d.ForceNew("cluster")
    }
    if d.HasChange("zone") {
        d.ForceNew("zone")
    }
    if d.HasChange("num_nodes") {
        d.ForceNew("num_nodes")
    }
    if d.HasChange("storage_type") {
        d.ForceNew("storage_type")
    }
    return nil
}

func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)
    ctx := context.Background()

@@ -85,46 +171,48 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er
        return err
    }

    conf := &bigtable.InstanceWithClustersConfig{
        InstanceID: d.Get("name").(string),
    }

    displayName, ok := d.GetOk("display_name")
    if !ok {
        displayName = conf.InstanceID
    }
    conf.DisplayName = displayName.(string)

    switch d.Get("instance_type").(string) {
    case "DEVELOPMENT":
        conf.InstanceType = bigtable.DEVELOPMENT
    case "PRODUCTION":
        conf.InstanceType = bigtable.PRODUCTION
    }

    if d.Get("cluster.#").(int) > 0 {
        // expand cluster
        conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
        if err != nil {
            return fmt.Errorf("error expanding clusters: %s", err.Error())
        }
    } else {
        // TODO: remove this when we're done with the deprecation period
        zone, err := getZone(d, config)
        if err != nil {
            return err
        }
        cluster := bigtable.ClusterConfig{
            InstanceID: conf.InstanceID,
            NumNodes:   int32(d.Get("num_nodes").(int)),
            Zone:       zone,
            ClusterID:  d.Get("cluster_id").(string),
        }
        switch d.Get("storage_type").(string) {
        case "HDD":
            cluster.StorageType = bigtable.HDD
        case "SSD":
            cluster.StorageType = bigtable.SSD
        }
        conf.Clusters = append(conf.Clusters, cluster)
    }

    c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)

@@ -134,12 +222,12 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er

    defer c.Close()

    err = c.CreateInstanceWithClusters(ctx, conf)
    if err != nil {
        return fmt.Errorf("Error creating instance. %s", err)
    }

    d.SetId(conf.InstanceID)

    return resourceBigtableInstanceRead(d, meta)
}

@@ -153,11 +241,6 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro
        return err
    }

    c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
    if err != nil {
        return fmt.Errorf("Error starting instance admin client. %s", err)

@@ -173,7 +256,37 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro
    }

    d.Set("project", project)
    if d.Get("cluster.#").(int) > 0 {
        clusters := d.Get("cluster").(*schema.Set).List()
        clusterState := []map[string]interface{}{}
        for _, cl := range clusters {
            cluster := cl.(map[string]interface{})
            clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
            if err != nil {
                if isGoogleApiErrorWithCode(err, 404) {
                    log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
                    continue
                }
                return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
            }
            clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
        }
        err = d.Set("cluster", clusterState)
        if err != nil {
            return fmt.Errorf("Error setting clusters in state: %s", err.Error())
        }
        // Zero out the deprecated top-level fields when the cluster block is in use.
        d.Set("cluster_id", "")
        d.Set("zone", "")
        d.Set("num_nodes", 0)
        d.Set("storage_type", "SSD")
    } else {
        // TODO: remove this when we're done with our deprecation period
        zone, err := getZone(d, config)
        if err != nil {
            return err
        }
        d.Set("zone", zone)
    }
    d.Set("name", instance.Name)
    d.Set("display_name", instance.DisplayName)

@@ -206,3 +319,38 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e

    return nil
}

func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
    return map[string]interface{}{
        "zone":         c.Zone,
        "num_nodes":    c.ServeNodes,
        "cluster_id":   c.Name,
        "storage_type": storageType,
    }
}

func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
    results := make([]bigtable.ClusterConfig, 0, len(clusters))
    for _, c := range clusters {
        cluster := c.(map[string]interface{})
        zone := defaultZone
        if confZone, ok := cluster["zone"]; ok {
            zone = confZone.(string)
        }
        var storageType bigtable.StorageType
        switch cluster["storage_type"].(string) {
        case "SSD":
            storageType = bigtable.SSD
        case "HDD":
            storageType = bigtable.HDD
        }
        results = append(results, bigtable.ClusterConfig{
            InstanceID:  instanceID,
            Zone:        zone,
            ClusterID:   cluster["cluster_id"].(string),
            NumNodes:    int32(cluster["num_nodes"].(int)),
            StorageType: storageType,
        })
    }
    return results
}
@@ -107,11 +107,13 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc {

func testAccBigtableInstance(instanceName string) string {
    return fmt.Sprintf(`
resource "google_bigtable_instance" "instance" {
  name = "%s"
  cluster {
    cluster_id   = "%s"
    zone         = "us-central1-b"
    num_nodes    = 3
    storage_type = "HDD"
  }
}
`, instanceName, instanceName)
}

@@ -119,9 +121,11 @@ resource "google_bigtable_instance" "instance" {
func testAccBigtableInstance_development(instanceName string) string {
    return fmt.Sprintf(`
resource "google_bigtable_instance" "instance" {
  name = "%s"
  cluster {
    cluster_id = "%s"
    zone       = "us-central1-b"
  }
  instance_type = "DEVELOPMENT"
}
`, instanceName, instanceName)
@@ -363,6 +363,9 @@ func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v inter
    l := v.([]interface{})
    req := make([]interface{}, 0, len(l))
    for _, raw := range l {
        if raw == nil {
            continue
        }
        original := raw.(map[string]interface{})
        transformed := make(map[string]interface{})
@@ -411,6 +411,9 @@ func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d
    l := v.([]interface{})
    req := make([]interface{}, 0, len(l))
    for _, raw := range l {
        if raw == nil {
            continue
        }
        original := raw.(map[string]interface{})
        transformed := make(map[string]interface{})
@@ -78,10 +78,11 @@ func joinMapKeys(mapToJoin *map[int]bool) string {

func resourceCloudFunctionsFunction() *schema.Resource {
    return &schema.Resource{
        Create:        resourceCloudFunctionsCreate,
        Read:          resourceCloudFunctionsRead,
        Update:        resourceCloudFunctionsUpdate,
        Delete:        resourceCloudFunctionsDestroy,
        CustomizeDiff: resourceCloudFunctionsCustomizeDiff,

        Importer: &schema.ResourceImporter{
            State: schema.ImportStatePassthrough,

@@ -177,7 +178,8 @@ func resourceCloudFunctionsFunction() *schema.Resource {
            "trigger_bucket": {
                Type:          schema.TypeString,
                Optional:      true,
                Computed:      true,
                Deprecated:    "This field is deprecated. Use `event_trigger` instead.",
                ConflictsWith: []string{"trigger_http", "trigger_topic"},
            },

@@ -191,10 +193,46 @@ func resourceCloudFunctionsFunction() *schema.Resource {
            "trigger_topic": {
                Type:          schema.TypeString,
                Optional:      true,
                Computed:      true,
                Deprecated:    "This field is deprecated. Use `event_trigger` instead.",
                ConflictsWith: []string{"trigger_http", "trigger_bucket"},
            },

            "event_trigger": {
                Type:          schema.TypeList,
                Optional:      true,
                Computed:      true,
                ConflictsWith: []string{"trigger_http", "retry_on_failure", "trigger_topic", "trigger_bucket"},
                MaxItems:      1,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "event_type": {
                            Type:     schema.TypeString,
                            ForceNew: true,
                            Required: true,
                        },
                        "resource": {
                            Type:     schema.TypeString,
                            Required: true,
                        },
                        "failure_policy": {
                            Type:     schema.TypeList,
                            Optional: true,
                            Computed: true,
                            MaxItems: 1,
                            Elem: &schema.Resource{
                                Schema: map[string]*schema.Schema{
                                    "retry": {
                                        Type: schema.TypeBool,
                                        // not strictly required, but this way an empty block can't be specified
                                        Required: true,
                                    },
                                },
                            },
                        },
                    },
                },
            },

            "https_trigger_url": {
                Type:     schema.TypeString,
                Optional: true,

@@ -204,6 +242,8 @@ func resourceCloudFunctionsFunction() *schema.Resource {
            "retry_on_failure": {
                Type:          schema.TypeBool,
                Optional:      true,
                Computed:      true,
                Deprecated:    "This field is deprecated. Use `event_trigger.failure_policy.retry` instead.",
                ConflictsWith: []string{"trigger_http"},
            },

@@ -225,6 +265,28 @@ func resourceCloudFunctionsFunction() *schema.Resource {
    }
}

func resourceCloudFunctionsCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error {
    if diff.HasChange("trigger_topic") {
        _, n := diff.GetChange("trigger_topic")
        if n == "" {
            diff.Clear("trigger_topic")
        } else {
            diff.ForceNew("trigger_topic")
        }
    }

    if diff.HasChange("trigger_bucket") {
        _, n := diff.GetChange("trigger_bucket")
        if n == "" {
            diff.Clear("trigger_bucket")
        } else {
            diff.ForceNew("trigger_bucket")
        }
    }

    return nil
}

func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

@@ -253,7 +315,8 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro
    }

    function := &cloudfunctions.CloudFunction{
        Name:            cloudFuncId.cloudFunctionId(),
        ForceSendFields: []string{},
    }

    sourceArchiveBucket := d.Get("source_archive_bucket").(string)

@@ -277,13 +340,11 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro
        function.Timeout = fmt.Sprintf("%vs", v.(int))
    }

    if v, ok := d.GetOk("event_trigger"); ok {
        function.EventTrigger = expandEventTrigger(v.([]interface{}), project)
    } else if v, ok := d.GetOk("trigger_http"); ok && v.(bool) {
        function.HttpsTrigger = &cloudfunctions.HttpsTrigger{}
    } else if v, ok := d.GetOk("trigger_topic"); ok {
        // Make PubSub event publish as in https://cloud.google.com/functions/docs/calling/pubsub
        function.EventTrigger = &cloudfunctions.EventTrigger{
            // Other events are not supported

@@ -297,10 +358,7 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro
                Retry: &cloudfunctions.Retry{},
            }
        }
    } else if v, ok := d.GetOk("trigger_bucket"); ok {
        // Make Storage event as in https://cloud.google.com/functions/docs/calling/storage
        function.EventTrigger = &cloudfunctions.EventTrigger{
            EventType: "providers/cloud.storage/eventTypes/object.change",

@@ -313,10 +371,8 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro
                Retry: &cloudfunctions.Retry{},
            }
        }
    } else {
        return fmt.Errorf("One of `event_trigger` or `trigger_http` is required: " +
            "You must specify a trigger when deploying a new function.")
    }

@@ -389,16 +445,24 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error
        d.Set("https_trigger_url", function.HttpsTrigger.Url)
    }

    d.Set("event_trigger", flattenEventTrigger(function.EventTrigger))
    if function.EventTrigger != nil {
        switch function.EventTrigger.EventType {
        // From https://github.com/google/google-api-go-client/blob/master/cloudfunctions/v1/cloudfunctions-gen.go#L335
        case "google.pubsub.topic.publish":
            if _, ok := d.GetOk("trigger_topic"); ok {
                d.Set("trigger_topic", GetResourceNameFromSelfLink(function.EventTrigger.Resource))
            }
        case "providers/cloud.storage/eventTypes/object.change":
            if _, ok := d.GetOk("trigger_bucket"); ok {
                d.Set("trigger_bucket", GetResourceNameFromSelfLink(function.EventTrigger.Resource))
            }
        }

        if _, ok := d.GetOk("retry_on_failure"); ok {
            retry := function.EventTrigger.FailurePolicy != nil && function.EventTrigger.FailurePolicy.Retry != nil
            d.Set("retry_on_failure", retry)
        }
    }
    d.Set("region", cloudFuncId.Region)
    d.Set("project", cloudFuncId.Project)

@@ -410,6 +474,11 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro
    log.Printf("[DEBUG]: Updating google_cloudfunctions_function")
    config := meta.(*Config)

    project, err := getProject(d, config)
    if err != nil {
        return err
    }

    cloudFuncId, err := parseCloudFunctionId(d.Id(), config)
    if err != nil {
        return err

@@ -455,6 +524,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro
        updateMaskArr = append(updateMaskArr, "environment_variables")
    }

    // Event trigger will run after failure policy and take precedence
    if d.HasChange("retry_on_failure") {
        if d.Get("retry_on_failure").(bool) {
            if function.EventTrigger == nil {

@@ -467,6 +537,11 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro
        updateMaskArr = append(updateMaskArr, "eventTrigger.failurePolicy.retry")
    }

    if d.HasChange("event_trigger") {
        function.EventTrigger = expandEventTrigger(d.Get("event_trigger").([]interface{}), project)
        updateMaskArr = append(updateMaskArr, "eventTrigger", "eventTrigger.failurePolicy.retry")
    }

    if len(updateMaskArr) > 0 {
        log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function)
        updateMask := strings.Join(updateMaskArr, ",")

@@ -509,3 +584,70 @@ func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) err

    return nil
}

func expandEventTrigger(configured []interface{}, project string) *cloudfunctions.EventTrigger {
    if len(configured) == 0 {
        return nil
    }

    if data, ok := configured[0].(map[string]interface{}); ok {
        eventType := data["event_type"].(string)
        shape := ""
        switch {
        case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"):
            shape = "projects/%s/buckets/%s"
        case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"):
            shape = "projects/%s/topics/%s"
        }

        return &cloudfunctions.EventTrigger{
            EventType:     eventType,
            Resource:      fmt.Sprintf(shape, project, data["resource"].(string)),
            FailurePolicy: expandFailurePolicy(data["failure_policy"].([]interface{})),
        }
    }

    return nil
}

func flattenEventTrigger(eventTrigger *cloudfunctions.EventTrigger) []map[string]interface{} {
    result := make([]map[string]interface{}, 0, 1)
    if eventTrigger == nil {
        return result
    }

    result = append(result, map[string]interface{}{
        "event_type":     eventTrigger.EventType,
        "resource":       GetResourceNameFromSelfLink(eventTrigger.Resource),
        "failure_policy": flattenFailurePolicy(eventTrigger.FailurePolicy),
    })

    return result
}

func expandFailurePolicy(configured []interface{}) *cloudfunctions.FailurePolicy {
    if len(configured) == 0 {
        return &cloudfunctions.FailurePolicy{}
    }

    if data, ok := configured[0].(map[string]interface{}); ok && data["retry"].(bool) {
        return &cloudfunctions.FailurePolicy{
            Retry: &cloudfunctions.Retry{},
        }
    }

    return nil
}

func flattenFailurePolicy(failurePolicy *cloudfunctions.FailurePolicy) []map[string]interface{} {
    result := make([]map[string]interface{}, 0, 1)
    if failurePolicy == nil {
        return nil
    }

    result = append(result, map[string]interface{}{
        "retry": failurePolicy.Retry != nil,
    })

    return result
}
@@ -134,8 +134,6 @@ func TestAccCloudFunctionsFunction_update(t *testing.T) {
func TestAccCloudFunctionsFunction_pubsub(t *testing.T) {
    t.Parallel()

    funcResourceName := "google_cloudfunctions_function.function"
    functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
    bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt())

@@ -154,6 +152,39 @@ func TestAccCloudFunctionsFunction_pubsub(t *testing.T) {
            {
                Config: testAccCloudFunctionsFunction_pubsub(functionName, bucketName,
                    topicName, zipFilePath),
            },
            {
                ResourceName:      funcResourceName,
                ImportState:       true,
                ImportStateVerify: true,
            },
        },
    })
}

func TestAccCloudFunctionsFunction_oldPubsub(t *testing.T) {
    t.Parallel()

    var function cloudfunctions.CloudFunction

    funcResourceName := "google_cloudfunctions_function.function"
    functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
    bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt())
    topicName := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(10))
    zipFilePath, err := createZIPArchiveForIndexJs(testPubSubTriggerPath)
    if err != nil {
        t.Fatal(err.Error())
    }
    defer os.Remove(zipFilePath) // clean up

    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckCloudFunctionsFunctionDestroy,
        Steps: []resource.TestStep{
            {
                Config: testAccCloudFunctionsFunction_oldPubsub(functionName, bucketName,
                    topicName, zipFilePath),
                Check: resource.ComposeTestCheckFunc(
                    testAccCloudFunctionsFunctionExists(
                        funcResourceName, &function),

@@ -169,6 +200,43 @@ func TestAccCloudFunctionsFunction_oldPubsub(t *testing.T) {
                        "trigger_topic", topicName),
                ),
            },
            {
                ResourceName:            funcResourceName,
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"retry_on_failure", "trigger_topic"},
            },
        },
    })
}

func TestAccCloudFunctionsFunction_bucket(t *testing.T) {
    t.Parallel()
    funcResourceName := "google_cloudfunctions_function.function"
    functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
    bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt())
    zipFilePath, err := createZIPArchiveForIndexJs(testBucketTriggerPath)
    if err != nil {
        t.Fatal(err.Error())
    }
    defer os.Remove(zipFilePath) // clean up

    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckCloudFunctionsFunctionDestroy,
        Steps: []resource.TestStep{
            {
                Config: testAccCloudFunctionsFunction_bucket(functionName, bucketName, zipFilePath),
            },
            {
                ResourceName:      funcResourceName,
                ImportState:       true,
                ImportStateVerify: true,
            },
            {
                Config: testAccCloudFunctionsFunction_bucketNoRetry(functionName, bucketName, zipFilePath),
            },
            {
                ResourceName:      funcResourceName,
                ImportState:       true,

@@ -177,7 +245,8 @@ func TestAccCloudFunctionsFunction_bucket(t *testing.T) {
        },
    })
}

func TestAccCloudFunctionsFunction_oldBucket(t *testing.T) {
    t.Parallel()

    var function cloudfunctions.CloudFunction

@@ -197,7 +266,7 @@ func TestAccCloudFunctionsFunction_oldBucket(t *testing.T) {
        CheckDestroy: testAccCheckCloudFunctionsFunctionDestroy,
        Steps: []resource.TestStep{
            {
                Config: testAccCloudFunctionsFunction_oldBucket(functionName, bucketName, zipFilePath),
                Check: resource.ComposeTestCheckFunc(
                    testAccCloudFunctionsFunctionExists(
                        funcResourceName, &function),

@@ -214,12 +283,13 @@ func TestAccCloudFunctionsFunction_oldBucket(t *testing.T) {
                ),
            },
            {
                ResourceName:            funcResourceName,
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"retry_on_failure", "trigger_bucket"},
            },
            {
                Config: testAccCloudFunctionsFunction_OldBucketNoRetry(functionName, bucketName, zipFilePath),
                Check: resource.ComposeTestCheckFunc(
                    testAccCloudFunctionsFunctionExists(
                        funcResourceName, &function),

@@ -236,9 +306,10 @@ func TestAccCloudFunctionsFunction_oldBucket(t *testing.T) {
                ),
            },
            {
                ResourceName:            funcResourceName,
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"retry_on_failure", "trigger_bucket"},
            },
        },
    })

@@ -471,7 +542,7 @@ resource "google_cloudfunctions_function" "function" {
}`, bucketName, zipFilePath, functionName)
}

func testAccCloudFunctionsFunction_oldPubsub(functionName string, bucketName string,
    topic string, zipFilePath string) string {
    return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {

@@ -500,6 +571,40 @@ resource "google_cloudfunctions_function" "function" {
}`, bucketName, zipFilePath, topic, functionName)
}

func testAccCloudFunctionsFunction_pubsub(functionName string, bucketName string,
    topic string, zipFilePath string) string {
    return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
  name = "%s"
}

resource "google_storage_bucket_object" "archive" {
  name   = "index.zip"
  bucket = "${google_storage_bucket.bucket.name}"
  source = "%s"
}

resource "google_pubsub_topic" "sub" {
  name = "%s"
}

resource "google_cloudfunctions_function" "function" {
  name                  = "%s"
  available_memory_mb   = 128
  source_archive_bucket = "${google_storage_bucket.bucket.name}"
  source_archive_object = "${google_storage_bucket_object.archive.name}"
  timeout               = 61
  entry_point           = "helloPubSub"
  event_trigger {
    event_type = "providers/cloud.pubsub/eventTypes/topic.publish"
    resource   = "${google_pubsub_topic.sub.name}"
    failure_policy {
      retry = false
    }
  }
}`, bucketName, zipFilePath, topic, functionName)
}

func testAccCloudFunctionsFunction_bucket(functionName string, bucketName string,
    zipFilePath string) string {
    return fmt.Sprintf(`

@@ -513,6 +618,63 @@ resource "google_storage_bucket_object" "archive" {
  source = "%s"
}

resource "google_cloudfunctions_function" "function" {
  name                  = "%s"
  available_memory_mb   = 128
  source_archive_bucket = "${google_storage_bucket.bucket.name}"
  source_archive_object = "${google_storage_bucket_object.archive.name}"
  timeout               = 61
  entry_point           = "helloGCS"
  event_trigger {
    event_type = "providers/cloud.storage/eventTypes/object.change"
    resource   = "${google_storage_bucket.bucket.name}"
    failure_policy {
      retry = true
    }
  }
}`, bucketName, zipFilePath, functionName)
}

func testAccCloudFunctionsFunction_bucketNoRetry(functionName string, bucketName string,
    zipFilePath string) string {
    return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
  name = "%s"
}

resource "google_storage_bucket_object" "archive" {
  name   = "index.zip"
  bucket = "${google_storage_bucket.bucket.name}"
  source = "%s"
}

resource "google_cloudfunctions_function" "function" {
  name                  = "%s"
  available_memory_mb   = 128
  source_archive_bucket = "${google_storage_bucket.bucket.name}"
  source_archive_object = "${google_storage_bucket_object.archive.name}"
  timeout               = 61
  entry_point           = "helloGCS"
  event_trigger {
    event_type = "providers/cloud.storage/eventTypes/object.change"
    resource   = "${google_storage_bucket.bucket.name}"
  }
}`, bucketName, zipFilePath, functionName)
}

func testAccCloudFunctionsFunction_oldBucket(functionName string, bucketName string,
    zipFilePath string) string {
    return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
  name = "%s"
}

resource "google_storage_bucket_object" "archive" {
  name   = "index.zip"
  bucket = "${google_storage_bucket.bucket.name}"
  source = "%s"
}

resource "google_cloudfunctions_function" "function" {
  name                = "%s"
  available_memory_mb = 128

@@ -525,7 +687,7 @@ resource "google_cloudfunctions_function" "function" {
}`, bucketName, zipFilePath, functionName)
}

func testAccCloudFunctionsFunction_OldBucketNoRetry(functionName string, bucketName string,
    zipFilePath string) string {
    return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
@@ -629,6 +629,9 @@ func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.Res
    l := v.([]interface{})
    req := make([]interface{}, 0, len(l))
    for _, raw := range l {
        if raw == nil {
            continue
        }
        original := raw.(map[string]interface{})
        transformed := make(map[string]interface{})
@@ -159,10 +159,11 @@ func resourceComputeBackendService() *schema.Resource {
            },

            "custom_request_headers": &schema.Schema{
                Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
                Type:       schema.TypeSet,
                Optional:   true,
                Elem:       &schema.Schema{Type: schema.TypeString},
                Set:        schema.HashString,
            },

            "description": &schema.Schema{
@@ -744,6 +744,9 @@ func expandComputeFirewallAllow(v interface{}, d *schema.ResourceData, config *C
    l := v.([]interface{})
    req := make([]interface{}, 0, len(l))
    for _, raw := range l {
        if raw == nil {
            continue
        }
        original := raw.(map[string]interface{})
        transformed := make(map[string]interface{})

@@ -779,6 +782,9 @@ func expandComputeFirewallDeny(v interface{}, d *schema.ResourceData, config *Co
    l := v.([]interface{})
    req := make([]interface{}, 0, len(l))
    for _, raw := range l {
        if raw == nil {
            continue
        }
        original := raw.(map[string]interface{})
        transformed := make(map[string]interface{})
@@ -56,10 +56,11 @@ func resourceComputeGlobalForwardingRule() *schema.Resource {
            },

            "labels": &schema.Schema{
                Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
                Type:       schema.TypeMap,
                Optional:   true,
                Elem:       &schema.Schema{Type: schema.TypeString},
                Set:        schema.HashString,
            },

            "label_fingerprint": &schema.Schema{
@@ -38,10 +38,10 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
            },

            "version": &schema.Schema{
                Type:       schema.TypeList,
                Optional:   true,
                Computed:   true,
                Deprecated: "Use the instance_group_manager resource in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider-versions.html for more details.",
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "name": &schema.Schema{

@@ -169,10 +169,10 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
            },

            "auto_healing_policies": &schema.Schema{
                Type:       schema.TypeList,
                Optional:   true,
                MaxItems:   1,
                Deprecated: "Use the instance_group_manager resource in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider-versions.html for more details.",
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "health_check": &schema.Schema{

@@ -191,10 +191,10 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
            },

            "rolling_update_policy": &schema.Schema{
                Type:       schema.TypeList,
                Optional:   true,
                MaxItems:   1,
                Deprecated: "Use the instance_group_manager resource in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider-versions.html for more details.",
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "minimal_action": &schema.Schema{
@ -625,6 +625,9 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *sche
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

@ -140,7 +140,7 @@ func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error
	descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
	} else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) {
		obj["description"] = descriptionProp
	}
	networkProp, err := expandComputeRouterNetwork(d.Get("network"), d, config)
@ -270,7 +270,7 @@ func resourceComputeRouterUpdate(d *schema.ResourceData, meta interface{}) error
	descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
	} else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) {
		obj["description"] = descriptionProp
	}
	networkProp, err := expandComputeRouterNetwork(d.Get("network"), d, config)
@ -515,14 +515,14 @@ func expandComputeRouterBgp(v interface{}, d *schema.ResourceData, config *Confi
	transformedAdvertisedGroups, err := expandComputeRouterBgpAdvertisedGroups(original["advertised_groups"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedAdvertisedGroups); val.IsValid() && !isEmptyValue(val) {
	} else {
		transformed["advertisedGroups"] = transformedAdvertisedGroups
	}

	transformedAdvertisedIpRanges, err := expandComputeRouterBgpAdvertisedIpRanges(original["advertised_ip_ranges"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedAdvertisedIpRanges); val.IsValid() && !isEmptyValue(val) {
	} else {
		transformed["advertisedIpRanges"] = transformedAdvertisedIpRanges
	}

@ -545,20 +545,23 @@ func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.ResourceD
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

		transformedRange, err := expandComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) {
		} else {
			transformed["range"] = transformedRange
		}

		transformedDescription, err := expandComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) {
		} else {
			transformed["description"] = transformedDescription
		}

@ -639,6 +639,9 @@ func expandComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceDa
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

@ -97,6 +97,7 @@ func resourceContainerCluster() *schema.Resource {
			},

			"region": {
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeString,
				Optional:   true,
				Computed:   true,
@ -201,9 +202,10 @@ func resourceContainerCluster() *schema.Resource {
			},

			"enable_binary_authorization": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeBool,
				Optional:   true,
				Default:    false,
			},

			"enable_kubernetes_alpha": {
@ -214,10 +216,11 @@ func resourceContainerCluster() *schema.Resource {
			},

			"enable_tpu": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeBool,
				Optional:   true,
				ForceNew:   true,
				Default:    false,
			},

			"enable_legacy_abac": {
@ -392,9 +395,10 @@ func resourceContainerCluster() *schema.Resource {
			},

			"pod_security_policy_config": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeList,
				Optional:   true,
				MaxItems:   1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
@ -501,13 +505,15 @@ func resourceContainerCluster() *schema.Resource {
			},

			"private_cluster": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeBool,
				Optional:   true,
				ForceNew:   true,
				Default:    false,
			},

			"master_ipv4_cidr_block": {
				Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
				Type:       schema.TypeString,
				Optional:   true,
				ForceNew:   true,
@ -54,9 +54,10 @@ func resourceContainerNodePool() *schema.Resource {
			ForceNew: true,
		},
		"region": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
			ForceNew: true,
			Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
			Type:       schema.TypeString,
			Optional:   true,
			ForceNew:   true,
		},
	}),
}
@ -85,10 +86,11 @@ var schemaNodePool = map[string]*schema.Schema{
	},

	"max_pods_per_node": &schema.Schema{
		Type:     schema.TypeInt,
		Optional: true,
		ForceNew: true,
		Computed: true,
		Deprecated: "This field is in beta and will be removed from this provider. Use it in the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
		Type:       schema.TypeInt,
		Optional:   true,
		ForceNew:   true,
		Computed:   true,
	},

	"initial_node_count": &schema.Schema{
@ -524,6 +524,9 @@ func expandFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, co
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

@ -558,6 +561,9 @@ func expandFilestoreInstanceNetworks(v interface{}, d *schema.ResourceData, conf
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

@ -92,10 +92,12 @@ func resourceGoogleProject() *schema.Resource {
				Set:      schema.HashString,
			},
			"app_engine": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     appEngineResource(),
				MaxItems: 1,
				Type:       schema.TypeList,
				Optional:   true,
				Computed:   true,
				Elem:       appEngineResource(),
				MaxItems:   1,
				Deprecated: "Use the google_app_engine_application resource instead.",
			},
		},
	}
@ -206,13 +208,8 @@ func appEngineFeatureSettingsResource() *schema.Resource {
}

func resourceGoogleProjectCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error {
	if old, new := diff.GetChange("app_engine.#"); old != nil && new != nil && old.(int) > 0 && new.(int) < 1 {
		// if we're going from app_engine set to unset, we need to delete the project, app_engine has no delete
		return diff.ForceNew("app_engine")
	} else if old, _ := diff.GetChange("app_engine.0.location_id"); diff.HasChange("app_engine.0.location_id") && old != nil && old.(string) != "" {
		// if location_id was already set, and has a new value, that forces a new app
		// if location_id wasn't set, don't force a new value, as we're just enabling app engine
		return diff.ForceNew("app_engine.0.location_id")
	if old, _ := diff.GetChange("app_engine.0.location_id"); diff.HasChange("app_engine.0.location_id") && old != nil && old.(string) != "" {
		return fmt.Errorf("Cannot change app_engine.0.location_id once the app is created.")
	}
	return nil
}
google/resource_monitoring_alert_policy.go (new file, 1245 lines; diff suppressed because it is too large)
google/resource_monitoring_alert_policy_test.go (new file, 211 lines)
@ -0,0 +1,211 @@
package google

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// Stackdriver tests cannot be run in parallel otherwise they will error out with:
// Error 503: Too many concurrent edits to the project configuration. Please try again.

func TestAccMonitoringAlertPolicy_basic(t *testing.T) {

	alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	filter := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAlertPolicyDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, "ALIGN_RATE", filter),
			},
			resource.TestStep{
				ResourceName:      "google_monitoring_alert_policy.basic",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

func TestAccMonitoringAlertPolicy_update(t *testing.T) {

	alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	conditionName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	filter1 := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`
	aligner1 := "ALIGN_RATE"
	filter2 := `metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"`
	aligner2 := "ALIGN_MAX"

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAlertPolicyDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner1, filter1),
			},
			resource.TestStep{
				ResourceName:      "google_monitoring_alert_policy.basic",
				ImportState:       true,
				ImportStateVerify: true,
			},
			resource.TestStep{
				Config: testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner2, filter2),
			},
			resource.TestStep{
				ResourceName:      "google_monitoring_alert_policy.basic",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

func TestAccMonitoringAlertPolicy_full(t *testing.T) {

	alertName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	conditionName1 := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	conditionName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAlertPolicyDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2),
			},
			resource.TestStep{
				ResourceName:      "google_monitoring_alert_policy.full",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

func testAccCheckAlertPolicyDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_monitoring_alert_policy" {
			continue
		}

		name := rs.Primary.Attributes["name"]

		url := fmt.Sprintf("https://monitoring.googleapis.com/v3/%s", name)
		_, err := sendRequest(config, "GET", url, nil)

		if err == nil {
			return fmt.Errorf("Error, alert policy %s still exists", name)
		}
	}

	return nil
}

func testAccMonitoringAlertPolicy_basic(alertName, conditionName, aligner, filter string) string {
	return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "basic" {
  display_name = "%s"
  enabled      = true
  combiner     = "OR"

  conditions = [
    {
      display_name = "%s"

      condition_threshold = {
        aggregations = [
          {
            alignment_period   = "60s"
            per_series_aligner = "%s"
          },
        ]

        duration   = "60s"
        comparison = "COMPARISON_GT"
        filter     = "%s"
      }
    },
  ]
}
`, alertName, conditionName, aligner, filter)
}

func testAccMonitoringAlertPolicy_full(alertName, conditionName1, conditionName2 string) string {
	return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "full" {
  display_name = "%s"
  combiner     = "OR"
  enabled      = true

  conditions = [
    {
      display_name = "%s"

      condition_threshold = {
        threshold_value = 50

        aggregations = [
          {
            alignment_period     = "60s"
            per_series_aligner   = "ALIGN_RATE"
            cross_series_reducer = "REDUCE_MEAN"

            group_by_fields = [
              "metric.label.device_name",
              "project",
              "resource.label.instance_id",
              "resource.label.zone",
            ]
          },
        ]

        duration   = "60s"
        comparison = "COMPARISON_GT"

        trigger = {
          percent = 10
        }

        filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""
      }
    },
    {
      condition_absent {
        duration = "3600s"
        filter   = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\""

        aggregations {
          alignment_period     = "60s"
          cross_series_reducer = "REDUCE_MEAN"
          per_series_aligner   = "ALIGN_MEAN"

          group_by_fields = [
            "project",
            "resource.label.instance_id",
            "resource.label.zone",
          ]
        }

        trigger {
          count = 1
        }
      }

      display_name = "%s"
    },
  ]
}
`, alertName, conditionName1, conditionName2)
}
@ -13,7 +13,7 @@ import (
	"github.com/hashicorp/terraform/helper/validation"

	"google.golang.org/api/googleapi"
	sqladmin "google.golang.org/api/sqladmin/v1beta4"
	"google.golang.org/api/sqladmin/v1beta4"
)

var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{
@ -442,260 +442,36 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{})
	if err != nil {
		return err
	}
	databaseVersion := d.Get("database_version").(string)

	_settingsList := d.Get("settings").([]interface{})

	_settings := _settingsList[0].(map[string]interface{})
	settings := &sqladmin.Settings{
		Tier:            _settings["tier"].(string),
		ForceSendFields: []string{"StorageAutoResize"},
	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else {
		name = resource.UniqueId()
	}

	if v, ok := _settings["activation_policy"]; ok {
		settings.ActivationPolicy = v.(string)
	}

	if v, ok := _settings["authorized_gae_applications"]; ok {
		settings.AuthorizedGaeApplications = make([]string, 0)
		for _, app := range v.([]interface{}) {
			settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications,
				app.(string))
		}
	}

	if v, ok := _settings["availability_type"]; ok {
		settings.AvailabilityType = v.(string)
	}

	if v, ok := _settings["backup_configuration"]; ok {
		_backupConfigurationList := v.([]interface{})

		if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil {
			settings.BackupConfiguration = &sqladmin.BackupConfiguration{}
			_backupConfiguration := _backupConfigurationList[0].(map[string]interface{})

			if vp, okp := _backupConfiguration["binary_log_enabled"]; okp {
				settings.BackupConfiguration.BinaryLogEnabled = vp.(bool)
			}

			if vp, okp := _backupConfiguration["enabled"]; okp {
				settings.BackupConfiguration.Enabled = vp.(bool)
			}

			if vp, okp := _backupConfiguration["start_time"]; okp {
				settings.BackupConfiguration.StartTime = vp.(string)
			}
		}
	}

	if v, ok := _settings["crash_safe_replication"]; ok {
		settings.CrashSafeReplicationEnabled = v.(bool)
	}

	// 1st Generation instances don't support the disk_autoresize parameter
	if !isFirstGen(d) {
		autoResize := _settings["disk_autoresize"].(bool)
		settings.StorageAutoResize = &autoResize
	}

	if v, ok := _settings["disk_size"]; ok && v.(int) > 0 {
		settings.DataDiskSizeGb = int64(v.(int))
	}

	if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 {
		settings.DataDiskType = v.(string)
	}

	if v, ok := _settings["database_flags"]; ok {
		settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0)
		_databaseFlagsList := v.([]interface{})
		for _, _flag := range _databaseFlagsList {
			_entry := _flag.(map[string]interface{})
			flag := &sqladmin.DatabaseFlags{}
			if vp, okp := _entry["name"]; okp {
				flag.Name = vp.(string)
			}

			if vp, okp := _entry["value"]; okp {
				flag.Value = vp.(string)
			}

			settings.DatabaseFlags = append(settings.DatabaseFlags, flag)
		}
	}

	if v, ok := _settings["ip_configuration"]; ok {
		_ipConfigurationList := v.([]interface{})

		if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil {
			settings.IpConfiguration = &sqladmin.IpConfiguration{}
			_ipConfiguration := _ipConfigurationList[0].(map[string]interface{})

			if vp, okp := _ipConfiguration["ipv4_enabled"]; okp {
				settings.IpConfiguration.Ipv4Enabled = vp.(bool)
			}

			if vp, okp := _ipConfiguration["require_ssl"]; okp {
				settings.IpConfiguration.RequireSsl = vp.(bool)
			}

			if vp, okp := _ipConfiguration["authorized_networks"]; okp {
				settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0)
				_authorizedNetworksList := vp.(*schema.Set).List()
				for _, _acl := range _authorizedNetworksList {
					_entry := _acl.(map[string]interface{})
					entry := &sqladmin.AclEntry{}

					if vpp, okpp := _entry["expiration_time"]; okpp {
						entry.ExpirationTime = vpp.(string)
					}

					if vpp, okpp := _entry["name"]; okpp {
						entry.Name = vpp.(string)
					}

					if vpp, okpp := _entry["value"]; okpp {
						entry.Value = vpp.(string)
					}

					settings.IpConfiguration.AuthorizedNetworks = append(
						settings.IpConfiguration.AuthorizedNetworks, entry)
				}
			}
		}
	}

	if v, ok := _settings["location_preference"]; ok {
		_locationPreferenceList := v.([]interface{})

		if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil {
			settings.LocationPreference = &sqladmin.LocationPreference{}
			_locationPreference := _locationPreferenceList[0].(map[string]interface{})

			if vp, okp := _locationPreference["follow_gae_application"]; okp {
				settings.LocationPreference.FollowGaeApplication = vp.(string)
			}

			if vp, okp := _locationPreference["zone"]; okp {
				settings.LocationPreference.Zone = vp.(string)
			}
		}
	}

	if v, ok := _settings["maintenance_window"]; ok {
		windows := v.([]interface{})
		if len(windows) > 0 && windows[0] != nil {
			settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{}
			window := windows[0].(map[string]interface{})

			if vp, okp := window["day"]; okp {
				settings.MaintenanceWindow.Day = int64(vp.(int))
			}

			if vp, okp := window["hour"]; okp {
				settings.MaintenanceWindow.Hour = int64(vp.(int))
			}

			if vp, ok := window["update_track"]; ok {
				if len(vp.(string)) > 0 {
					settings.MaintenanceWindow.UpdateTrack = vp.(string)
				}
			}
		}
	}

	if v, ok := _settings["pricing_plan"]; ok {
		settings.PricingPlan = v.(string)
	}

	if v, ok := _settings["replication_type"]; ok {
		settings.ReplicationType = v.(string)
	}

	if v, ok := _settings["user_labels"]; ok {
		settings.UserLabels = convertStringMap(v.(map[string]interface{}))
	}
	d.Set("name", name)

	instance := &sqladmin.DatabaseInstance{
		Region:          region,
		Settings:        settings,
		DatabaseVersion: databaseVersion,
		Name:                 name,
		Region:               region,
		Settings:             expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{}), !isFirstGen(d)),
		DatabaseVersion:      d.Get("database_version").(string),
		MasterInstanceName:   d.Get("master_instance_name").(string),
		ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})),
	}

	if v, ok := d.GetOk("name"); ok {
		instance.Name = v.(string)
	} else {
		instance.Name = resource.UniqueId()
		d.Set("name", instance.Name)
	}

	if v, ok := d.GetOk("replica_configuration"); ok {
		_replicaConfigurationList := v.([]interface{})

		if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil {
			replicaConfiguration := &sqladmin.ReplicaConfiguration{}
			mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{}
			_replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{})

			if vp, okp := _replicaConfiguration["failover_target"]; okp {
				replicaConfiguration.FailoverTarget = vp.(bool)
			}

			if vp, okp := _replicaConfiguration["ca_certificate"]; okp {
				mySqlReplicaConfiguration.CaCertificate = vp.(string)
			}

			if vp, okp := _replicaConfiguration["client_certificate"]; okp {
				mySqlReplicaConfiguration.ClientCertificate = vp.(string)
			}

			if vp, okp := _replicaConfiguration["client_key"]; okp {
				mySqlReplicaConfiguration.ClientKey = vp.(string)
			}

			if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp {
				mySqlReplicaConfiguration.ConnectRetryInterval = int64(vp.(int))
			}

			if vp, okp := _replicaConfiguration["dump_file_path"]; okp {
				mySqlReplicaConfiguration.DumpFilePath = vp.(string)
			}

			if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp {
				mySqlReplicaConfiguration.MasterHeartbeatPeriod = int64(vp.(int))
			}

			if vp, okp := _replicaConfiguration["password"]; okp {
				mySqlReplicaConfiguration.Password = vp.(string)
			}

			if vp, okp := _replicaConfiguration["ssl_cipher"]; okp {
				mySqlReplicaConfiguration.SslCipher = vp.(string)
			}

			if vp, okp := _replicaConfiguration["username"]; okp {
				mySqlReplicaConfiguration.Username = vp.(string)
			}

			if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp {
				mySqlReplicaConfiguration.VerifyServerCertificate = vp.(bool)
			}

			replicaConfiguration.MysqlReplicaConfiguration = mySqlReplicaConfiguration
			instance.ReplicaConfiguration = replicaConfiguration
		}
	}

	if v, ok := d.GetOk("master_instance_name"); ok {
		instance.MasterInstanceName = v.(string)
	// Modifying a replica during Create can cause problems if the master is
	// modified at the same time. Lock the master until we're done in order
	// to prevent that.
	if !sqlDatabaseIsMaster(d) {
		mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName))
		defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName))
	}

	op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 {
		if googleapiError, ok := err.(*googleapi.Error); ok && googleapiError.Code == 409 {
			return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name)
		} else {
			return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err)
@ -715,9 +491,9 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{})
		return err
	}

	// If a default root user was created with a wildcard ('%') hostname, delete it. Note that if the resource is a
	// replica, then any users are inherited from the master instance and should be left alone.
	if !sqlResourceIsReplica(d) {
	// If a default root user was created with a wildcard ('%') hostname, delete it.
	// Users in a replica instance are inherited from the master instance and should be left alone.
	if sqlDatabaseIsMaster(d) {
		var users *sqladmin.UsersListResponse
		err = retryTime(func() error {
			users, err = config.clientSqlAdmin.Users.List(project, instance.Name).Do()
@ -745,6 +521,151 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{})
	return nil
}

func expandSqlDatabaseInstanceSettings(configured []interface{}, secondGen bool) *sqladmin.Settings {
	if len(configured) == 0 {
		return nil
	}

	_settings := configured[0].(map[string]interface{})
	settings := &sqladmin.Settings{
		Tier:                        _settings["tier"].(string),
		ForceSendFields:             []string{"StorageAutoResize"},
		ActivationPolicy:            _settings["activation_policy"].(string),
		AvailabilityType:            _settings["availability_type"].(string),
		CrashSafeReplicationEnabled: _settings["crash_safe_replication"].(bool),
		DataDiskSizeGb:              int64(_settings["disk_size"].(int)),
		DataDiskType:                _settings["disk_type"].(string),
		PricingPlan:                 _settings["pricing_plan"].(string),
		ReplicationType:             _settings["replication_type"].(string),
		UserLabels:                  convertStringMap(_settings["user_labels"].(map[string]interface{})),
		BackupConfiguration:         expandBackupConfiguration(_settings["backup_configuration"].([]interface{})),
		DatabaseFlags:               expandDatabaseFlags(_settings["database_flags"].([]interface{})),
		AuthorizedGaeApplications:   expandAuthorizedGaeApplications(_settings["authorized_gae_applications"].([]interface{})),
		IpConfiguration:             expandIpConfiguration(_settings["ip_configuration"].([]interface{})),
		LocationPreference:          expandLocationPreference(_settings["location_preference"].([]interface{})),
		MaintenanceWindow:           expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})),
	}

	// 1st Generation instances don't support the disk_autoresize parameter
	// and it defaults to true - so we shouldn't set it if this is first gen
	if secondGen {
		settings.StorageAutoResize = googleapi.Bool(_settings["disk_autoresize"].(bool))
	}

	return settings
}

func expandReplicaConfiguration(configured []interface{}) *sqladmin.ReplicaConfiguration {
	if len(configured) == 0 {
		return nil
	}

	_replicaConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.ReplicaConfiguration{
		FailoverTarget: _replicaConfiguration["failover_target"].(bool),

		// MysqlReplicaConfiguration has been flattened in the TF schema, so
		// we'll keep it flat here instead of another expand method.
		MysqlReplicaConfiguration: &sqladmin.MySqlReplicaConfiguration{
			CaCertificate:           _replicaConfiguration["ca_certificate"].(string),
			ClientCertificate:       _replicaConfiguration["client_certificate"].(string),
			ClientKey:               _replicaConfiguration["client_key"].(string),
			ConnectRetryInterval:    int64(_replicaConfiguration["connect_retry_interval"].(int)),
			DumpFilePath:            _replicaConfiguration["dump_file_path"].(string),
			MasterHeartbeatPeriod:   int64(_replicaConfiguration["master_heartbeat_period"].(int)),
			Password:                _replicaConfiguration["password"].(string),
			SslCipher:               _replicaConfiguration["ssl_cipher"].(string),
			Username:                _replicaConfiguration["username"].(string),
			VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool),
		},
	}
}

func expandMaintenanceWindow(configured []interface{}) *sqladmin.MaintenanceWindow {
	if len(configured) == 0 {
		return nil
	}

	window := configured[0].(map[string]interface{})
	return &sqladmin.MaintenanceWindow{
		Day:         int64(window["day"].(int)),
		Hour:        int64(window["hour"].(int)),
		UpdateTrack: window["update_track"].(string),
	}
}

func expandLocationPreference(configured []interface{}) *sqladmin.LocationPreference {
	if len(configured) == 0 {
		return nil
	}

	_locationPreference := configured[0].(map[string]interface{})
	return &sqladmin.LocationPreference{
		FollowGaeApplication: _locationPreference["follow_gae_application"].(string),
		Zone:                 _locationPreference["zone"].(string),
	}
}

func expandIpConfiguration(configured []interface{}) *sqladmin.IpConfiguration {
	if len(configured) == 0 {
		return nil
	}

	_ipConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.IpConfiguration{
		Ipv4Enabled:        _ipConfiguration["ipv4_enabled"].(bool),
		RequireSsl:         _ipConfiguration["require_ssl"].(bool),
		AuthorizedNetworks: expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()),
	}
}

func expandAuthorizedNetworks(configured []interface{}) []*sqladmin.AclEntry {
	an := make([]*sqladmin.AclEntry, 0, len(configured))
	for _, _acl := range configured {
		_entry := _acl.(map[string]interface{})
		an = append(an, &sqladmin.AclEntry{
			ExpirationTime: _entry["expiration_time"].(string),
			Name:           _entry["name"].(string),
			Value:          _entry["value"].(string),
		})
	}

	return an
}

func expandAuthorizedGaeApplications(configured []interface{}) []string {
	aga := make([]string, 0, len(configured))
	for _, app := range configured {
		aga = append(aga, app.(string))
	}
	return aga
}

func expandDatabaseFlags(configured []interface{}) []*sqladmin.DatabaseFlags {
	databaseFlags := make([]*sqladmin.DatabaseFlags, 0, len(configured))
	for _, _flag := range configured {
		_entry := _flag.(map[string]interface{})

		databaseFlags = append(databaseFlags, &sqladmin.DatabaseFlags{
			Name:  _entry["name"].(string),
			Value: _entry["value"].(string),
		})
	}
	return databaseFlags
}

func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfiguration {
	if len(configured) == 0 {
		return nil
	}

	_backupConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.BackupConfiguration{
		BinaryLogEnabled: _backupConfiguration["binary_log_enabled"].(bool),
		Enabled:          _backupConfiguration["enabled"].(bool),
		StartTime:        _backupConfiguration["start_time"].(string),
	}
}

func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

@ -1311,9 +1232,9 @@ func instanceMutexKey(project, instance_name string) string {
	return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name)
}

// sqlResourceIsReplica returns true if the provided schema.ResourceData represents a replica SQL instance, and false
// otherwise.
func sqlResourceIsReplica(d *schema.ResourceData) bool {
// sqlDatabaseIsMaster returns true if the provided schema.ResourceData represents a
// master SQL Instance, and false if it is a replica.
func sqlDatabaseIsMaster(d *schema.ResourceData) bool {
	_, ok := d.GetOk("master_instance_name")
	return ok
	return !ok
}
website/docs/r/app_engine_application.html.markdown (new executable file, 72 lines)
@ -0,0 +1,72 @@
---
layout: "google"
page_title: "Google: google_app_engine_application"
sidebar_current: "docs-google-app-engine-application"
description: |-
 Allows management of an App Engine application.
---

# google_app_engine_application

Allows creation and management of an App Engine application.

~> App Engine applications cannot be deleted once they're created; you have to delete the
   entire project to delete the application. Terraform will report the application has been
   successfully deleted; this is a limitation of Terraform, and will go away in the future.
   Terraform is not able to delete App Engine applications.

## Example Usage

```hcl
resource "google_project" "my_project" {
  name       = "My Project"
  project_id = "your-project-id"
  org_id     = "1234567"
}

resource "google_app_engine_application" "app" {
  project     = "${google_project.my_project.project_id}"
  location_id = "us-central"
}
```

## Argument Reference

The following arguments are supported:

* `location_id` - (Required) The [location](https://cloud.google.com/appengine/docs/locations)
  to serve the app from.

* `auth_domain` - (Optional) The domain to authenticate users with when using App Engine's User API.

* `serving_status` - (Optional) The serving status of the app.

* `feature_settings` - (Optional) A block of optional settings to configure specific App Engine features (see the sketch after this list):

  * `split_health_checks` - (Optional) Set to false to use the legacy health check instead of the readiness
    and liveness checks.
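As a sketch only (the block syntax follows the resource schema above; the project reference is illustrative), enabling split health checks would look roughly like:

```hcl
resource "google_app_engine_application" "app" {
  project     = "${google_project.my_project.project_id}"
  location_id = "us-central"

  # Hypothetical example: use the readiness/liveness checks
  # instead of the legacy health check.
  feature_settings {
    split_health_checks = true
  }
}
```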

## Attributes Reference

In addition to the arguments listed above, the following computed attributes are
exported:

* `name` - Unique name of the app, usually `apps/{PROJECT_ID}`

* `url_dispatch_rule` - A list of dispatch rule blocks. Each block has a `domain`, `path`, and `service` field.

* `code_bucket` - The GCS bucket code is being stored in for this app.

* `default_hostname` - The default hostname for this app.

* `default_bucket` - The GCS bucket content is being stored in for this app.

* `gcr_domain` - The GCR domain used for storing managed Docker images for this app.

## Import

Applications can be imported using the ID of the project the application belongs to, e.g.

```
$ terraform import google_app_engine_application.app your-project-id
```
@ -18,10 +18,12 @@ Creates a Google Bigtable instance. For more information see
```hcl
resource "google_bigtable_instance" "instance" {
  name         = "tf-instance"
  cluster_id   = "tf-instance-cluster"
  zone         = "us-central1-b"
  num_nodes    = 3
  storage_type = "HDD"
  cluster {
    cluster_id   = "tf-instance-cluster"
    zone         = "us-central1-b"
    num_nodes    = 3
    storage_type = "HDD"
  }
}
```

@ -31,21 +33,35 @@ The following arguments are supported:

* `name` - (Required) The name of the Cloud Bigtable instance.

* `cluster_id` - (Required) The ID of the Cloud Bigtable cluster.

* `zone` - (Required) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations).

* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance.

* `instance_type` - (Optional) The instance type to create. One of `"DEVELOPMENT"` or `"PRODUCTION"`. Defaults to `"PRODUCTION"`.

* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`.

* `project` - (Optional) The ID of the project in which the resource belongs. If it
    is not provided, the provider project is used.

* `display_name` - (Optional) The human-readable display name of the Bigtable instance. Defaults to the instance `name`.

* `cluster` - (Optional) A block of cluster configuration options. Either `cluster` or `cluster_id` must be used. Only one cluster may be specified. See structure below.

* `cluster_id` - (Optional, Deprecated) The ID of the Cloud Bigtable cluster. Use `cluster.cluster_id` instead.

* `zone` - (Optional, Deprecated) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations). Use `cluster.zone` instead.

* `num_nodes` - (Optional, Deprecated) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance. Use `cluster.num_nodes` instead.

* `storage_type` - (Optional, Deprecated) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`. Use `cluster.storage_type` instead.

-----

`cluster` supports the following arguments:

* `cluster_id` - (Required) The ID of the Cloud Bigtable cluster.

* `zone` - (Optional) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations).

* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance.

* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`.
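For example, a hedged sketch of a `DEVELOPMENT` instance using the `cluster` block (names are illustrative; `num_nodes` is omitted because it cannot be set for `DEVELOPMENT` instances):

```hcl
resource "google_bigtable_instance" "development_instance" {
  name          = "tf-instance-dev"
  instance_type = "DEVELOPMENT"

  # The deprecated top-level cluster_id/zone/storage_type fields
  # map one-to-one onto this block.
  cluster {
    cluster_id   = "tf-instance-dev-cluster"
    zone         = "us-central1-b"
    storage_type = "HDD"
  }
}
```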

## Attributes Reference

Only the arguments listed above are exposed as attributes.

@ -64,17 +64,36 @@ The following arguments are supported:

* `entry_point` - (Optional) Name of a JavaScript function that will be executed when the Google Cloud Function is triggered.

* `event_trigger` - (Optional) A source that fires events in response to a condition in another service. Structure is documented below. Cannot be used with `trigger_http`.

* `trigger_http` - (Optional) Boolean variable. Any HTTP request (of a supported type) to the endpoint will trigger function execution. Supported HTTP request types are: POST, PUT, GET, DELETE, and OPTIONS. Endpoint is returned as `https_trigger_url`. Cannot be used with `trigger_bucket` and `trigger_topic`.

* `trigger_bucket` - (Optional) Google Cloud Storage bucket name. Every change in files in this bucket will trigger function execution. Cannot be used with `trigger_http` and `trigger_topic`.
    Deprecated. Use `event_trigger` instead.

* `trigger_topic` - (Optional) Name of Pub/Sub topic. Every message published in this topic will trigger function execution with message contents passed as input data. Cannot be used with `trigger_http` and `trigger_bucket`.
    Deprecated. Use `event_trigger` instead.

* `labels` - (Optional) A set of key/value label pairs to assign to the function.

* `environment_variables` - (Optional) A set of key/value environment variable pairs to assign to the function.

* `retry_on_failure` - (Optional) Whether the function should be retried on failure. This only applies to bucket and topic triggers, not HTTPS triggers.
    Deprecated. Use `event_trigger.failure_policy.retry` instead.

The `event_trigger` block supports:

* `event_type` - (Required) The type of event to observe. For example: `"providers/cloud.storage/eventTypes/object.change"`
    and `"providers/cloud.pubsub/eventTypes/topic.publish"`. See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/)
    for a full reference. Only Cloud Storage and Cloud Pub/Sub triggers are supported at this time.

* `resource` - (Required) The name of the resource from which to observe events, for example, `"myBucket"`.

* `failure_policy` - (Optional) Specifies policy for failed executions. Structure is documented below.

The `failure_policy` block supports:

* `retry` - (Required) Whether the function should be retried on failure. Defaults to `false`.
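A migration sketch for a function that previously used the deprecated `trigger_bucket` plus `retry_on_failure` pair; all names, as well as the source-archive arguments, are illustrative assumptions rather than a verified configuration:

```hcl
resource "google_cloudfunctions_function" "function" {
  name                  = "my-function"
  available_memory_mb   = 128
  source_archive_bucket = "my-source-bucket"
  source_archive_object = "index.zip"
  entry_point           = "helloGCS"

  # Replaces trigger_bucket = "my-trigger-bucket" and retry_on_failure = true.
  event_trigger {
    event_type = "providers/cloud.storage/eventTypes/object.change"
    resource   = "my-trigger-bucket"

    failure_policy {
      retry = true
    }
  }
}
```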

## Attributes Reference

website/docs/r/monitoring_alert_policy.html.markdown (new file, 622 lines)
@ -0,0 +1,622 @@
---
# ----------------------------------------------------------------------------
#
#     ***     AUTO GENERATED CODE    ***    AUTO GENERATED CODE     ***
#
# ----------------------------------------------------------------------------
#
#     This file is automatically generated by Magic Modules and manual
#     changes will be clobbered when the file is regenerated.
#
#     Please read more about how to change this file in
#     .github/CONTRIBUTING.md.
#
# ----------------------------------------------------------------------------
layout: "google"
page_title: "Google: google_monitoring_alert_policy"
sidebar_current: "docs-google-monitoring-alert-policy"
description: |-
  A description of the conditions under which some aspect of your system is
  considered to be "unhealthy" and the ways to notify people or services
  about this state.
---

# google\_monitoring\_alert\_policy

A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify people or services
about this state.

To get more information about AlertPolicy, see:

* [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
* How-to Guides
    * [Official Documentation](https://cloud.google.com/monitoring/alerts/)

## Example Usage

### Basic Usage
```hcl
resource "google_monitoring_alert_policy" "basic" {
  display_name = "Test Policy Basic"
  combiner     = "OR"
  conditions = [
    {
      display_name = "test condition"
      condition_threshold {
        filter     = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""
        duration   = "60s"
        comparison = "COMPARISON_GT"
        aggregations = [
          {
            alignment_period   = "60s"
            per_series_aligner = "ALIGN_RATE"
          }
        ]
      }
    }
  ]
}
```

## Argument Reference

The following arguments are supported:

* `display_name` -
  (Required)
  A short name or phrase used to identify the policy in
  dashboards, notifications, and incidents. To avoid confusion, don't use
  the same display name for multiple policies in the same project. The
  name is limited to 512 Unicode characters.

* `combiner` -
  (Required)
  How to combine the results of multiple conditions to
  determine if an incident should be opened.

* `enabled` -
  (Required)
  Whether or not the policy is enabled.

* `conditions` -
  (Required)
  A list of conditions for the policy. The conditions are combined by
  AND or OR according to the combiner field. If the combined conditions
  evaluate to true, then an incident is created. A policy can have from
  one to six conditions. Structure is documented below.

The `conditions` block supports:

* `condition_absent` -
  (Optional)
  A condition that checks that a time series
  continues to receive new data points. Structure is documented below,
  followed by a configuration sketch.

* `name` -
  The unique resource name for this condition.
  Its syntax is:
  projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
  [CONDITION_ID] is assigned by Stackdriver Monitoring when
  the condition is created as part of a new or updated alerting
  policy.

* `condition_threshold` -
  (Optional)
  A condition that compares a time series against a
  threshold. Structure is documented below.

* `display_name` -
  (Required)
  A short name or phrase used to identify the
  condition in dashboards, notifications, and
  incidents. To avoid confusion, don't use the same
  display name for multiple conditions in the same
  policy.

The `condition_absent` block supports:

* `aggregations` -
  (Optional)
  Specifies the alignment of data points in
  individual time series as well as how to
  combine the retrieved time series together
  (such as when aggregating multiple streams
  on each resource to a single stream for each
  resource or when aggregating streams across
  all members of a group of resources).
  Multiple aggregations are applied in the
  order specified. Structure is documented below.

* `trigger` -
  (Optional)
  The number/percent of time series for which
  the comparison must hold in order for the
  condition to trigger. If unspecified, then
  the condition will trigger if the comparison
  is true for any of the time series that have
  been identified by filter and aggregations. Structure is documented below.

* `duration` -
  (Required)
  The amount of time that a time series must
  fail to report new data to be considered
  failing. Currently, only values that are a
  multiple of a minute--e.g. 60s, 120s, or 300s
  --are supported.

* `filter` -
  (Optional)
  A filter that identifies which time series
  should be compared with the threshold. The
  filter is similar to the one that is
  specified in the
  MetricService.ListTimeSeries request (that
  call is useful to verify the time series
  that will be retrieved / processed) and must
  specify the metric type and optionally may
  contain restrictions on resource type,
  resource labels, and metric labels. This
  field may not exceed 2048 Unicode characters
  in length.
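A hedged sketch of a `condition_absent` condition, modeled on the acceptance-test configuration earlier in this diff (the filter and duration values are illustrative):

```hcl
condition_absent {
  duration = "3600s"
  filter   = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\""

  aggregations {
    alignment_period   = "60s"
    per_series_aligner = "ALIGN_MEAN"
  }

  # Trigger when a single time series stops reporting data.
  trigger {
    count = 1
  }
}
```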

The `aggregations` block supports:

* `per_series_aligner` -
  (Optional)
  The approach to be used to align
  individual time series. Not all
  alignment functions may be applied
  to all time series, depending on
  the metric type and value type of
  the original time series.
  Alignment may change the metric
  type or the value type of the time
  series. Time series data must be
  aligned in order to perform cross-
  time series reduction. If
  crossSeriesReducer is specified,
  then perSeriesAligner must be
  specified and not equal ALIGN_NONE
  and alignmentPeriod must be
  specified; otherwise, an error is
  returned.

* `group_by_fields` -
  (Optional)
  The set of fields to preserve when
  crossSeriesReducer is specified.
  The groupByFields determine how
  the time series are partitioned
  into subsets prior to applying the
  aggregation function. Each subset
  contains time series that have the
  same value for each of the
  grouping fields. Each individual
  time series is a member of exactly
  one subset. The crossSeriesReducer
  is applied to each subset of time
  series. It is not possible to
  reduce across different resource
  types, so this field implicitly
  contains resource.type. Fields not
  specified in groupByFields are
  aggregated away. If groupByFields
  is not specified and all the time
  series have the same resource
  type, then the time series are
  aggregated into a single output
  time series. If crossSeriesReducer
  is not defined, this field is
  ignored.

* `alignment_period` -
  (Optional)
  The alignment period for per-time
  series alignment. If present,
  alignmentPeriod must be at least
  60 seconds. After per-time series
  alignment, each time series will
  contain data points only on the
  period boundaries. If
  perSeriesAligner is not specified
  or equals ALIGN_NONE, then this
  field is ignored. If
  perSeriesAligner is specified and
  does not equal ALIGN_NONE, then
  this field must be defined;
  otherwise an error is returned.

* `cross_series_reducer` -
  (Optional)
  The approach to be used to combine
  time series. Not all reducer
  functions may be applied to all
  time series, depending on the
  metric type and the value type of
  the original time series.
  Reduction may change the metric
  type or value type of the time
  series. Time series data must be
  aligned in order to perform cross-
  time series reduction. If
  crossSeriesReducer is specified,
  then perSeriesAligner must be
  specified and not equal ALIGN_NONE
  and alignmentPeriod must be
  specified; otherwise, an error is
  returned.
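Because `cross_series_reducer` requires both `per_series_aligner` and `alignment_period` to be set, a valid `aggregations` block looks roughly like the following sketch, adapted from the test configuration in this diff (the field values are illustrative):

```hcl
aggregations {
  alignment_period     = "60s"
  per_series_aligner   = "ALIGN_RATE"
  cross_series_reducer = "REDUCE_MEAN"

  # Partition the time series into these subsets before applying the reducer.
  group_by_fields = [
    "project",
    "resource.label.instance_id",
    "resource.label.zone",
  ]
}
```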
||||
The `trigger` block supports:
|
||||
|
||||
* `percent` -
|
||||
(Optional)
|
||||
The percentage of time series that
|
||||
must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
* `count` -
|
||||
(Optional)
|
||||
The absolute number of time series
|
||||
that must fail the predicate for the
|
||||
condition to be triggered.
|
||||
|
||||
The `condition_threshold` block supports:

* `threshold_value` -
  (Optional)
  A value against which to compare the time series.

* `denominator_filter` -
  (Optional)
  A filter that identifies a time series that should be used as the denominator of a ratio that will be compared with the threshold. If a denominator_filter is specified, the time series specified by the filter field will be used as the numerator. The filter is similar to the one that is specified in the MetricService.ListTimeSeries request (that call is useful to verify the time series that will be retrieved / processed) and must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.

* `denominator_aggregations` -
  (Optional)
  Specifies the alignment of data points in individual time series selected by denominatorFilter as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). When computing ratios, the aggregations and denominator_aggregations fields must use the same alignment period and produce time series that have the same periodicity and labels. This field is similar to the one in the MetricService.ListTimeSeries request. It is advisable to use the ListTimeSeries method when debugging this field. Structure is documented below.

* `duration` -
  (Required)
  The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly.

* `comparison` -
  (Required)
  The comparison to apply between the time series (indicated by filter and aggregation) and the threshold (indicated by threshold_value). The comparison is applied on each time series, with the time series on the left-hand side and the threshold on the right-hand side. Only COMPARISON_LT and COMPARISON_GT are supported currently.

* `trigger` -
  (Optional)
  The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. Structure is documented below.

* `aggregations` -
  (Optional)
  Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified. This field is similar to the one in the MetricService.ListTimeSeries request. It is advisable to use the ListTimeSeries method when debugging this field. Structure is documented below.

* `filter` -
  (Optional)
  A filter that identifies which time series should be compared with the threshold. The filter is similar to the one that is specified in the MetricService.ListTimeSeries request (that call is useful to verify the time series that will be retrieved / processed) and must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.
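To show how these fields fit together, below is a hedged sketch of a `condition_threshold` block for a CPU-utilization alert; the filter string, threshold, and duration are hypothetical values chosen for the example:

```hcl
condition_threshold {
  # Series to evaluate: per-instance CPU utilization.
  filter = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\""

  # Alert when the aligned value stays above 90% for five minutes.
  comparison      = "COMPARISON_GT"
  threshold_value = 0.9
  duration        = "300s"

  aggregations {
    alignment_period   = "60s"
    per_series_aligner = "ALIGN_MEAN"
  }

  trigger {
    count = 1
  }
}
```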
The `denominator_aggregations` block supports:

* `per_series_aligner` -
  (Optional)
  The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.

* `group_by_fields` -
  (Optional)
  The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.

* `alignment_period` -
  (Optional)
  The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.

* `cross_series_reducer` -
  (Optional)
  The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type or the value type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.
The `trigger` block supports:

* `percent` -
  (Optional)
  The percentage of time series that must fail the predicate for the condition to be triggered.

* `count` -
  (Optional)
  The absolute number of time series that must fail the predicate for the condition to be triggered.
The `aggregations` block supports:

* `per_series_aligner` -
  (Optional)
  The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.

* `group_by_fields` -
  (Optional)
  The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.

* `alignment_period` -
  (Optional)
  The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.

* `cross_series_reducer` -
  (Optional)
  The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type or the value type of the time series. Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.
- - -

* `notification_channels` -
  (Optional)
  Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`.

* `labels` -
  (Optional)
  User-supplied key/value data to be used for organizing AlertPolicy objects.

* `project` - (Optional) The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
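For example, `notification_channels` entries take the fully-qualified form shown above; the project and channel ID below are hypothetical placeholders:

```hcl
notification_channels = [
  # Obtain real channel IDs from the notificationChannels.list method.
  "projects/my-project/notificationChannels/1234567890",
]
```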
## Attributes Reference

In addition to the arguments listed above, the following computed attributes are exported:

* `name` -
  The unique resource name for this policy.
  Its syntax is: `projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]`

* `creation_record` -
  A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. Structure is documented below.

The `creation_record` block contains:

* `mutate_time` -
  When the change occurred.

* `mutated_by` -
  The email address of the user making the change.

## Import

AlertPolicy can be imported using any of these accepted formats:

```
$ terraform import google_monitoring_alert_policy.default {{name}}
```
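For instance, with a hypothetical project `my-project` and policy ID `12345`, the command would look like:

```
$ terraform import google_monitoring_alert_policy.default projects/my-project/alertPolicies/12345
```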
@ -132,6 +132,15 @@
      </ul>
    </li>

    <li<%= sidebar_current("docs-google-app-engine") %>>
      <a href="#">Google App Engine Resources</a>
      <ul class="nav nav-visible">
        <li<%= sidebar_current("docs-google-app-engine-application") %>>
          <a href="/docs/providers/google/r/app_engine_application.html">google_app_engine_application</a>
        </li>
      </ul>
    </li>

    <li<%= sidebar_current("docs-google-bigquery") %>>
      <a href="#">Google BigQuery Resources</a>
      <ul class="nav nav-visible">
@ -689,6 +698,16 @@
      </ul>
    </li>

    <li<%= sidebar_current("docs-google-monitoring") %>>
      <a href="#">Google Stackdriver Monitoring Resources</a>
      <ul class="nav nav-visible">
        <li<%= sidebar_current("docs-google-monitoring-alert-policy") %>>
          <a href="/docs/providers/google/r/monitoring_alert_policy.html">google_monitoring_alert_policy</a>
        </li>
      </ul>
    </li>

    <li<%= sidebar_current("docs-google-storage") %>>
      <a href="#">Google Storage Resources</a>
      <ul class="nav nav-visible">